vllm.benchmarks.utils

InfEncoder

Bases: JSONEncoder

Source code in vllm/benchmarks/utils.py
class InfEncoder(json.JSONEncoder):

    def clear_inf(self, o: Any):
        if isinstance(o, dict):
            return {k: self.clear_inf(v) for k, v in o.items()}
        elif isinstance(o, list):
            return [self.clear_inf(v) for v in o]
        elif isinstance(o, float) and math.isinf(o):
            return "inf"
        return o

    def iterencode(self, o: Any, *args, **kwargs) -> Any:
        return super().iterencode(self.clear_inf(o), *args, **kwargs)
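As an illustration only (not part of vllm/benchmarks/utils.py), InfEncoder can be passed as the cls argument to json.dumps or json.dump so that infinite float values are serialized as the string "inf" instead of the non-standard Infinity literal:

import json

from vllm.benchmarks.utils import InfEncoder

data = {"throughput": 123.4, "p99_latency": float("inf")}

# The default encoder emits the non-standard JSON literal `Infinity`.
print(json.dumps(data))                  # {"throughput": 123.4, "p99_latency": Infinity}

# InfEncoder rewrites infinite floats to the string "inf" before encoding.
print(json.dumps(data, cls=InfEncoder))  # {"throughput": 123.4, "p99_latency": "inf"}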

clear_inf

clear_inf(o: Any)
Source code in vllm/benchmarks/utils.py
def clear_inf(self, o: Any):
    if isinstance(o, dict):
        return {k: self.clear_inf(v) for k, v in o.items()}
    elif isinstance(o, list):
        return [self.clear_inf(v) for v in o]
    elif isinstance(o, float) and math.isinf(o):
        return "inf"
    return o
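A few behavioural notes, shown in an illustrative sketch: math.isinf is true for both positive and negative infinity, so -inf is also replaced by the string "inf", while NaN is not infinite and passes through unchanged. Dicts and lists are handled recursively; other container types are returned as-is.

from vllm.benchmarks.utils import InfEncoder

enc = InfEncoder()
print(enc.clear_inf({"a": float("inf"), "b": [float("-inf"), float("nan"), 1.0]}))
# {'a': 'inf', 'b': ['inf', nan, 1.0]}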

iterencode

iterencode(o: Any, *args, **kwargs) -> Any
Source code in vllm/benchmarks/utils.py
def iterencode(self, o: Any, *args, **kwargs) -> Any:
    return super().iterencode(self.clear_inf(o), *args, **kwargs)
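Overriding iterencode (rather than the more common default hook) is what makes this work: JSONEncoder.default is only invoked for objects the encoder cannot serialize, and infinite floats are serializable (as the non-standard Infinity literal), so they never reach default. A hedged sketch of the difference, with a hypothetical encoder name:

import json

class DefaultHookEncoder(json.JSONEncoder):
    def default(self, o):
        # Never called for float("inf"): floats are already serializable.
        return "inf"

print(json.dumps(float("inf"), cls=DefaultHookEncoder))  # Infinity

Because json.dump delegates to iterencode internally, rewriting the value there ensures the substitution also applies when writing to a file.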

convert_to_pytorch_benchmark_format

convert_to_pytorch_benchmark_format(
    args: Namespace,
    metrics: dict[str, list],
    extra_info: dict[str, Any],
) -> list

Save the benchmark results in the format used by the PyTorch OSS benchmark database, with one metric per record. See https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database

Source code in vllm/benchmarks/utils.py
def convert_to_pytorch_benchmark_format(args: argparse.Namespace,
                                        metrics: dict[str, list],
                                        extra_info: dict[str, Any]) -> list:
    """
    Save the benchmark results in the format used by PyTorch OSS benchmark,
    with one metric per record.
    https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
    """
    records = []
    if not os.environ.get("SAVE_TO_PYTORCH_BENCHMARK_FORMAT", False):
        return records

    for name, benchmark_values in metrics.items():
        record = {
            "benchmark": {
                "name": "vLLM benchmark",
                "extra_info": {
                    "args": vars(args),
                },
            },
            "model": {
                "name": args.model,
            },
            "metric": {
                "name": name,
                "benchmark_values": benchmark_values,
                "extra_info": extra_info,
            },
        }

        tp = record["benchmark"]["extra_info"]["args"].get(
            "tensor_parallel_size")
        # Save tensor_parallel_size parameter if it's part of the metadata
        if not tp and "tensor_parallel_size" in extra_info:
            record["benchmark"]["extra_info"]["args"][
                "tensor_parallel_size"] = extra_info["tensor_parallel_size"]

        records.append(record)

    return records
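A minimal usage sketch (the argument values here are illustrative, not part of the library): the conversion is a no-op returning an empty list unless the SAVE_TO_PYTORCH_BENCHMARK_FORMAT environment variable is set, and args is expected to expose at least a model attribute.

import argparse
import os

from vllm.benchmarks.utils import convert_to_pytorch_benchmark_format

os.environ["SAVE_TO_PYTORCH_BENCHMARK_FORMAT"] = "1"

args = argparse.Namespace(model="facebook/opt-125m", tensor_parallel_size=None)
records = convert_to_pytorch_benchmark_format(
    args=args,
    metrics={"request_throughput": [42.0]},
    extra_info={"tensor_parallel_size": 1},
)
# One record per metric. tensor_parallel_size is copied from extra_info into
# the args metadata because it was not set on args itself.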

write_to_json

write_to_json(filename: str, records: list) -> None
Source code in vllm/benchmarks/utils.py
def write_to_json(filename: str, records: list) -> None:
    with open(filename, "w") as f:
        json.dump(records, f, cls=InfEncoder)
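For completeness, a hedged sketch of the typical end of a benchmark run: the converted records are written to disk with write_to_json, which applies InfEncoder so infinite metric values remain valid JSON.

from vllm.benchmarks.utils import write_to_json

# Continuing the sketch above; "benchmark_results.json" is an illustrative filename.
if records:
    write_to_json("benchmark_results.json", records)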