vllm.entrypoints.cli

Modules:

Name          Description
benchmark
collect_env
main          The CLI entrypoints of vLLM
openai
run_batch
serve
types

__all__ module-attribute

__all__: list[str] = [
    "BenchmarkLatencySubcommand",
    "BenchmarkServingSubcommand",
    "BenchmarkThroughputSubcommand",
]
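
The exported subcommands share a small interface: a name, a help string, an add_cli_args classmethod that registers flags on an argparse parser, and a static cmd entry point. The following is a minimal illustrative sketch, not the actual vllm bench dispatcher, showing how classes with this interface can be attached to an argparse parser; the import paths follow the source files listed below.

import argparse

from vllm.entrypoints.cli.benchmark.latency import BenchmarkLatencySubcommand
from vllm.entrypoints.cli.benchmark.serve import BenchmarkServingSubcommand
from vllm.entrypoints.cli.benchmark.throughput import BenchmarkThroughputSubcommand


def build_bench_parser() -> argparse.ArgumentParser:
    # Hypothetical dispatcher: attaches subcommands that expose
    # `name`, `help`, `add_cli_args` and `cmd` to one parser.
    parser = argparse.ArgumentParser(prog="vllm bench")
    subparsers = parser.add_subparsers(dest="subcommand", required=True)
    for subcommand in (
        BenchmarkLatencySubcommand,
        BenchmarkServingSubcommand,
        BenchmarkThroughputSubcommand,
    ):
        sub = subparsers.add_parser(subcommand.name, help=subcommand.help)
        subcommand.add_cli_args(sub)  # delegate flag registration to the subcommand
        sub.set_defaults(dispatch_function=subcommand.cmd)
    return parser


if __name__ == "__main__":
    args = build_bench_parser().parse_args()
    args.dispatch_function(args)  # run the selected benchmark

Each subcommand keeps this layer thin: both hooks simply forward to the module-level add_cli_args and main of the corresponding benchmark implementation.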

BenchmarkLatencySubcommand

Bases: BenchmarkSubcommandBase

The latency subcommand for vllm bench.

Source code in vllm/entrypoints/cli/benchmark/latency.py
class BenchmarkLatencySubcommand(BenchmarkSubcommandBase):
    """ The `latency` subcommand for vllm bench. """

    name = "latency"
    help = "Benchmark the latency of a single batch of requests."

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        add_cli_args(parser)

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        main(args)
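
Because add_cli_args and cmd are plain classmethod/staticmethod wrappers, the latency benchmark can also be driven programmatically. A hedged sketch, assuming the class is importable from the source file above and that the underlying benchmark accepts flags such as --model, --input-len, --output-len, and --batch-size (flag names are assumptions; pass --help to see the real set):

import argparse

from vllm.entrypoints.cli.benchmark.latency import BenchmarkLatencySubcommand

parser = argparse.ArgumentParser(prog="vllm bench latency")
BenchmarkLatencySubcommand.add_cli_args(parser)

# Flag names below are assumptions about the underlying latency benchmark.
args = parser.parse_args(
    ["--model", "facebook/opt-125m",
     "--input-len", "32", "--output-len", "8", "--batch-size", "1"]
)
BenchmarkLatencySubcommand.cmd(args)  # runs the latency benchmark with these args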

help class-attribute instance-attribute

help = (
    "Benchmark the latency of a single batch of requests."
)

name class-attribute instance-attribute

name = 'latency'

add_cli_args classmethod

add_cli_args(parser: ArgumentParser) -> None
Source code in vllm/entrypoints/cli/benchmark/latency.py
@classmethod
def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
    add_cli_args(parser)

cmd staticmethod

cmd(args: Namespace) -> None
Source code in vllm/entrypoints/cli/benchmark/latency.py
@staticmethod
def cmd(args: argparse.Namespace) -> None:
    main(args)

BenchmarkServingSubcommand

Bases: BenchmarkSubcommandBase

The serve subcommand for vllm bench.

Source code in vllm/entrypoints/cli/benchmark/serve.py
class BenchmarkServingSubcommand(BenchmarkSubcommandBase):
    """ The `serve` subcommand for vllm bench. """

    name = "serve"
    help = "Benchmark the online serving throughput."

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        add_cli_args(parser)

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        main(args)
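
Unlike the other two subcommands, serve benchmarks online serving throughput, so its cmd sends requests to an already running vLLM server rather than creating an engine itself. The flags it registers can be inspected without starting a benchmark; a small sketch, assuming the class is importable from the source file above:

import argparse

from vllm.entrypoints.cli.benchmark.serve import BenchmarkServingSubcommand

parser = argparse.ArgumentParser(prog="vllm bench serve")
BenchmarkServingSubcommand.add_cli_args(parser)
parser.print_help()  # lists every flag the serving benchmark registers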

help class-attribute instance-attribute

help = 'Benchmark the online serving throughput.'

name class-attribute instance-attribute

name = 'serve'

add_cli_args classmethod

add_cli_args(parser: ArgumentParser) -> None
Source code in vllm/entrypoints/cli/benchmark/serve.py
@classmethod
def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
    add_cli_args(parser)

cmd staticmethod

cmd(args: Namespace) -> None
Source code in vllm/entrypoints/cli/benchmark/serve.py
@staticmethod
def cmd(args: argparse.Namespace) -> None:
    main(args)

BenchmarkThroughputSubcommand

Bases: BenchmarkSubcommandBase

The throughput subcommand for vllm bench.

Source code in vllm/entrypoints/cli/benchmark/throughput.py
class BenchmarkThroughputSubcommand(BenchmarkSubcommandBase):
    """ The `throughput` subcommand for vllm bench. """

    name = "throughput"
    help = "Benchmark offline inference throughput."

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        add_cli_args(parser)

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        main(args)
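
The throughput subcommand runs offline inference end to end, so no server is required. A hedged sketch of a programmatic invocation, assuming the class is importable from the source file above and that the underlying benchmark accepts flags such as --model, --input-len, --output-len, and --num-prompts (flag names are assumptions; pass --help for the real set):

import argparse

from vllm.entrypoints.cli.benchmark.throughput import BenchmarkThroughputSubcommand

parser = argparse.ArgumentParser(prog="vllm bench throughput")
BenchmarkThroughputSubcommand.add_cli_args(parser)

# Flag names below are assumptions about the underlying throughput benchmark.
args = parser.parse_args(
    ["--model", "facebook/opt-125m",
     "--input-len", "128", "--output-len", "128", "--num-prompts", "100"]
)
BenchmarkThroughputSubcommand.cmd(args)  # runs the offline throughput benchmark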

help class-attribute instance-attribute

help = 'Benchmark offline inference throughput.'

name class-attribute instance-attribute

name = 'throughput'

add_cli_args classmethod

add_cli_args(parser: ArgumentParser) -> None
Source code in vllm/entrypoints/cli/benchmark/throughput.py
@classmethod
def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
    add_cli_args(parser)

cmd staticmethod

cmd(args: Namespace) -> None
Source code in vllm/entrypoints/cli/benchmark/throughput.py
@staticmethod
def cmd(args: argparse.Namespace) -> None:
    main(args)