vllm.usage.usage_lib

_GLOBAL_RUNTIME_DATA module-attribute

_GLOBAL_RUNTIME_DATA = dict[str, Union[str, int, bool]]()

_USAGE_ENV_VARS_TO_COLLECT module-attribute

_USAGE_ENV_VARS_TO_COLLECT = [
    "VLLM_USE_MODELSCOPE",
    "VLLM_USE_TRITON_FLASH_ATTN",
    "VLLM_ATTENTION_BACKEND",
    "VLLM_USE_FLASHINFER_SAMPLER",
    "VLLM_PP_LAYER_PARTITION",
    "VLLM_USE_TRITON_AWQ",
    "VLLM_USE_V1",
    "VLLM_ENABLE_V1_MULTIPROCESSING",
]

_USAGE_STATS_DO_NOT_TRACK_PATH module-attribute

_USAGE_STATS_DO_NOT_TRACK_PATH = join(
    _config_home, "do_not_track"
)

_USAGE_STATS_ENABLED module-attribute

_USAGE_STATS_ENABLED = None

_USAGE_STATS_JSON_PATH module-attribute

_USAGE_STATS_JSON_PATH = join(
    _config_home, "usage_stats.json"
)

_USAGE_STATS_SERVER module-attribute

_USAGE_STATS_SERVER = VLLM_USAGE_STATS_SERVER

_config_home module-attribute

_config_home = VLLM_CONFIG_ROOT

usage_message module-attribute

usage_message = UsageMessage()
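
The module exposes usage_message as a process-wide singleton. Below is a minimal sketch of how a call site might use it, gated on the user's opt-out settings; the model architecture and extra key-value pairs are illustrative, not values taken from vLLM itself.

from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled,
                                  usage_message)

if is_usage_stats_enabled():
    # Fire-and-forget: report_usage spawns a daemon thread, so this
    # call does not block startup.
    usage_message.report_usage(
        model_architecture="LlamaForCausalLM",   # illustrative value
        usage_context=UsageContext.LLM_CLASS,
        extra_kvs={"enable_lora": False},        # optional flat KV pairs
    )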

UsageContext

Bases: str, Enum

Source code in vllm/usage/usage_lib.py
class UsageContext(str, Enum):
    UNKNOWN_CONTEXT = "UNKNOWN_CONTEXT"
    LLM_CLASS = "LLM_CLASS"
    API_SERVER = "API_SERVER"
    OPENAI_API_SERVER = "OPENAI_API_SERVER"
    OPENAI_BATCH_RUNNER = "OPENAI_BATCH_RUNNER"
    ENGINE_CONTEXT = "ENGINE_CONTEXT"

API_SERVER class-attribute instance-attribute

API_SERVER = 'API_SERVER'

ENGINE_CONTEXT class-attribute instance-attribute

ENGINE_CONTEXT = 'ENGINE_CONTEXT'

LLM_CLASS class-attribute instance-attribute

LLM_CLASS = 'LLM_CLASS'

OPENAI_API_SERVER class-attribute instance-attribute

OPENAI_API_SERVER = 'OPENAI_API_SERVER'

OPENAI_BATCH_RUNNER class-attribute instance-attribute

OPENAI_BATCH_RUNNER = 'OPENAI_BATCH_RUNNER'

UNKNOWN_CONTEXT class-attribute instance-attribute

UNKNOWN_CONTEXT = 'UNKNOWN_CONTEXT'
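
Because UsageContext mixes in str, its members behave as plain strings, which is what allows the context to be stored directly in the flat usage payload. A quick sketch:

from vllm.usage.usage_lib import UsageContext

ctx = UsageContext.OPENAI_API_SERVER
# str-mixin enum members compare (and serialize) as their string value:
assert ctx == "OPENAI_API_SERVER"
assert ctx.value == "OPENAI_API_SERVER"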

UsageMessage

Collect platform information and send it to the usage stats server.

Source code in vllm/usage/usage_lib.py
class UsageMessage:
    """Collect platform information and send it to the usage stats server."""

    def __init__(self) -> None:
        # NOTE: vLLM's server supports _only_ flat KV pairs.
        # Do not use nested fields.

        self.uuid = str(uuid4())

        # Environment Information
        self.provider: Optional[str] = None
        self.num_cpu: Optional[int] = None
        self.cpu_type: Optional[str] = None
        self.cpu_family_model_stepping: Optional[str] = None
        self.total_memory: Optional[int] = None
        self.architecture: Optional[str] = None
        self.platform: Optional[str] = None
        self.cuda_runtime: Optional[str] = None
        self.gpu_count: Optional[int] = None
        self.gpu_type: Optional[str] = None
        self.gpu_memory_per_device: Optional[int] = None
        self.env_var_json: Optional[str] = None

        # vLLM Information
        self.model_architecture: Optional[str] = None
        self.vllm_version: Optional[str] = None
        self.context: Optional[str] = None

        # Metadata
        self.log_time: Optional[int] = None
        self.source: Optional[str] = None

    def report_usage(self,
                     model_architecture: str,
                     usage_context: UsageContext,
                     extra_kvs: Optional[dict[str, Any]] = None) -> None:
        t = Thread(target=self._report_usage_worker,
                   args=(model_architecture, usage_context, extra_kvs or {}),
                   daemon=True)
        t.start()

    def _report_usage_worker(self, model_architecture: str,
                             usage_context: UsageContext,
                             extra_kvs: dict[str, Any]) -> None:
        self._report_usage_once(model_architecture, usage_context, extra_kvs)
        self._report_continuous_usage()

    def _report_usage_once(self, model_architecture: str,
                           usage_context: UsageContext,
                           extra_kvs: dict[str, Any]) -> None:
        # Platform information
        from vllm.platforms import current_platform
        if current_platform.is_cuda_alike():
            self.gpu_count = cuda_device_count_stateless()
            self.gpu_type, self.gpu_memory_per_device = (
                cuda_get_device_properties(0, ("name", "total_memory")))
        if current_platform.is_cuda():
            self.cuda_runtime = torch.version.cuda
        if current_platform.is_tpu():
            try:
                import torch_xla
                self.gpu_count = torch_xla.runtime.world_size()
                self.gpu_type = torch_xla.tpu.get_tpu_type()
                self.gpu_memory_per_device = (
                    torch_xla.core.xla_model.get_memory_info()["bytes_limit"])
            except Exception:
                pass
        self.provider = _detect_cloud_provider()
        self.architecture = platform.machine()
        self.platform = platform.platform()
        self.total_memory = psutil.virtual_memory().total

        info = cpuinfo.get_cpu_info()
        self.num_cpu = info.get("count", None)
        self.cpu_type = info.get("brand_raw", "")
        self.cpu_family_model_stepping = ",".join([
            str(info.get("family", "")),
            str(info.get("model", "")),
            str(info.get("stepping", ""))
        ])

        # vLLM information
        self.context = usage_context.value
        self.vllm_version = VLLM_VERSION
        self.model_architecture = model_architecture

        # Environment variables
        self.env_var_json = json.dumps({
            env_var: getattr(envs, env_var)
            for env_var in _USAGE_ENV_VARS_TO_COLLECT
        })

        # Metadata
        self.log_time = _get_current_timestamp_ns()
        self.source = envs.VLLM_USAGE_SOURCE

        data = vars(self)
        if extra_kvs:
            data.update(extra_kvs)

        self._write_to_file(data)
        self._send_to_server(data)

    def _report_continuous_usage(self):
        """Report usage every 10 minutes.

        This helps us collect more data points on the uptime of vLLM
        instances. It can also be used to send performance metrics over time.
        """
        while True:
            time.sleep(600)
            data = {
                "uuid": self.uuid,
                "log_time": _get_current_timestamp_ns(),
            }
            data.update(_GLOBAL_RUNTIME_DATA)

            self._write_to_file(data)
            self._send_to_server(data)

    def _send_to_server(self, data: dict[str, Any]) -> None:
        try:
            global_http_client = global_http_connection.get_sync_client()
            global_http_client.post(_USAGE_STATS_SERVER, json=data)
        except requests.exceptions.RequestException:
            # Silently ignore failures; they are only logged at debug level.
            logging.debug("Failed to send usage data to server")

    def _write_to_file(self, data: dict[str, Any]) -> None:
        os.makedirs(os.path.dirname(_USAGE_STATS_JSON_PATH), exist_ok=True)
        Path(_USAGE_STATS_JSON_PATH).touch(exist_ok=True)
        with open(_USAGE_STATS_JSON_PATH, "a") as f:
            json.dump(data, f)
            f.write("\n")

architecture instance-attribute

architecture: Optional[str] = None

context instance-attribute

context: Optional[str] = None

cpu_family_model_stepping instance-attribute

cpu_family_model_stepping: Optional[str] = None

cpu_type instance-attribute

cpu_type: Optional[str] = None

cuda_runtime instance-attribute

cuda_runtime: Optional[str] = None

env_var_json instance-attribute

env_var_json: Optional[str] = None

gpu_count instance-attribute

gpu_count: Optional[int] = None

gpu_memory_per_device instance-attribute

gpu_memory_per_device: Optional[int] = None

gpu_type instance-attribute

gpu_type: Optional[str] = None

log_time instance-attribute

log_time: Optional[int] = None

model_architecture instance-attribute

model_architecture: Optional[str] = None

num_cpu instance-attribute

num_cpu: Optional[int] = None

platform instance-attribute

platform: Optional[str] = None

provider instance-attribute

provider: Optional[str] = None

source instance-attribute

source: Optional[str] = None

total_memory instance-attribute

total_memory: Optional[int] = None

uuid instance-attribute

uuid = str(uuid4())

vllm_version instance-attribute

vllm_version: Optional[str] = None

__init__

__init__() -> None
Source code in vllm/usage/usage_lib.py
def __init__(self) -> None:
    # NOTE: vLLM's server supports _only_ flat KV pairs.
    # Do not use nested fields.

    self.uuid = str(uuid4())

    # Environment Information
    self.provider: Optional[str] = None
    self.num_cpu: Optional[int] = None
    self.cpu_type: Optional[str] = None
    self.cpu_family_model_stepping: Optional[str] = None
    self.total_memory: Optional[int] = None
    self.architecture: Optional[str] = None
    self.platform: Optional[str] = None
    self.cuda_runtime: Optional[str] = None
    self.gpu_count: Optional[int] = None
    self.gpu_type: Optional[str] = None
    self.gpu_memory_per_device: Optional[int] = None
    self.env_var_json: Optional[str] = None

    # vLLM Information
    self.model_architecture: Optional[str] = None
    self.vllm_version: Optional[str] = None
    self.context: Optional[str] = None

    # Metadata
    self.log_time: Optional[int] = None
    self.source: Optional[str] = None

_report_continuous_usage

_report_continuous_usage()

Report usage every 10 minutes.

This helps us collect more data points on the uptime of vLLM instances. It can also be used to send performance metrics over time.

Source code in vllm/usage/usage_lib.py
def _report_continuous_usage(self):
    """Report usage every 10 minutes.

    This helps us collect more data points on the uptime of vLLM
    instances. It can also be used to send performance metrics over time.
    """
    while True:
        time.sleep(600)
        data = {
            "uuid": self.uuid,
            "log_time": _get_current_timestamp_ns(),
        }
        data.update(_GLOBAL_RUNTIME_DATA)

        self._write_to_file(data)
        self._send_to_server(data)
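
Each heartbeat is a small flat record: the process uuid, a fresh timestamp, and whatever has been registered via set_runtime_usage_data. An illustrative (hypothetical) heartbeat payload, written as a Python dict:

heartbeat = {
    "uuid": "a1b2c3d4-...",           # stable per-process identifier
    "log_time": 1700000000000000000,  # UTC timestamp in nanoseconds
    # ...plus any entries previously registered with
    # set_runtime_usage_data(), e.g. "dtype": "bfloat16"
}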

_report_usage_once

_report_usage_once(
    model_architecture: str,
    usage_context: UsageContext,
    extra_kvs: dict[str, Any],
) -> None
Source code in vllm/usage/usage_lib.py
def _report_usage_once(self, model_architecture: str,
                       usage_context: UsageContext,
                       extra_kvs: dict[str, Any]) -> None:
    # Platform information
    from vllm.platforms import current_platform
    if current_platform.is_cuda_alike():
        self.gpu_count = cuda_device_count_stateless()
        self.gpu_type, self.gpu_memory_per_device = (
            cuda_get_device_properties(0, ("name", "total_memory")))
    if current_platform.is_cuda():
        self.cuda_runtime = torch.version.cuda
    if current_platform.is_tpu():
        try:
            import torch_xla
            self.gpu_count = torch_xla.runtime.world_size()
            self.gpu_type = torch_xla.tpu.get_tpu_type()
            self.gpu_memory_per_device = (
                torch_xla.core.xla_model.get_memory_info()["bytes_limit"])
        except Exception:
            pass
    self.provider = _detect_cloud_provider()
    self.architecture = platform.machine()
    self.platform = platform.platform()
    self.total_memory = psutil.virtual_memory().total

    info = cpuinfo.get_cpu_info()
    self.num_cpu = info.get("count", None)
    self.cpu_type = info.get("brand_raw", "")
    self.cpu_family_model_stepping = ",".join([
        str(info.get("family", "")),
        str(info.get("model", "")),
        str(info.get("stepping", ""))
    ])

    # vLLM information
    self.context = usage_context.value
    self.vllm_version = VLLM_VERSION
    self.model_architecture = model_architecture

    # Environment variables
    self.env_var_json = json.dumps({
        env_var: getattr(envs, env_var)
        for env_var in _USAGE_ENV_VARS_TO_COLLECT
    })

    # Metadata
    self.log_time = _get_current_timestamp_ns()
    self.source = envs.VLLM_USAGE_SOURCE

    data = vars(self)
    if extra_kvs:
        data.update(extra_kvs)

    self._write_to_file(data)
    self._send_to_server(data)

_report_usage_worker

_report_usage_worker(
    model_architecture: str,
    usage_context: UsageContext,
    extra_kvs: dict[str, Any],
) -> None
Source code in vllm/usage/usage_lib.py
def _report_usage_worker(self, model_architecture: str,
                         usage_context: UsageContext,
                         extra_kvs: dict[str, Any]) -> None:
    self._report_usage_once(model_architecture, usage_context, extra_kvs)
    self._report_continuous_usage()

_send_to_server

_send_to_server(data: dict[str, Any]) -> None
Source code in vllm/usage/usage_lib.py
def _send_to_server(self, data: dict[str, Any]) -> None:
    try:
        global_http_client = global_http_connection.get_sync_client()
        global_http_client.post(_USAGE_STATS_SERVER, json=data)
    except requests.exceptions.RequestException:
        # Silently ignore failures; they are only logged at debug level.
        logging.debug("Failed to send usage data to server")

_write_to_file

_write_to_file(data: dict[str, Any]) -> None
Source code in vllm/usage/usage_lib.py
def _write_to_file(self, data: dict[str, Any]) -> None:
    os.makedirs(os.path.dirname(_USAGE_STATS_JSON_PATH), exist_ok=True)
    Path(_USAGE_STATS_JSON_PATH).touch(exist_ok=True)
    with open(_USAGE_STATS_JSON_PATH, "a") as f:
        json.dump(data, f)
        f.write("\n")
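
Records are appended one JSON object per line (JSON Lines), so the local log can be inspected with a few lines of Python. A sketch, assuming the default config home of ~/.config/vllm:

import json
import os

path = os.path.expanduser("~/.config/vllm/usage_stats.json")
with open(path) as f:
    # One JSON object per line; skip blank lines defensively.
    records = [json.loads(line) for line in f if line.strip()]
print(records[0].get("context"), records[0].get("vllm_version"))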

report_usage

report_usage(
    model_architecture: str,
    usage_context: UsageContext,
    extra_kvs: Optional[dict[str, Any]] = None,
) -> None
Source code in vllm/usage/usage_lib.py
def report_usage(self,
                 model_architecture: str,
                 usage_context: UsageContext,
                 extra_kvs: Optional[dict[str, Any]] = None) -> None:
    t = Thread(target=self._report_usage_worker,
               args=(model_architecture, usage_context, extra_kvs or {}),
               daemon=True)
    t.start()

_detect_cloud_provider

_detect_cloud_provider() -> str
Source code in vllm/usage/usage_lib.py
def _detect_cloud_provider() -> str:
    # Try detecting through vendor file
    vendor_files = [
        "/sys/class/dmi/id/product_version", "/sys/class/dmi/id/bios_vendor",
        "/sys/class/dmi/id/product_name",
        "/sys/class/dmi/id/chassis_asset_tag", "/sys/class/dmi/id/sys_vendor"
    ]
    # Mapping of identifiable strings to cloud providers
    cloud_identifiers = {
        "amazon": "AWS",
        "microsoft corporation": "AZURE",
        "google": "GCP",
        "oraclecloud": "OCI",
    }

    for vendor_file in vendor_files:
        path = Path(vendor_file)
        if path.is_file():
            file_content = path.read_text().lower()
            for identifier, provider in cloud_identifiers.items():
                if identifier in file_content:
                    return provider

    # Try detecting through environment variables
    env_to_cloud_provider = {
        "RUNPOD_DC_ID": "RUNPOD",
    }
    for env_var, provider in env_to_cloud_provider.items():
        if os.environ.get(env_var):
            return provider

    return "UNKNOWN"

_get_current_timestamp_ns

_get_current_timestamp_ns() -> int
Source code in vllm/usage/usage_lib.py
def _get_current_timestamp_ns() -> int:
    return int(datetime.datetime.now(datetime.timezone.utc).timestamp() * 1e9)

is_usage_stats_enabled

is_usage_stats_enabled()

Determine whether or not we can send usage stats to the server. The logic is as follows:

- By default, it should be enabled.
- Three environment variables can disable it:
    - VLLM_DO_NOT_TRACK=1
    - DO_NOT_TRACK=1
    - VLLM_NO_USAGE_STATS=1
- A file in the home directory can disable it if it exists:
    - $HOME/.config/vllm/do_not_track

Source code in vllm/usage/usage_lib.py
def is_usage_stats_enabled():
    """Determine whether or not we can send usage stats to the server.
    The logic is as follows:
    - By default, it should be enabled.
    - Three environment variables can disable it:
        - VLLM_DO_NOT_TRACK=1
        - DO_NOT_TRACK=1
        - VLLM_NO_USAGE_STATS=1
    - A file in the home directory can disable it if it exists:
        - $HOME/.config/vllm/do_not_track
    """
    global _USAGE_STATS_ENABLED
    if _USAGE_STATS_ENABLED is None:
        do_not_track = envs.VLLM_DO_NOT_TRACK
        no_usage_stats = envs.VLLM_NO_USAGE_STATS
        do_not_track_file = os.path.exists(_USAGE_STATS_DO_NOT_TRACK_PATH)

        _USAGE_STATS_ENABLED = not (do_not_track or no_usage_stats
                                    or do_not_track_file)
    return _USAGE_STATS_ENABLED
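
To opt out, set one of the environment variables before vLLM initializes, or create the do-not-track file. A sketch of both approaches, assuming the default config home of ~/.config/vllm; note that the result is cached in _USAGE_STATS_ENABLED, so environment variables must be set before the first check:

import os
from pathlib import Path

# Environment-variable opt-out (set before importing/starting vLLM):
os.environ["VLLM_NO_USAGE_STATS"] = "1"

# Persistent file-based opt-out:
config_home = Path(os.path.expanduser("~/.config/vllm"))
config_home.mkdir(parents=True, exist_ok=True)
(config_home / "do_not_track").touch()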

set_runtime_usage_data

set_runtime_usage_data(
    key: str, value: Union[str, int, bool]
) -> None

Set global usage data that will be sent with every usage heartbeat.

Source code in vllm/usage/usage_lib.py
def set_runtime_usage_data(key: str, value: Union[str, int, bool]) -> None:
    """Set global usage data that will be sent with every usage heartbeat."""
    _GLOBAL_RUNTIME_DATA[key] = value
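
A usage sketch; the keys below are hypothetical, and any flat str/int/bool value is accepted and merged into every subsequent 10-minute heartbeat:

from vllm.usage.usage_lib import set_runtime_usage_data

set_runtime_usage_data("dtype", "bfloat16")
set_runtime_usage_data("num_requests_served", 1024)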