vllm.benchmarks.datasets

This module defines a framework for sampling benchmark requests from various datasets. Each dataset subclass of BenchmarkDataset must implement sample generation. Supported dataset types include:

- ShareGPT
- Random (synthetic)
- Sonnet
- BurstGPT
- HuggingFace
- VisionArena
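
A minimal usage sketch is shown below; the tokenizer checkpoint, request count, and lengths are illustrative, and any `PreTrainedTokenizerBase` instance works:

```
# Illustrative only: the tokenizer checkpoint and lengths are assumptions;
# RandomDataset generates fully synthetic prompts.
from transformers import AutoTokenizer

from vllm.benchmarks.datasets import RandomDataset

tokenizer = AutoTokenizer.from_pretrained("gpt2")
dataset = RandomDataset(random_seed=0)
requests = dataset.sample(
    tokenizer=tokenizer,
    num_requests=8,
    input_len=256,
    output_len=64,
)
print(requests[0].prompt_len, requests[0].expected_output_len)
```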

datasets module-attribute

datasets = PlaceholderModule('datasets')

logger module-attribute

logger = getLogger(__name__)

lora_tokenizer_cache module-attribute

lora_tokenizer_cache: dict[int, AnyTokenizer] = {}

zeta_prompt module-attribute

zeta_prompt = "### Instruction:\nYou are a code completion assistant and your task is to analyze user edits and then rewrite an excerpt that the user provides, suggesting the appropriate edits within the excerpt, taking into account the cursor location.\n\n### User Edits:\n\n{}\n\n### User Excerpt:\n\n{}\n\n### Response:\n\n"
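
The template's two positional `{}` placeholders hold the user's edit history and the excerpt to rewrite; a small illustration (the edit and excerpt strings are made up):

```
# The two placeholders are filled positionally; the strings below are made up.
user_edits = 'Renamed "rows" to "records" in loader.py'
user_excerpt = "def load(path):\n    records = read(path)\n    return records"
prompt = zeta_prompt.format(user_edits, user_excerpt)
print(prompt)
```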

AIMODataset

Bases: HuggingFaceDataset

Dataset class for processing an AIMO dataset with reasoning questions.

Source code in vllm/benchmarks/datasets.py
class AIMODataset(HuggingFaceDataset):
    """
    Dataset class for processing a AIMO dataset with reasoning questions.
    """
    SUPPORTED_DATASET_PATHS = {
        "AI-MO/aimo-validation-aime", "AI-MO/NuminaMath-1.5",
        "AI-MO/NuminaMath-CoT"
    }

    def sample(self,
               tokenizer: PreTrainedTokenizerBase,
               num_requests: int,
               output_len: Optional[int] = None,
               **kwargs) -> list:
        sampled_requests = []
        dynamic_output = output_len is None

        for item in self.data:
            if len(sampled_requests) >= num_requests:
                break
            prompt, completion = item['problem'], item["solution"]

            prompt_ids = tokenizer(prompt).input_ids
            completion_ids = tokenizer(completion).input_ids
            prompt_len = len(prompt_ids)
            completion_len = len(completion_ids)
            output_len = completion_len if dynamic_output else output_len
            assert isinstance(output_len, int) and output_len > 0
            if dynamic_output and not is_valid_sequence(prompt_len,
                                                        completion_len,
                                                        max_prompt_len=2048,
                                                        max_total_len=32000):
                continue
            sampled_requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=prompt_len,
                    expected_output_len=output_len,
                    multi_modal_data=None,
                ))
        self.maybe_oversample_requests(sampled_requests, num_requests)
        return sampled_requests

SUPPORTED_DATASET_PATHS class-attribute instance-attribute

SUPPORTED_DATASET_PATHS = {
    "AI-MO/aimo-validation-aime",
    "AI-MO/NuminaMath-1.5",
    "AI-MO/NuminaMath-CoT",
}

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(self,
           tokenizer: PreTrainedTokenizerBase,
           num_requests: int,
           output_len: Optional[int] = None,
           **kwargs) -> list:
    sampled_requests = []
    dynamic_output = output_len is None

    for item in self.data:
        if len(sampled_requests) >= num_requests:
            break
        prompt, completion = item['problem'], item["solution"]

        prompt_ids = tokenizer(prompt).input_ids
        completion_ids = tokenizer(completion).input_ids
        prompt_len = len(prompt_ids)
        completion_len = len(completion_ids)
        output_len = completion_len if dynamic_output else output_len
        assert isinstance(output_len, int) and output_len > 0
        if dynamic_output and not is_valid_sequence(prompt_len,
                                                    completion_len,
                                                    max_prompt_len=2048,
                                                    max_total_len=32000):
            continue
        sampled_requests.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=prompt_len,
                expected_output_len=output_len,
                multi_modal_data=None,
            ))
    self.maybe_oversample_requests(sampled_requests, num_requests)
    return sampled_requests

ASRDataset

Bases: HuggingFaceDataset

Dataset class for processing an ASR dataset for transcription. Tested on the following set:

+----------------+----------------------------------------+--------------------------+-----------------------------+
| Dataset        | Domain                                 | Speaking Style           | hf-subset                   |
+----------------+----------------------------------------+--------------------------+-----------------------------+
| TED-LIUM       | TED talks                              | Oratory                  | release1, release2, release3|
|                |                                        |                          | release3-speaker-adaptation |
| VoxPopuli      | European Parliament                    | Oratory                  | en, de, it, fr,  ...        |
| LibriSpeech    | Audiobook                              | Narrated                 | "LIUM/tedlium"              |
| GigaSpeech     | Audiobook, podcast, YouTube            | Narrated, spontaneous    | xs, s, m, l, xl, dev, test  |
| SPGISpeech     | Financial meetings                     | Oratory, spontaneous     | S, M, L, dev, test          |
| AMI            | Meetings                               | Spontaneous              | ihm, sdm                    |
+----------------+----------------------------------------+--------------------------+-----------------------------+
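
A construction sketch follows; the subset, split, and tokenizer checkpoint are illustrative assumptions rather than defaults of this class:

```
# Illustrative only: the subset/split names and tokenizer checkpoint are
# assumptions, not defaults of this class.
from transformers import AutoTokenizer

from vllm.benchmarks.datasets import ASRDataset

tokenizer = AutoTokenizer.from_pretrained("openai/whisper-large-v3")
dataset = ASRDataset(
    dataset_path="facebook/voxpopuli",
    dataset_subset="en",
    dataset_split="test",
)
requests = dataset.sample(tokenizer=tokenizer, num_requests=4)
```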

Source code in vllm/benchmarks/datasets.py
class ASRDataset(HuggingFaceDataset):
    """
    Dataset class for processing a ASR dataset for transcription.
    Tested on the following set:

    +----------------+----------------------------------------+--------------------------+-----------------------------+
    | Dataset        | Domain                                 | Speaking Style           | hf-subset                   |
    +----------------+----------------------------------------+--------------------------+-----------------------------+
    | TED-LIUM       | TED talks                              | Oratory                  | release1, release2, release3|
    |                |                                        |                          | release3-speaker-adaptation |
    | VoxPopuli      | European Parliament                    | Oratory                  | en, de, it, fr,  ...        |
    | LibriSpeech    | Audiobook                              | Narrated                 | "LIUM/tedlium"              |
    | GigaSpeech     | Audiobook, podcast, YouTube            | Narrated, spontaneous    | xs, s, m, l, xl, dev, test  |
    | SPGISpeech     | Financial meetings                     | Oratory, spontaneous     | S, M, L, dev, test          |
    | AMI            | Meetings                               | Spontaneous              | ihm, sdm                    |
    +----------------+----------------------------------------+--------------------------+-----------------------------+

    """  # noqa: E501

    SUPPORTED_DATASET_PATHS = {
        "openslr/librispeech_asr",
        "facebook/voxpopuli",
        "LIUM/tedlium",
        "edinburghcstr/ami",
        "speechcolab/gigaspeech",
        "kensho/spgispeech",
    }

    DEFAULT_OUTPUT_LEN = 128
    IS_MULTIMODAL = True

    # TODO Whisper-specific. Abstract interface when more models are supported.
    TRANSCRIPTION_PREAMBLE = (
        "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>")
    skip_long_audios: bool = True

    def sample(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_requests: int,
        output_len: Optional[int] = None,
        **kwargs,
    ) -> list:
        output_len = (output_len
                      if output_len is not None else self.DEFAULT_OUTPUT_LEN)
        prompt = ASRDataset.TRANSCRIPTION_PREAMBLE
        prompt_len = len(tokenizer(prompt).input_ids)
        sampled_requests = []
        skipped = 0
        for item in self.data:
            if len(sampled_requests) >= num_requests:
                break
            audio = item["audio"]
            y, sr = audio["array"], audio["sampling_rate"]
            duration_s = librosa.get_duration(y=y, sr=sr)
            # Whisper max supported duration
            if self.skip_long_audios and duration_s > 30:
                skipped += 1
                continue

            mm_content = {"audio": (y, sr)}
            sampled_requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=prompt_len,
                    expected_output_len=output_len,
                    multi_modal_data=mm_content,
                ))
        if skipped:
            logger.warning(
                "%d samples discarded from dataset due to"
                " their length being greater than"
                " what Whisper supports.",
                skipped,
            )
        self.maybe_oversample_requests(sampled_requests, num_requests)
        return sampled_requests

DEFAULT_OUTPUT_LEN class-attribute instance-attribute

DEFAULT_OUTPUT_LEN = 128

IS_MULTIMODAL class-attribute instance-attribute

IS_MULTIMODAL = True

SUPPORTED_DATASET_PATHS class-attribute instance-attribute

SUPPORTED_DATASET_PATHS = {
    "openslr/librispeech_asr",
    "facebook/voxpopuli",
    "LIUM/tedlium",
    "edinburghcstr/ami",
    "speechcolab/gigaspeech",
    "kensho/spgispeech",
}

TRANSCRIPTION_PREAMBLE class-attribute instance-attribute

TRANSCRIPTION_PREAMBLE = "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>"

skip_long_audios class-attribute instance-attribute

skip_long_audios: bool = True

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(
    self,
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    **kwargs,
) -> list:
    output_len = (output_len
                  if output_len is not None else self.DEFAULT_OUTPUT_LEN)
    prompt = ASRDataset.TRANSCRIPTION_PREAMBLE
    prompt_len = len(tokenizer(prompt).input_ids)
    sampled_requests = []
    skipped = 0
    for item in self.data:
        if len(sampled_requests) >= num_requests:
            break
        audio = item["audio"]
        y, sr = audio["array"], audio["sampling_rate"]
        duration_s = librosa.get_duration(y=y, sr=sr)
        # Whisper max supported duration
        if self.skip_long_audios and duration_s > 30:
            skipped += 1
            continue

        mm_content = {"audio": (y, sr)}
        sampled_requests.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=prompt_len,
                expected_output_len=output_len,
                multi_modal_data=mm_content,
            ))
    if skipped:
        logger.warning(
            "%d samples discarded from dataset due to"
            " their length being greater than"
            " what Whisper supports.",
            skipped,
        )
    self.maybe_oversample_requests(sampled_requests, num_requests)
    return sampled_requests

BenchmarkDataset

Bases: ABC

Source code in vllm/benchmarks/datasets.py
class BenchmarkDataset(ABC):
    DEFAULT_SEED = 0
    IS_MULTIMODAL = False

    def __init__(
        self,
        dataset_path: Optional[str] = None,
        random_seed: int = DEFAULT_SEED,
    ) -> None:
        """
        Initialize the BenchmarkDataset with an optional dataset path and random
        seed.  

        Args:
            dataset_path (Optional[str]): Path to the dataset. If None, it
            indicates that a default or random dataset might be used.
            random_seed (int): Seed value for reproducible shuffling or
            sampling. Defaults to DEFAULT_SEED.
        """
        self.dataset_path = dataset_path
        # Set the random seed, ensuring that a None value is replaced with the
        # default seed.
        self.random_seed = (random_seed
                            if random_seed is not None else self.DEFAULT_SEED)
        self.data = None

    def apply_multimodal_chat_transformation(
            self,
            prompt: str,
            mm_content: Optional[MultiModalDataDict] = None) -> list[dict]:
        """
        Transform a prompt and optional multimodal content into a chat format.
        This method is used for chat models that expect a specific conversation
        format.
        """
        content = [{"text": prompt, "type": "text"}]
        if mm_content is not None:
            content.append(mm_content)
        return [{"role": "user", "content": content}]

    def load_data(self) -> None:
        """
        Load data from the dataset path into self.data.

        This method must be overridden by subclasses since the method to load
        data will vary depending on the dataset format and source.

        Raises:
            NotImplementedError: If a subclass does not implement this method.
        """
        # TODO (jenniferzhao): add support for downloading data
        raise NotImplementedError(
            "load_data must be implemented in subclasses.")

    def get_random_lora_request(
        self,
        tokenizer: PreTrainedTokenizerBase,
        max_loras: Optional[int] = None,
        lora_path: Optional[str] = None,
    ) -> tuple[Optional[LoRARequest], AnyTokenizer]:
        """
        Optionally select a random LoRA request and return its associated
        tokenizer.

        This method is used when LoRA parameters are provided.  It randomly
        selects a LoRA based on max_loras and retrieves a cached tokenizer for
        that LoRA if available. Otherwise, it returns the base tokenizer.

        Args:
            tokenizer (PreTrainedTokenizerBase): The base tokenizer to use if no
                LoRA is selected.
            max_loras (Optional[int]): The maximum number of LoRAs available.
                If `None`, LoRA is not used.
            lora_path (Optional[str]): Path to the LoRA parameters on disk.
                If `None`, LoRA is not used.

        Returns:
            A tuple with the following elements:
                - A new [LoRARequest][] (or `None` if not applicable).
                - The tokenizer associated with the LoRA request
                  (or the base tokenizer).
        """
        if max_loras is None or lora_path is None:
            return None, tokenizer

        # Generate a random LoRA ID in the range [1, max_loras].
        lora_id = random.randint(1, max_loras)
        lora_request = LoRARequest(
            lora_name=str(lora_id),
            lora_int_id=lora_id,
            lora_path=lora_path_on_disk(lora_path),
        )
        if lora_id not in lora_tokenizer_cache:
            lora_tokenizer_cache[lora_id] = get_lora_tokenizer(lora_request)
        # Return lora_request and the cached tokenizer if available; otherwise,
        # return the base tokenizer
        return lora_request, lora_tokenizer_cache[lora_id] or tokenizer

    @abstractmethod
    def sample(self, tokenizer: PreTrainedTokenizerBase,
               num_requests: int) -> list[SampleRequest]:
        """
        Abstract method to generate sample requests from the dataset.

        Subclasses must override this method to implement dataset-specific logic
        for generating a list of SampleRequest objects.

        Args:
            tokenizer (PreTrainedTokenizerBase): The tokenizer to be used
                for processing the dataset's text.
            num_requests (int): The number of sample requests to generate.

        Returns:
            list[SampleRequest]: A list of sample requests generated from the
            dataset.
        """
        raise NotImplementedError("sample must be implemented in subclasses.")

    def maybe_oversample_requests(self, requests: list[SampleRequest],
                                  num_requests: int) -> None:
        """
        Oversamples the list of requests if its size is less than the desired
        number.

        Args:
            requests (List[SampleRequest]): The current list of sampled
                requests.
            num_requests (int): The target number of requests.
        """
        if len(requests) < num_requests:
            random.seed(self.random_seed)
            additional = random.choices(requests,
                                        k=num_requests - len(requests))
            requests.extend(additional)
            logger.info("Oversampled requests to reach %d total samples.",
                        num_requests)

DEFAULT_SEED class-attribute instance-attribute

DEFAULT_SEED = 0

IS_MULTIMODAL class-attribute instance-attribute

IS_MULTIMODAL = False

data instance-attribute

data = None

dataset_path instance-attribute

dataset_path = dataset_path

random_seed instance-attribute

random_seed = (
    random_seed if random_seed is not None else DEFAULT_SEED
)

__init__

__init__(
    dataset_path: Optional[str] = None,
    random_seed: int = DEFAULT_SEED,
) -> None

Initialize the BenchmarkDataset with an optional dataset path and random seed.

Parameters:

Name Type Description Default
dataset_path Optional[str]

Path to the dataset. If None, it indicates that a default or random dataset might be used.

None
random_seed int

Seed value for reproducible shuffling or sampling. Defaults to DEFAULT_SEED.

DEFAULT_SEED
Source code in vllm/benchmarks/datasets.py
def __init__(
    self,
    dataset_path: Optional[str] = None,
    random_seed: int = DEFAULT_SEED,
) -> None:
    """
    Initialize the BenchmarkDataset with an optional dataset path and random
    seed.  

    Args:
        dataset_path (Optional[str]): Path to the dataset. If None, it
        indicates that a default or random dataset might be used.
        random_seed (int): Seed value for reproducible shuffling or
        sampling. Defaults to DEFAULT_SEED.
    """
    self.dataset_path = dataset_path
    # Set the random seed, ensuring that a None value is replaced with the
    # default seed.
    self.random_seed = (random_seed
                        if random_seed is not None else self.DEFAULT_SEED)
    self.data = None

apply_multimodal_chat_transformation

apply_multimodal_chat_transformation(
    prompt: str,
    mm_content: Optional[MultiModalDataDict] = None,
) -> list[dict]

Transform a prompt and optional multimodal content into a chat format. This method is used for chat models that expect a specific conversation format.
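
For a text-only prompt the result is a single user message whose content is a one-element list, as the source below shows; a quick sketch (assuming `dataset` is any BenchmarkDataset subclass instance):

```
# Sketch: `dataset` is any BenchmarkDataset subclass instance.
messages = dataset.apply_multimodal_chat_transformation("Describe this image.")
# messages == [{"role": "user",
#               "content": [{"text": "Describe this image.", "type": "text"}]}]
```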

Source code in vllm/benchmarks/datasets.py
def apply_multimodal_chat_transformation(
        self,
        prompt: str,
        mm_content: Optional[MultiModalDataDict] = None) -> list[dict]:
    """
    Transform a prompt and optional multimodal content into a chat format.
    This method is used for chat models that expect a specific conversation
    format.
    """
    content = [{"text": prompt, "type": "text"}]
    if mm_content is not None:
        content.append(mm_content)
    return [{"role": "user", "content": content}]

get_random_lora_request

get_random_lora_request(
    tokenizer: PreTrainedTokenizerBase,
    max_loras: Optional[int] = None,
    lora_path: Optional[str] = None,
) -> tuple[Optional[LoRARequest], AnyTokenizer]

Optionally select a random LoRA request and return its associated tokenizer.

This method is used when LoRA parameters are provided. It randomly selects a LoRA based on max_loras and retrieves a cached tokenizer for that LoRA if available. Otherwise, it returns the base tokenizer.

Parameters:

Name Type Description Default
tokenizer PreTrainedTokenizerBase

The base tokenizer to use if no LoRA is selected.

required
max_loras Optional[int]

The maximum number of LoRAs available. If None, LoRA is not used.

None
lora_path Optional[str]

Path to the LoRA parameters on disk. If None, LoRA is not used.

None

Returns:

Type Description
tuple[Optional[LoRARequest], AnyTokenizer]

A tuple with the following elements:

- A new [LoRARequest][] (or None if not applicable).
- The tokenizer associated with the LoRA request (or the base tokenizer).
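
A usage sketch; the adapter path and LoRA count are illustrative:

```
# Sketch: `dataset` is any BenchmarkDataset subclass instance and "./loras"
# an illustrative path to LoRA adapters on disk.
lora_request, lora_tokenizer = dataset.get_random_lora_request(
    tokenizer=base_tokenizer,
    max_loras=4,
    lora_path="./loras",
)
# Without LoRA parameters, the base tokenizer is returned unchanged:
no_lora, same_tok = dataset.get_random_lora_request(tokenizer=base_tokenizer)
assert no_lora is None and same_tok is base_tokenizer
```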

Source code in vllm/benchmarks/datasets.py
def get_random_lora_request(
    self,
    tokenizer: PreTrainedTokenizerBase,
    max_loras: Optional[int] = None,
    lora_path: Optional[str] = None,
) -> tuple[Optional[LoRARequest], AnyTokenizer]:
    """
    Optionally select a random LoRA request and return its associated
    tokenizer.

    This method is used when LoRA parameters are provided.  It randomly
    selects a LoRA based on max_loras and retrieves a cached tokenizer for
    that LoRA if available. Otherwise, it returns the base tokenizer.

    Args:
        tokenizer (PreTrainedTokenizerBase): The base tokenizer to use if no
            LoRA is selected.
        max_loras (Optional[int]): The maximum number of LoRAs available.
            If `None`, LoRA is not used.
        lora_path (Optional[str]): Path to the LoRA parameters on disk.
            If `None`, LoRA is not used.

    Returns:
        A tuple with the following elements:
            - A new [LoRARequest][] (or `None` if not applicable).
            - The tokenizer associated with the LoRA request
              (or the base tokenizer).
    """
    if max_loras is None or lora_path is None:
        return None, tokenizer

    # Generate a random LoRA ID in the range [1, max_loras].
    lora_id = random.randint(1, max_loras)
    lora_request = LoRARequest(
        lora_name=str(lora_id),
        lora_int_id=lora_id,
        lora_path=lora_path_on_disk(lora_path),
    )
    if lora_id not in lora_tokenizer_cache:
        lora_tokenizer_cache[lora_id] = get_lora_tokenizer(lora_request)
    # Return lora_request and the cached tokenizer if available; otherwise,
    # return the base tokenizer
    return lora_request, lora_tokenizer_cache[lora_id] or tokenizer

load_data

load_data() -> None

Load data from the dataset path into self.data.

This method must be overridden by subclasses since the method to load data will vary depending on the dataset format and source.

Raises:

Type Description
NotImplementedError

If a subclass does not implement this method.
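
A minimal override sketch for a hypothetical subclass that reads one prompt per line from a plain-text file (the format and class name are made up for illustration):

```
class LinePromptDataset(BenchmarkDataset):
    """Hypothetical subclass: one prompt per line in a plain-text file."""

    def load_data(self) -> None:
        if self.dataset_path is None:
            raise ValueError("dataset_path must be provided for loading data.")
        with open(self.dataset_path, encoding="utf-8") as f:
            self.data = [line.strip() for line in f if line.strip()]

    # sample() must also be overridden; see the abstract method below.
```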

Source code in vllm/benchmarks/datasets.py
def load_data(self) -> None:
    """
    Load data from the dataset path into self.data.

    This method must be overridden by subclasses since the method to load
    data will vary depending on the dataset format and source.

    Raises:
        NotImplementedError: If a subclass does not implement this method.
    """
    # TODO (jenniferzhao): add support for downloading data
    raise NotImplementedError(
        "load_data must be implemented in subclasses.")

maybe_oversample_requests

maybe_oversample_requests(
    requests: list[SampleRequest], num_requests: int
) -> None

Oversamples the list of requests if its size is less than the desired number.

Parameters:

Name Type Description Default
requests List[SampleRequest]

The current list of sampled requests.

required
num_requests int

The target number of requests.

required
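
For example, if only three valid requests were collected but ten were requested, the list is padded in place by re-sampling (with replacement) from the existing entries:

```
# Sketch: `requests` holds three previously built SampleRequest objects.
dataset.maybe_oversample_requests(requests, num_requests=10)
assert len(requests) == 10  # padded in place by re-sampling existing entries
```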
Source code in vllm/benchmarks/datasets.py
def maybe_oversample_requests(self, requests: list[SampleRequest],
                              num_requests: int) -> None:
    """
    Oversamples the list of requests if its size is less than the desired
    number.

    Args:
        requests (List[SampleRequest]): The current list of sampled
            requests.
        num_requests (int): The target number of requests.
    """
    if len(requests) < num_requests:
        random.seed(self.random_seed)
        additional = random.choices(requests,
                                    k=num_requests - len(requests))
        requests.extend(additional)
        logger.info("Oversampled requests to reach %d total samples.",
                    num_requests)

sample abstractmethod

sample(
    tokenizer: PreTrainedTokenizerBase, num_requests: int
) -> list[SampleRequest]

Abstract method to generate sample requests from the dataset.

Subclasses must override this method to implement dataset-specific logic for generating a list of SampleRequest objects.

Parameters:

Name Type Description Default
tokenizer PreTrainedTokenizerBase

The tokenizer to be used for processing the dataset's text.

required
num_requests int

The number of sample requests to generate.

required

Returns:

Type Description
list[SampleRequest]

A list of sample requests generated from the dataset.
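
A minimal subclass sketch (the in-memory prompts and the fixed output length are illustrative):

```
class InMemoryDataset(BenchmarkDataset):
    """Hypothetical subclass that samples from a fixed list of prompts."""

    def __init__(self, prompts: list[str], **kwargs) -> None:
        super().__init__(**kwargs)
        self.data = prompts

    def sample(self, tokenizer: PreTrainedTokenizerBase,
               num_requests: int) -> list[SampleRequest]:
        requests = []
        for prompt in self.data[:num_requests]:
            requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=len(tokenizer(prompt).input_ids),
                    expected_output_len=128,  # illustrative fixed length
                ))
        self.maybe_oversample_requests(requests, num_requests)
        return requests
```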

Source code in vllm/benchmarks/datasets.py
@abstractmethod
def sample(self, tokenizer: PreTrainedTokenizerBase,
           num_requests: int) -> list[SampleRequest]:
    """
    Abstract method to generate sample requests from the dataset.

    Subclasses must override this method to implement dataset-specific logic
    for generating a list of SampleRequest objects.

    Args:
        tokenizer (PreTrainedTokenizerBase): The tokenizer to be used
            for processing the dataset's text.
        num_requests (int): The number of sample requests to generate.

    Returns:
        list[SampleRequest]: A list of sample requests generated from the
        dataset.
    """
    raise NotImplementedError("sample must be implemented in subclasses.")

BurstGPTDataset

Bases: BenchmarkDataset

Implements the BurstGPT dataset. Loads data from a CSV file and generates sample requests based on synthetic prompt generation. Only rows with Model "GPT-4" and positive response tokens are used.
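
A usage sketch; the CSV path and tokenizer checkpoint are illustrative, and the file must carry the BurstGPT trace columns (including "Model" and "Response tokens"):

```
# Illustrative only: the CSV path and tokenizer checkpoint are assumptions.
from transformers import AutoTokenizer

from vllm.benchmarks.datasets import BurstGPTDataset

tokenizer = AutoTokenizer.from_pretrained("gpt2")
dataset = BurstGPTDataset(dataset_path="BurstGPT_trace.csv")
requests = dataset.sample(tokenizer=tokenizer, num_requests=100)
```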

Source code in vllm/benchmarks/datasets.py
class BurstGPTDataset(BenchmarkDataset):
    """
    Implements the BurstGPT dataset.  Loads data from a CSV file and generates
    sample requests based on synthetic prompt generation. Only rows with Model
    "GPT-4" and positive response tokens are used.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.load_data()

    def load_data(self, ):
        if self.dataset_path is None:
            raise ValueError("dataset_path must be provided for loading data.")

        df = pd.read_csv(self.dataset_path)
        # Filter to keep only GPT-4 rows.
        gpt4_df = df[df["Model"] == "GPT-4"]
        # Remove failed requests (where Response tokens is 0 or less).
        gpt4_df = gpt4_df[gpt4_df["Response tokens"] > 0]
        # Sample the desired number of rows.
        self.data = gpt4_df

    def _sample_loaded_data(self, num_requests: int) -> list:
        if num_requests <= len(self.data):
            data = self.data.sample(n=num_requests,
                                    random_state=self.random_seed)
        else:
            data = self.data.sample(
                n=num_requests,
                random_state=self.random_seed,
                replace=True,
            )
        # Convert the dataframe to a list of lists.
        return data.values.tolist()

    def sample(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_requests: int,
        max_loras: Optional[int] = None,
        lora_path: Optional[str] = None,
        **kwargs,
    ) -> list[SampleRequest]:
        samples = []
        data = self._sample_loaded_data(num_requests=num_requests)
        for i in range(num_requests):
            input_len = int(data[i][2])
            output_len = int(data[i][3])
            lora_req, tokenizer = self.get_random_lora_request(
                tokenizer=tokenizer, max_loras=max_loras, lora_path=lora_path)
            vocab_size = tokenizer.vocab_size
            # Generate a synthetic prompt: a list of token IDs computed as (i +
            # j) modulo vocab_size.
            token_ids = [(i + j) % vocab_size for j in range(input_len)]
            prompt = tokenizer.decode(token_ids)
            samples.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=input_len,
                    expected_output_len=output_len,
                    lora_request=lora_req,
                ))
        return samples

__init__

__init__(**kwargs) -> None
Source code in vllm/benchmarks/datasets.py
def __init__(self, **kwargs) -> None:
    super().__init__(**kwargs)
    self.load_data()

_sample_loaded_data

_sample_loaded_data(num_requests: int) -> list
Source code in vllm/benchmarks/datasets.py
def _sample_loaded_data(self, num_requests: int) -> list:
    if num_requests <= len(self.data):
        data = self.data.sample(n=num_requests,
                                random_state=self.random_seed)
    else:
        data = self.data.sample(
            n=num_requests,
            random_state=self.random_seed,
            replace=True,
        )
    # Convert the dataframe to a list of lists.
    return data.values.tolist()

load_data

load_data()
Source code in vllm/benchmarks/datasets.py
def load_data(self, ):
    if self.dataset_path is None:
        raise ValueError("dataset_path must be provided for loading data.")

    df = pd.read_csv(self.dataset_path)
    # Filter to keep only GPT-4 rows.
    gpt4_df = df[df["Model"] == "GPT-4"]
    # Remove failed requests (where Response tokens is 0 or less).
    gpt4_df = gpt4_df[gpt4_df["Response tokens"] > 0]
    # Sample the desired number of rows.
    self.data = gpt4_df

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    max_loras: Optional[int] = None,
    lora_path: Optional[str] = None,
    **kwargs,
) -> list[SampleRequest]
Source code in vllm/benchmarks/datasets.py
def sample(
    self,
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    max_loras: Optional[int] = None,
    lora_path: Optional[str] = None,
    **kwargs,
) -> list[SampleRequest]:
    samples = []
    data = self._sample_loaded_data(num_requests=num_requests)
    for i in range(num_requests):
        input_len = int(data[i][2])
        output_len = int(data[i][3])
        lora_req, tokenizer = self.get_random_lora_request(
            tokenizer=tokenizer, max_loras=max_loras, lora_path=lora_path)
        vocab_size = tokenizer.vocab_size
        # Generate a synthetic prompt: a list of token IDs computed as (i +
        # j) modulo vocab_size.
        token_ids = [(i + j) % vocab_size for j in range(input_len)]
        prompt = tokenizer.decode(token_ids)
        samples.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=input_len,
                expected_output_len=output_len,
                lora_request=lora_req,
            ))
    return samples

ConversationDataset

Bases: HuggingFaceDataset

Dataset for conversation data with multimodal support.

Source code in vllm/benchmarks/datasets.py
class ConversationDataset(HuggingFaceDataset):
    """Dataset for conversation data with multimodal support."""
    SUPPORTED_DATASET_PATHS = {
        'lmms-lab/LLaVA-OneVision-Data', 'Aeala/ShareGPT_Vicuna_unfiltered'
    }
    IS_MULTIMODAL = True

    def sample(self,
               tokenizer: PreTrainedTokenizerBase,
               num_requests: int,
               output_len: Optional[int] = None,
               enable_multimodal_chat: bool = False,
               **kwargs) -> list:
        # Filter examples with at least 2 conversations
        filtered_data = self.data.filter(
            lambda x: len(x["conversations"]) >= 2)
        sampled_requests = []
        dynamic_output = output_len is None

        for item in filtered_data:
            if len(sampled_requests) >= num_requests:
                break
            conv = item["conversations"]
            prompt, completion = conv[0]["value"], conv[1]["value"]

            prompt_ids = tokenizer(prompt).input_ids
            completion_ids = tokenizer(completion).input_ids
            prompt_len = len(prompt_ids)
            completion_len = len(completion_ids)
            output_len = completion_len if dynamic_output else output_len
            assert isinstance(output_len, int) and output_len > 0
            if dynamic_output and not is_valid_sequence(
                    prompt_len, completion_len):
                continue
            mm_content = process_image(
                item["image"]) if "image" in item else None
            if enable_multimodal_chat:
                # Note: when chat is enabled the request prompt_len is no longer
                # accurate and we will be using request output to count the
                # actual prompt len and output len
                prompt = self.apply_multimodal_chat_transformation(
                    prompt, mm_content)
            sampled_requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=prompt_len,
                    expected_output_len=output_len,
                    multi_modal_data=mm_content,
                ))
        self.maybe_oversample_requests(sampled_requests, num_requests)
        return sampled_requests

IS_MULTIMODAL class-attribute instance-attribute

IS_MULTIMODAL = True

SUPPORTED_DATASET_PATHS class-attribute instance-attribute

SUPPORTED_DATASET_PATHS = {
    "lmms-lab/LLaVA-OneVision-Data",
    "Aeala/ShareGPT_Vicuna_unfiltered",
}

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(self,
           tokenizer: PreTrainedTokenizerBase,
           num_requests: int,
           output_len: Optional[int] = None,
           enable_multimodal_chat: bool = False,
           **kwargs) -> list:
    # Filter examples with at least 2 conversations
    filtered_data = self.data.filter(
        lambda x: len(x["conversations"]) >= 2)
    sampled_requests = []
    dynamic_output = output_len is None

    for item in filtered_data:
        if len(sampled_requests) >= num_requests:
            break
        conv = item["conversations"]
        prompt, completion = conv[0]["value"], conv[1]["value"]

        prompt_ids = tokenizer(prompt).input_ids
        completion_ids = tokenizer(completion).input_ids
        prompt_len = len(prompt_ids)
        completion_len = len(completion_ids)
        output_len = completion_len if dynamic_output else output_len
        assert isinstance(output_len, int) and output_len > 0
        if dynamic_output and not is_valid_sequence(
                prompt_len, completion_len):
            continue
        mm_content = process_image(
            item["image"]) if "image" in item else None
        if enable_multimodal_chat:
            # Note: when chat is enabled the request prompt_len is no longer
            # accurate and we will be using request output to count the
            # actual prompt len and output len
            prompt = self.apply_multimodal_chat_transformation(
                prompt, mm_content)
        sampled_requests.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=prompt_len,
                expected_output_len=output_len,
                multi_modal_data=mm_content,
            ))
    self.maybe_oversample_requests(sampled_requests, num_requests)
    return sampled_requests

CustomDataset

Bases: BenchmarkDataset

Implements the Custom dataset. Loads data from a JSONL file and generates sample requests from the prompts it contains, e.g.:

{"prompt": "What is the capital of India?"}
{"prompt": "What is the capital of Iran?"}
{"prompt": "What is the capital of China?"}

Source code in vllm/benchmarks/datasets.py
class CustomDataset(BenchmarkDataset):
    """
    Implements the Custom dataset.  Loads data from a JSONL file and generates
    sample requests based on conversation turns. E.g.,
    ```
    {"prompt": "What is the capital of India?"}
    {"prompt": "What is the capital of Iran?"}
    {"prompt": "What is the capital of China?"}
    ```
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.load_data()

    def load_data(self) -> None:
        if self.dataset_path is None:
            raise ValueError("dataset_path must be provided for loading data.")

        # self.data will be a list of dictionaries
        # e.g., [{"prompt": "What is the capital of India?"}, ...]
        # This will be the standardized format which load_data()
        # has to convert into depending on the filetype of dataset_path.
        # sample() will assume this standardized format of self.data
        self.data = []

        # Load the JSONL file
        if self.dataset_path.endswith(".jsonl"):
            jsonl_data = pd.read_json(path_or_buf=self.dataset_path,
                                      lines=True)

            # check if the JSONL file has a 'prompt' column
            if "prompt" not in jsonl_data.columns:
                raise ValueError("JSONL file must contain a 'prompt' column.")

            # Convert each row to a dictionary and append to self.data
            # This will convert the DataFrame to a list of dictionaries
            # where each dictionary corresponds to a row in the DataFrame.
            # This is the standardized format we want for self.data
            for _, row in jsonl_data.iterrows():
                self.data.append(row.to_dict())
        else:
            raise NotImplementedError(
                "Only JSONL format is supported for CustomDataset.")

        random.seed(self.random_seed)
        random.shuffle(self.data)

    def sample(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_requests: int,
        lora_path: Optional[str] = None,
        max_loras: Optional[int] = None,
        output_len: Optional[int] = None,
        enable_multimodal_chat: bool = False,
        skip_chat_template: bool = False,
        **kwargs,
    ) -> list:
        sampled_requests = []
        for item in self.data:
            if len(sampled_requests) >= num_requests:
                break
            prompt = item["prompt"]

            # apply template
            if not skip_chat_template:
                prompt = tokenizer.apply_chat_template(
                    [{
                        "role": "user",
                        "content": prompt
                    }],
                    add_generation_prompt=True,
                    tokenize=False,
                )

            prompt_len = len(tokenizer(prompt).input_ids)
            sampled_requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=prompt_len,
                    expected_output_len=output_len,
                ))
        self.maybe_oversample_requests(sampled_requests, num_requests)

        return sampled_requests

__init__

__init__(**kwargs) -> None
Source code in vllm/benchmarks/datasets.py
def __init__(self, **kwargs) -> None:
    super().__init__(**kwargs)
    self.load_data()

load_data

load_data() -> None
Source code in vllm/benchmarks/datasets.py
def load_data(self) -> None:
    if self.dataset_path is None:
        raise ValueError("dataset_path must be provided for loading data.")

    # self.data will be a list of dictionaries
    # e.g., [{"prompt": "What is the capital of India?"}, ...]
    # This will be the standardized format which load_data()
    # has to convert into depending on the filetype of dataset_path.
    # sample() will assume this standardized format of self.data
    self.data = []

    # Load the JSONL file
    if self.dataset_path.endswith(".jsonl"):
        jsonl_data = pd.read_json(path_or_buf=self.dataset_path,
                                  lines=True)

        # check if the JSONL file has a 'prompt' column
        if "prompt" not in jsonl_data.columns:
            raise ValueError("JSONL file must contain a 'prompt' column.")

        # Convert each row to a dictionary and append to self.data
        # This will convert the DataFrame to a list of dictionaries
        # where each dictionary corresponds to a row in the DataFrame.
        # This is the standardized format we want for self.data
        for _, row in jsonl_data.iterrows():
            self.data.append(row.to_dict())
    else:
        raise NotImplementedError(
            "Only JSONL format is supported for CustomDataset.")

    random.seed(self.random_seed)
    random.shuffle(self.data)

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    lora_path: Optional[str] = None,
    max_loras: Optional[int] = None,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    skip_chat_template: bool = False,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(
    self,
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    lora_path: Optional[str] = None,
    max_loras: Optional[int] = None,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    skip_chat_template: bool = False,
    **kwargs,
) -> list:
    sampled_requests = []
    for item in self.data:
        if len(sampled_requests) >= num_requests:
            break
        prompt = item["prompt"]

        # apply template
        if not skip_chat_template:
            prompt = tokenizer.apply_chat_template(
                [{
                    "role": "user",
                    "content": prompt
                }],
                add_generation_prompt=True,
                tokenize=False,
            )

        prompt_len = len(tokenizer(prompt).input_ids)
        sampled_requests.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=prompt_len,
                expected_output_len=output_len,
            ))
    self.maybe_oversample_requests(sampled_requests, num_requests)

    return sampled_requests

HuggingFaceDataset

Bases: BenchmarkDataset

Base class for datasets hosted on HuggingFace.
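
Subclasses are constructed with a dataset path, a split, and an optional subset; the data is streamed and shuffled on init. A construction sketch (the subset name is an illustrative assumption):

```
# Sketch: the subset name is illustrative; see each subclass for its
# SUPPORTED_DATASET_PATHS.
dataset = ConversationDataset(
    dataset_path="lmms-lab/LLaVA-OneVision-Data",
    dataset_subset="CLEVR-Math(MathV360K)",  # illustrative subset name
    dataset_split="train",
)
```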

Source code in vllm/benchmarks/datasets.py
class HuggingFaceDataset(BenchmarkDataset):
    """Base class for datasets hosted on HuggingFace."""

    SUPPORTED_DATASET_PATHS: Union[set[str], dict[str, Callable]] = set()

    def __init__(
        self,
        dataset_path: str,
        dataset_split: str,
        dataset_subset: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(dataset_path=dataset_path, **kwargs)

        self.dataset_split = dataset_split
        self.dataset_subset = dataset_subset
        self.load_data()

    def load_data(self) -> None:
        """Load data from HuggingFace datasets."""
        self.data = load_dataset(
            self.dataset_path,
            name=self.dataset_subset,
            split=self.dataset_split,
            streaming=True,
        )
        self.data = self.data.shuffle(seed=self.random_seed)

SUPPORTED_DATASET_PATHS class-attribute instance-attribute

SUPPORTED_DATASET_PATHS: Union[
    set[str], dict[str, Callable]
] = set()

dataset_split instance-attribute

dataset_split = dataset_split

dataset_subset instance-attribute

dataset_subset = dataset_subset

__init__

__init__(
    dataset_path: str,
    dataset_split: str,
    dataset_subset: Optional[str] = None,
    **kwargs,
) -> None
Source code in vllm/benchmarks/datasets.py
def __init__(
    self,
    dataset_path: str,
    dataset_split: str,
    dataset_subset: Optional[str] = None,
    **kwargs,
) -> None:
    super().__init__(dataset_path=dataset_path, **kwargs)

    self.dataset_split = dataset_split
    self.dataset_subset = dataset_subset
    self.load_data()

load_data

load_data() -> None

Load data from HuggingFace datasets.

Source code in vllm/benchmarks/datasets.py
def load_data(self) -> None:
    """Load data from HuggingFace datasets."""
    self.data = load_dataset(
        self.dataset_path,
        name=self.dataset_subset,
        split=self.dataset_split,
        streaming=True,
    )
    self.data = self.data.shuffle(seed=self.random_seed)

InstructCoderDataset

Bases: HuggingFaceDataset

InstructCoder Dataset. https://huggingface.co/datasets/likaixin/InstructCoder

InstructCoder is a dataset designed for general code editing. It consists of 114,239 instruction-input-output triplets and covers multiple distinct code editing scenarios.

Source code in vllm/benchmarks/datasets.py
class InstructCoderDataset(HuggingFaceDataset):
    """
    InstructCoder Dataset.
    https://huggingface.co/datasets/likaixin/InstructCoder

    InstructCoder is the dataset designed for general code editing.  It consists
    of 114,239 instruction-input-output triplets, and covers multiple distinct
    code editing scenario.
    """

    DEFAULT_OUTPUT_LEN = 200  # this is the average default output length
    SUPPORTED_DATASET_PATHS = {
        "likaixin/InstructCoder",
    }

    def sample(self,
               tokenizer: PreTrainedTokenizerBase,
               num_requests: int,
               output_len: Optional[int] = None,
               enable_multimodal_chat: bool = False,
               **kwargs) -> list:
        output_len = (output_len
                      if output_len is not None else self.DEFAULT_OUTPUT_LEN)
        sampled_requests = []
        for item in self.data:
            if len(sampled_requests) >= num_requests:
                break
            prompt = f"{item['input']}\n\n{item['instruction']} Just output \
            the code, do not include any explanation."

            # apply template
            prompt = tokenizer.apply_chat_template(
                [{
                    "role": "user",
                    "content": prompt
                }],
                add_generation_prompt=True,
                tokenize=False,
            )

            prompt_len = len(tokenizer(prompt).input_ids)
            sampled_requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=prompt_len,
                    expected_output_len=output_len,
                ))
        self.maybe_oversample_requests(sampled_requests, num_requests)
        return sampled_requests

DEFAULT_OUTPUT_LEN class-attribute instance-attribute

DEFAULT_OUTPUT_LEN = 200

SUPPORTED_DATASET_PATHS class-attribute instance-attribute

SUPPORTED_DATASET_PATHS = {'likaixin/InstructCoder'}

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(self,
           tokenizer: PreTrainedTokenizerBase,
           num_requests: int,
           output_len: Optional[int] = None,
           enable_multimodal_chat: bool = False,
           **kwargs) -> list:
    output_len = (output_len
                  if output_len is not None else self.DEFAULT_OUTPUT_LEN)
    sampled_requests = []
    for item in self.data:
        if len(sampled_requests) >= num_requests:
            break
        prompt = f"{item['input']}\n\n{item['instruction']} Just output \
        the code, do not include any explanation."

        # apply template
        prompt = tokenizer.apply_chat_template(
            [{
                "role": "user",
                "content": prompt
            }],
            add_generation_prompt=True,
            tokenize=False,
        )

        prompt_len = len(tokenizer(prompt).input_ids)
        sampled_requests.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=prompt_len,
                expected_output_len=output_len,
            ))
    self.maybe_oversample_requests(sampled_requests, num_requests)
    return sampled_requests

MTBenchDataset

Bases: HuggingFaceDataset

MT-Bench Dataset. https://huggingface.co/datasets/philschmid/mt-bench

We create a single-turn dataset for MT-Bench. This is similar to the speculative decoding benchmark setup in vLLM: https://github.com/vllm-project/vllm/blob/9d98ab5ec/examples/offline_inference/eagle.py#L14-L18

Source code in vllm/benchmarks/datasets.py
class MTBenchDataset(HuggingFaceDataset):
    """
    MT-Bench Dataset.
    https://huggingface.co/datasets/philschmid/mt-bench

    We create a single turn dataset for MT-Bench.
    This is similar to Spec decoding benchmark setup in vLLM
    https://github.com/vllm-project/vllm/blob/9d98ab5ec/examples/offline_inference/eagle.py#L14-L18
    """  # noqa: E501

    DEFAULT_OUTPUT_LEN = 256  # avg len used in SD bench in vLLM
    SUPPORTED_DATASET_PATHS = {
        "philschmid/mt-bench",
    }

    def sample(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_requests: int,
        output_len: Optional[int] = None,
        enable_multimodal_chat: bool = False,
        **kwargs,
    ) -> list:
        output_len = (output_len
                      if output_len is not None else self.DEFAULT_OUTPUT_LEN)
        sampled_requests = []

        for item in self.data:
            if len(sampled_requests) >= num_requests:
                break
            prompt = item["turns"][0]

            # apply template
            prompt = tokenizer.apply_chat_template(
                [{
                    "role": "user",
                    "content": prompt
                }],
                add_generation_prompt=True,
                tokenize=False,
            )

            prompt_len = len(tokenizer(prompt).input_ids)
            sampled_requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=prompt_len,
                    expected_output_len=output_len,
                ))
        self.maybe_oversample_requests(sampled_requests, num_requests)
        return sampled_requests

DEFAULT_OUTPUT_LEN class-attribute instance-attribute

DEFAULT_OUTPUT_LEN = 256

SUPPORTED_DATASET_PATHS class-attribute instance-attribute

SUPPORTED_DATASET_PATHS = {'philschmid/mt-bench'}

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(
    self,
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    **kwargs,
) -> list:
    output_len = (output_len
                  if output_len is not None else self.DEFAULT_OUTPUT_LEN)
    sampled_requests = []

    for item in self.data:
        if len(sampled_requests) >= num_requests:
            break
        prompt = item["turns"][0]

        # apply template
        prompt = tokenizer.apply_chat_template(
            [{
                "role": "user",
                "content": prompt
            }],
            add_generation_prompt=True,
            tokenize=False,
        )

        prompt_len = len(tokenizer(prompt).input_ids)
        sampled_requests.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=prompt_len,
                expected_output_len=output_len,
            ))
    self.maybe_oversample_requests(sampled_requests, num_requests)
    return sampled_requests

NextEditPredictionDataset

Bases: HuggingFaceDataset

Dataset class for processing a Next Edit Prediction dataset.

Source code in vllm/benchmarks/datasets.py
class NextEditPredictionDataset(HuggingFaceDataset):
    """
    Dataset class for processing a Next Edit Prediction dataset.
    """

    SUPPORTED_DATASET_PATHS = {
        "zed-industries/zeta",
    }
    MAPPING_PROMPT_FUNCS = {
        "zed-industries/zeta": _format_zeta_prompt,
    }

    def sample(self, tokenizer: PreTrainedTokenizerBase, num_requests: int,
               **kwargs):
        formatting_prompt_func = self.MAPPING_PROMPT_FUNCS.get(
            self.dataset_path)
        if formatting_prompt_func is None:
            raise ValueError(f"Unsupported dataset path: {self.dataset_path}")
        samples = []
        for sample in self.data:
            sample = formatting_prompt_func(sample)
            samples.append(
                SampleRequest(
                    prompt=sample["prompt"],
                    prompt_len=len(tokenizer(sample["prompt"]).input_ids),
                    expected_output_len=len(
                        tokenizer(sample["expected_output"]).input_ids),
                ))
            if len(samples) >= num_requests:
                break
        self.maybe_oversample_requests(samples, num_requests)
        return samples

MAPPING_PROMPT_FUNCS class-attribute instance-attribute

MAPPING_PROMPT_FUNCS = {
    "zed-industries/zeta": _format_zeta_prompt
}

SUPPORTED_DATASET_PATHS class-attribute instance-attribute

SUPPORTED_DATASET_PATHS = {'zed-industries/zeta'}

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    **kwargs,
)
Source code in vllm/benchmarks/datasets.py
def sample(self, tokenizer: PreTrainedTokenizerBase, num_requests: int,
           **kwargs):
    formatting_prompt_func = self.MAPPING_PROMPT_FUNCS.get(
        self.dataset_path)
    if formatting_prompt_func is None:
        raise ValueError(f"Unsupported dataset path: {self.dataset_path}")
    samples = []
    for sample in self.data:
        sample = formatting_prompt_func(sample)
        samples.append(
            SampleRequest(
                prompt=sample["prompt"],
                prompt_len=len(tokenizer(sample["prompt"]).input_ids),
                expected_output_len=len(
                    tokenizer(sample["expected_output"]).input_ids),
            ))
        if len(samples) >= num_requests:
            break
    self.maybe_oversample_requests(samples, num_requests)
    return samples

RandomDataset

Bases: BenchmarkDataset

Source code in vllm/benchmarks/datasets.py
class RandomDataset(BenchmarkDataset):
    # Default values copied from benchmark_serving.py for the random dataset.
    DEFAULT_PREFIX_LEN = 0
    DEFAULT_RANGE_RATIO = 0.0
    DEFAULT_INPUT_LEN = 1024
    DEFAULT_OUTPUT_LEN = 128

    def __init__(
        self,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)

    def sample(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_requests: int,
        prefix_len: int = DEFAULT_PREFIX_LEN,
        range_ratio: float = DEFAULT_RANGE_RATIO,
        input_len: int = DEFAULT_INPUT_LEN,
        output_len: int = DEFAULT_OUTPUT_LEN,
        **kwargs,
    ) -> list[SampleRequest]:
        # Enforce range_ratio < 1
        assert range_ratio < 1.0, (
            "random_range_ratio must be < 1.0 to ensure a valid sampling range"
        )

        vocab_size = tokenizer.vocab_size
        num_special_tokens = tokenizer.num_special_tokens_to_add()
        real_input_len = input_len - num_special_tokens

        prefix_token_ids = (np.random.randint(
            0, vocab_size, size=prefix_len).tolist() if prefix_len > 0 else [])

        # New sampling logic: [X * (1 - b), X * (1 + b)]
        input_low = int(real_input_len * (1 - range_ratio))
        input_high = int(real_input_len * (1 + range_ratio))
        output_low = int(output_len * (1 - range_ratio))
        output_high = int(output_len * (1 + range_ratio))

        # Add logging for debugging
        logger.info(
            "Sampling input_len from [%s, %s] and output_len from [%s, %s]",
            input_low, input_high, output_low, output_high)

        input_lens = np.random.randint(input_low,
                                       input_high + 1,
                                       size=num_requests)
        output_lens = np.random.randint(output_low,
                                        output_high + 1,
                                        size=num_requests)
        offsets = np.random.randint(0, vocab_size, size=num_requests)

        requests = []
        for i in range(num_requests):
            inner_seq = ((offsets[i] + i + np.arange(input_lens[i])) %
                         vocab_size).tolist()
            token_sequence = prefix_token_ids + inner_seq
            prompt = tokenizer.decode(token_sequence)
            # After decoding the prompt we have to encode and decode it again.
            # This is done because in some cases N consecutive tokens
            # give a string tokenized into != N number of tokens.
            # For example for GPT2Tokenizer:
            # [6880, 6881] -> ['Ġcalls', 'here'] ->
            # [1650, 939, 486] -> ['Ġcall', 'sh', 'ere']
            # To avoid uncontrolled change of the prompt length,
            # the encoded sequence is truncated before being decode again.
            total_input_len = prefix_len + int(input_lens[i])
            re_encoded_sequence = tokenizer.encode(
                prompt, add_special_tokens=False)[:total_input_len]
            prompt = tokenizer.decode(re_encoded_sequence)
            total_input_len = len(re_encoded_sequence)
            requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=total_input_len,
                    expected_output_len=int(output_lens[i]),
                ))
        return requests

DEFAULT_INPUT_LEN class-attribute instance-attribute

DEFAULT_INPUT_LEN = 1024

DEFAULT_OUTPUT_LEN class-attribute instance-attribute

DEFAULT_OUTPUT_LEN = 128

DEFAULT_PREFIX_LEN class-attribute instance-attribute

DEFAULT_PREFIX_LEN = 0

DEFAULT_RANGE_RATIO class-attribute instance-attribute

DEFAULT_RANGE_RATIO = 0.0

__init__

__init__(**kwargs) -> None
Source code in vllm/benchmarks/datasets.py
def __init__(
    self,
    **kwargs,
) -> None:
    super().__init__(**kwargs)
    random.seed(self.random_seed)
    np.random.seed(self.random_seed)

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    prefix_len: int = DEFAULT_PREFIX_LEN,
    range_ratio: float = DEFAULT_RANGE_RATIO,
    input_len: int = DEFAULT_INPUT_LEN,
    output_len: int = DEFAULT_OUTPUT_LEN,
    **kwargs,
) -> list[SampleRequest]
Source code in vllm/benchmarks/datasets.py
def sample(
    self,
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    prefix_len: int = DEFAULT_PREFIX_LEN,
    range_ratio: float = DEFAULT_RANGE_RATIO,
    input_len: int = DEFAULT_INPUT_LEN,
    output_len: int = DEFAULT_OUTPUT_LEN,
    **kwargs,
) -> list[SampleRequest]:
    # Enforce range_ratio < 1
    assert range_ratio < 1.0, (
        "random_range_ratio must be < 1.0 to ensure a valid sampling range"
    )

    vocab_size = tokenizer.vocab_size
    num_special_tokens = tokenizer.num_special_tokens_to_add()
    real_input_len = input_len - num_special_tokens

    prefix_token_ids = (np.random.randint(
        0, vocab_size, size=prefix_len).tolist() if prefix_len > 0 else [])

    # New sampling logic: [X * (1 - b), X * (1 + b)]
    input_low = int(real_input_len * (1 - range_ratio))
    input_high = int(real_input_len * (1 + range_ratio))
    output_low = int(output_len * (1 - range_ratio))
    output_high = int(output_len * (1 + range_ratio))

    # Add logging for debugging
    logger.info(
        "Sampling input_len from [%s, %s] and output_len from [%s, %s]",
        input_low, input_high, output_low, output_high)

    input_lens = np.random.randint(input_low,
                                   input_high + 1,
                                   size=num_requests)
    output_lens = np.random.randint(output_low,
                                    output_high + 1,
                                    size=num_requests)
    offsets = np.random.randint(0, vocab_size, size=num_requests)

    requests = []
    for i in range(num_requests):
        inner_seq = ((offsets[i] + i + np.arange(input_lens[i])) %
                     vocab_size).tolist()
        token_sequence = prefix_token_ids + inner_seq
        prompt = tokenizer.decode(token_sequence)
        # After decoding the prompt we have to encode and decode it again.
        # This is done because in some cases N consecutive tokens
        # give a string tokenized into != N number of tokens.
        # For example for GPT2Tokenizer:
        # [6880, 6881] -> ['Ġcalls', 'here'] ->
        # [1650, 939, 486] -> ['Ġcall', 'sh', 'ere']
        # To avoid uncontrolled change of the prompt length,
        # the encoded sequence is truncated before being decoded again.
        total_input_len = prefix_len + int(input_lens[i])
        re_encoded_sequence = tokenizer.encode(
            prompt, add_special_tokens=False)[:total_input_len]
        prompt = tokenizer.decode(re_encoded_sequence)
        total_input_len = len(re_encoded_sequence)
        requests.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=total_input_len,
                expected_output_len=int(output_lens[i]),
            ))
    return requests
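
The following is an illustrative usage sketch, not part of the module source; the "gpt2" tokenizer name and the length values are placeholder assumptions.

# Illustrative only: tokenizer name and lengths are placeholders.
from transformers import AutoTokenizer

from vllm.benchmarks.datasets import RandomDataset

tokenizer = AutoTokenizer.from_pretrained("gpt2")
dataset = RandomDataset(dataset_path=None, random_seed=0)
requests = dataset.sample(
    tokenizer=tokenizer,
    num_requests=8,
    prefix_len=0,
    input_len=256,
    output_len=64,
    range_ratio=0.25,
)
# prompt_len reflects the re-encoded (truncated) sequence, so it can differ
# slightly from the requested input_len.
print(requests[0].prompt_len, requests[0].expected_output_len)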

SampleRequest dataclass

Represents a single inference request for benchmarking.

Source code in vllm/benchmarks/datasets.py
@dataclass
class SampleRequest:
    """
    Represents a single inference request for benchmarking.
    """

    prompt: Union[str, Any]
    prompt_len: int
    expected_output_len: int
    multi_modal_data: Optional[Union[MultiModalDataDict, dict]] = None
    lora_request: Optional[LoRARequest] = None

expected_output_len instance-attribute

expected_output_len: int

lora_request class-attribute instance-attribute

lora_request: Optional[LoRARequest] = None

multi_modal_data class-attribute instance-attribute

multi_modal_data: Optional[
    Union[MultiModalDataDict, dict]
] = None

prompt instance-attribute

prompt: Union[str, Any]

prompt_len instance-attribute

prompt_len: int

__init__

__init__(
    prompt: Union[str, Any],
    prompt_len: int,
    expected_output_len: int,
    multi_modal_data: Optional[
        Union[MultiModalDataDict, dict]
    ] = None,
    lora_request: Optional[LoRARequest] = None,
) -> None
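
A minimal, illustrative construction; the prompt text and token counts below are arbitrary placeholders, and the optional fields are left at their defaults.

from vllm.benchmarks.datasets import SampleRequest

request = SampleRequest(
    prompt="Summarize the following paragraph.",
    prompt_len=6,
    expected_output_len=64,
)
# multi_modal_data and lora_request default to None.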

ShareGPTDataset

Bases: BenchmarkDataset

Implements the ShareGPT dataset. Loads data from a JSON file and generates sample requests based on conversation turns.

Source code in vllm/benchmarks/datasets.py
class ShareGPTDataset(BenchmarkDataset):
    """
    Implements the ShareGPT dataset.  Loads data from a JSON file and generates
    sample requests based on conversation turns.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.load_data()

    def load_data(self) -> None:
        if self.dataset_path is None:
            raise ValueError("dataset_path must be provided for loading data.")

        with open(self.dataset_path, encoding="utf-8") as f:
            self.data = json.load(f)
        # Filter entries with at least two conversation turns.
        self.data = [
            entry for entry in self.data
            if "conversations" in entry and len(entry["conversations"]) >= 2
        ]
        random.seed(self.random_seed)
        random.shuffle(self.data)

    def sample(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_requests: int,
        lora_path: Optional[str] = None,
        max_loras: Optional[int] = None,
        output_len: Optional[int] = None,
        enable_multimodal_chat: bool = False,
        **kwargs,
    ) -> list:
        samples: list = []
        for entry in self.data:
            if len(samples) >= num_requests:
                break
            prompt, completion = (
                entry["conversations"][0]["value"],
                entry["conversations"][1]["value"],
            )

            lora_request, tokenizer = self.get_random_lora_request(
                tokenizer=tokenizer, max_loras=max_loras, lora_path=lora_path)
            prompt_ids = tokenizer(prompt).input_ids
            completion_ids = tokenizer(completion).input_ids
            prompt_len = len(prompt_ids)
            new_output_len = (len(completion_ids)
                              if output_len is None else output_len)
            if not is_valid_sequence(prompt_len,
                                     new_output_len,
                                     skip_min_output_len_check=output_len
                                     is not None):
                continue
            if enable_multimodal_chat:
                prompt = self.apply_multimodal_chat_transformation(
                    prompt, None)
            samples.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=prompt_len,
                    expected_output_len=new_output_len,
                    lora_request=lora_request,
                ))
        self.maybe_oversample_requests(samples, num_requests)
        return samples

__init__

__init__(**kwargs) -> None
Source code in vllm/benchmarks/datasets.py
def __init__(self, **kwargs) -> None:
    super().__init__(**kwargs)
    self.load_data()

load_data

load_data() -> None
Source code in vllm/benchmarks/datasets.py
def load_data(self) -> None:
    if self.dataset_path is None:
        raise ValueError("dataset_path must be provided for loading data.")

    with open(self.dataset_path, encoding="utf-8") as f:
        self.data = json.load(f)
    # Filter entries with at least two conversation turns.
    self.data = [
        entry for entry in self.data
        if "conversations" in entry and len(entry["conversations"]) >= 2
    ]
    random.seed(self.random_seed)
    random.shuffle(self.data)

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    lora_path: Optional[str] = None,
    max_loras: Optional[int] = None,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(
    self,
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    lora_path: Optional[str] = None,
    max_loras: Optional[int] = None,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    **kwargs,
) -> list:
    samples: list = []
    for entry in self.data:
        if len(samples) >= num_requests:
            break
        prompt, completion = (
            entry["conversations"][0]["value"],
            entry["conversations"][1]["value"],
        )

        lora_request, tokenizer = self.get_random_lora_request(
            tokenizer=tokenizer, max_loras=max_loras, lora_path=lora_path)
        prompt_ids = tokenizer(prompt).input_ids
        completion_ids = tokenizer(completion).input_ids
        prompt_len = len(prompt_ids)
        new_output_len = (len(completion_ids)
                          if output_len is None else output_len)
        if not is_valid_sequence(prompt_len,
                                 new_output_len,
                                 skip_min_output_len_check=output_len
                                 is not None):
            continue
        if enable_multimodal_chat:
            prompt = self.apply_multimodal_chat_transformation(
                prompt, None)
        samples.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=prompt_len,
                expected_output_len=new_output_len,
                lora_request=lora_request,
            ))
    self.maybe_oversample_requests(samples, num_requests)
    return samples
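
An illustrative sketch of sampling from a locally downloaded ShareGPT JSON file; the file name and the tokenizer are placeholder assumptions.

from transformers import AutoTokenizer

from vllm.benchmarks.datasets import ShareGPTDataset

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder
dataset = ShareGPTDataset(
    random_seed=0,
    dataset_path="ShareGPT_V3_unfiltered_cleaned_split.json",  # placeholder path
)
requests = dataset.sample(
    tokenizer=tokenizer,
    num_requests=100,
    output_len=None,  # use each conversation's completion length
)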

SonnetDataset

Bases: BenchmarkDataset

Simplified implementation of the Sonnet dataset. Loads poem lines from a text file and generates sample requests. Default values here copied from benchmark_serving.py for the sonnet dataset.

Source code in vllm/benchmarks/datasets.py
class SonnetDataset(BenchmarkDataset):
    """
    Simplified implementation of the Sonnet dataset.  Loads poem lines from a
    text file and generates sample requests.  Default values here copied from
    `benchmark_serving.py` for the sonnet dataset.
    """

    DEFAULT_PREFIX_LEN = 200
    DEFAULT_INPUT_LEN = 550
    DEFAULT_OUTPUT_LEN = 150

    def __init__(
        self,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.load_data()

    def load_data(self) -> None:
        if not self.dataset_path:
            raise ValueError("dataset_path must be provided.")
        with open(self.dataset_path, encoding="utf-8") as f:
            self.data = f.readlines()

    def sample(
        self,
        tokenizer,
        num_requests: int,
        prefix_len: int = DEFAULT_PREFIX_LEN,
        input_len: int = DEFAULT_INPUT_LEN,
        output_len: int = DEFAULT_OUTPUT_LEN,
        return_prompt_formatted: bool = False,
        **kwargs,
    ) -> list:
        # Calculate average token length for a poem line.
        tokenized_lines = [tokenizer(line).input_ids for line in self.data]
        avg_len = sum(len(tokens)
                      for tokens in tokenized_lines) / len(tokenized_lines)

        # Build the base prompt.
        base_prompt = "Pick as many lines as you can from these poem lines:\n"
        base_msg = [{"role": "user", "content": base_prompt}]
        base_fmt = tokenizer.apply_chat_template(base_msg,
                                                 add_generation_prompt=True,
                                                 tokenize=False)
        base_offset = len(tokenizer(base_fmt).input_ids)
        if input_len <= base_offset:
            raise ValueError(
                f"'input_len' must be higher than the base prompt length "
                f"({base_offset}).")

        # Determine how many poem lines to use.
        num_input_lines = round((input_len - base_offset) / avg_len)
        num_prefix_lines = max(round((prefix_len - base_offset) / avg_len), 0)
        prefix_lines = self.data[:num_prefix_lines]

        samples = []
        while len(samples) < num_requests:
            extra_lines = random.choices(self.data,
                                         k=num_input_lines - num_prefix_lines)
            prompt = f"{base_prompt}{''.join(prefix_lines + extra_lines)}"
            msg = [{"role": "user", "content": prompt}]
            prompt_formatted = tokenizer.apply_chat_template(
                msg, add_generation_prompt=True, tokenize=False)
            prompt_len = len(tokenizer(prompt_formatted).input_ids)
            if prompt_len <= input_len:
                samples.append(
                    SampleRequest(
                        prompt=prompt_formatted
                        if return_prompt_formatted else prompt,
                        prompt_len=prompt_len,
                        expected_output_len=output_len,
                    ))
        return samples

DEFAULT_INPUT_LEN class-attribute instance-attribute

DEFAULT_INPUT_LEN = 550

DEFAULT_OUTPUT_LEN class-attribute instance-attribute

DEFAULT_OUTPUT_LEN = 150

DEFAULT_PREFIX_LEN class-attribute instance-attribute

DEFAULT_PREFIX_LEN = 200

__init__

__init__(**kwargs) -> None
Source code in vllm/benchmarks/datasets.py
def __init__(
    self,
    **kwargs,
) -> None:
    super().__init__(**kwargs)
    self.load_data()

load_data

load_data() -> None
Source code in vllm/benchmarks/datasets.py
def load_data(self) -> None:
    if not self.dataset_path:
        raise ValueError("dataset_path must be provided.")
    with open(self.dataset_path, encoding="utf-8") as f:
        self.data = f.readlines()

sample

sample(
    tokenizer,
    num_requests: int,
    prefix_len: int = DEFAULT_PREFIX_LEN,
    input_len: int = DEFAULT_INPUT_LEN,
    output_len: int = DEFAULT_OUTPUT_LEN,
    return_prompt_formatted: bool = False,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(
    self,
    tokenizer,
    num_requests: int,
    prefix_len: int = DEFAULT_PREFIX_LEN,
    input_len: int = DEFAULT_INPUT_LEN,
    output_len: int = DEFAULT_OUTPUT_LEN,
    return_prompt_formatted: bool = False,
    **kwargs,
) -> list:
    # Calculate average token length for a poem line.
    tokenized_lines = [tokenizer(line).input_ids for line in self.data]
    avg_len = sum(len(tokens)
                  for tokens in tokenized_lines) / len(tokenized_lines)

    # Build the base prompt.
    base_prompt = "Pick as many lines as you can from these poem lines:\n"
    base_msg = [{"role": "user", "content": base_prompt}]
    base_fmt = tokenizer.apply_chat_template(base_msg,
                                             add_generation_prompt=True,
                                             tokenize=False)
    base_offset = len(tokenizer(base_fmt).input_ids)
    if input_len <= base_offset:
        raise ValueError(
            f"'input_len' must be higher than the base prompt length "
            f"({base_offset}).")

    # Determine how many poem lines to use.
    num_input_lines = round((input_len - base_offset) / avg_len)
    num_prefix_lines = max(round((prefix_len - base_offset) / avg_len), 0)
    prefix_lines = self.data[:num_prefix_lines]

    samples = []
    while len(samples) < num_requests:
        extra_lines = random.choices(self.data,
                                     k=num_input_lines - num_prefix_lines)
        prompt = f"{base_prompt}{''.join(prefix_lines + extra_lines)}"
        msg = [{"role": "user", "content": prompt}]
        prompt_formatted = tokenizer.apply_chat_template(
            msg, add_generation_prompt=True, tokenize=False)
        prompt_len = len(tokenizer(prompt_formatted).input_ids)
        if prompt_len <= input_len:
            samples.append(
                SampleRequest(
                    prompt=prompt_formatted
                    if return_prompt_formatted else prompt,
                    prompt_len=prompt_len,
                    expected_output_len=output_len,
                ))
    return samples
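
An illustrative sketch; "sonnet.txt" is a placeholder path to the poem-line file, and the tokenizer must define a chat template because sample calls apply_chat_template.

from transformers import AutoTokenizer

from vllm.benchmarks.datasets import SonnetDataset

# Placeholder: any tokenizer that ships a chat template works here.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
dataset = SonnetDataset(dataset_path="sonnet.txt")
requests = dataset.sample(
    tokenizer=tokenizer,
    num_requests=10,
    prefix_len=200,
    input_len=550,
    output_len=150,
    return_prompt_formatted=True,
)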

VisionArenaDataset

Bases: HuggingFaceDataset

Vision Arena Dataset.

Source code in vllm/benchmarks/datasets.py
class VisionArenaDataset(HuggingFaceDataset):
    """
    Vision Arena Dataset.
    """

    DEFAULT_OUTPUT_LEN = 128
    SUPPORTED_DATASET_PATHS = {
        "lmarena-ai/VisionArena-Chat":
        lambda x: x["conversation"][0][0]["content"],
        "lmarena-ai/vision-arena-bench-v0.1":
        lambda x: x["turns"][0][0]["content"]
    }
    IS_MULTIMODAL = True

    def sample(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_requests: int,
        output_len: Optional[int] = None,
        enable_multimodal_chat: bool = False,
        **kwargs,
    ) -> list:
        output_len = (output_len
                      if output_len is not None else self.DEFAULT_OUTPUT_LEN)
        sampled_requests = []
        for item in self.data:
            if len(sampled_requests) >= num_requests:
                break
            parser_fn = self.SUPPORTED_DATASET_PATHS.get(self.dataset_path)
            if parser_fn is None:
                raise ValueError(
                    f"Unsupported dataset path: {self.dataset_path}")
            prompt = parser_fn(item)
            mm_content = process_image(item["images"][0])
            prompt_len = len(tokenizer(prompt).input_ids)
            if enable_multimodal_chat:
                # Note: when chat is enabled, the request prompt_len is no
                # longer accurate; the request output is used to count the
                # actual prompt length instead.
                prompt = self.apply_multimodal_chat_transformation(
                    prompt, mm_content)
            sampled_requests.append(
                SampleRequest(
                    prompt=prompt,
                    prompt_len=prompt_len,
                    expected_output_len=output_len,
                    multi_modal_data=mm_content,
                ))
        self.maybe_oversample_requests(sampled_requests, num_requests)
        return sampled_requests

DEFAULT_OUTPUT_LEN class-attribute instance-attribute

DEFAULT_OUTPUT_LEN = 128

IS_MULTIMODAL class-attribute instance-attribute

IS_MULTIMODAL = True

SUPPORTED_DATASET_PATHS class-attribute instance-attribute

SUPPORTED_DATASET_PATHS = {
    "lmarena-ai/VisionArena-Chat": lambda x: x[
        "conversation"
    ][0][0]["content"],
    "lmarena-ai/vision-arena-bench-v0.1": lambda x: x[
        "turns"
    ][0][0]["content"],
}

sample

sample(
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    **kwargs,
) -> list
Source code in vllm/benchmarks/datasets.py
def sample(
    self,
    tokenizer: PreTrainedTokenizerBase,
    num_requests: int,
    output_len: Optional[int] = None,
    enable_multimodal_chat: bool = False,
    **kwargs,
) -> list:
    output_len = (output_len
                  if output_len is not None else self.DEFAULT_OUTPUT_LEN)
    sampled_requests = []
    for item in self.data:
        if len(sampled_requests) >= num_requests:
            break
        parser_fn = self.SUPPORTED_DATASET_PATHS.get(self.dataset_path)
        if parser_fn is None:
            raise ValueError(
                f"Unsupported dataset path: {self.dataset_path}")
        prompt = parser_fn(item)
        mm_content = process_image(item["images"][0])
        prompt_len = len(tokenizer(prompt).input_ids)
        if enable_multimodal_chat:
            # Note: when chat is enabled, the request prompt_len is no
            # longer accurate; the request output is used to count the
            # actual prompt length instead.
            prompt = self.apply_multimodal_chat_transformation(
                prompt, mm_content)
        sampled_requests.append(
            SampleRequest(
                prompt=prompt,
                prompt_len=prompt_len,
                expected_output_len=output_len,
                multi_modal_data=mm_content,
            ))
    self.maybe_oversample_requests(sampled_requests, num_requests)
    return sampled_requests
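
An illustrative sketch that pulls the VisionArena chat split from the Hugging Face Hub (network access required); the tokenizer name is a placeholder, and the constructor keywords mirror how get_samples builds HuggingFace-based datasets.

from transformers import AutoTokenizer

from vllm.benchmarks.datasets import VisionArenaDataset

tokenizer = AutoTokenizer.from_pretrained("llava-hf/llava-1.5-7b-hf")  # placeholder
dataset = VisionArenaDataset(
    dataset_path="lmarena-ai/VisionArena-Chat",
    dataset_subset=None,
    dataset_split="train",
    random_seed=0,
)
requests = dataset.sample(
    tokenizer=tokenizer,
    num_requests=4,
    output_len=None,  # falls back to DEFAULT_OUTPUT_LEN (128)
)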

_format_zeta_prompt

_format_zeta_prompt(
    sample: dict,
    original_start_marker: str = "<|editable_region_start|>",
) -> dict

Format the zeta prompt for the Next Edit Prediction (NEP) dataset.

This function formats examples from the NEP dataset into prompts and expected outputs. It could be further extended to support more NEP datasets.

Parameters:

    sample (dict, required):
        The dataset sample containing events, inputs, and outputs.
    original_start_marker (str, default '<|editable_region_start|>'):
        The marker indicating the start of the editable region.

Returns:

    dict: A dictionary with the formatted prompts and expected outputs.

Source code in vllm/benchmarks/datasets.py
def _format_zeta_prompt(
        sample: dict,
        original_start_marker: str = "<|editable_region_start|>") -> dict:
    """Format the zeta prompt for the Next Edit Prediction (NEP) dataset.

    This function formats examples from the NEP dataset
    into prompts and expected outputs. It could be
    further extended to support more NEP datasets.

    Args:
        sample: The dataset sample containing events,
            inputs, and outputs.
        original_start_marker: The marker indicating the
            start of the editable region. Defaults to
            "<|editable_region_start|>".

    Returns:
        A dictionary with the formatted prompts and expected outputs.
    """
    events = sample["events"]
    input = sample["input"]
    output = sample["output"]
    prompt = zeta_prompt.format(events, input)

    # following the original implementation, extract the focused region
    # from the raw output
    output_start_index = output.find(original_start_marker)
    output_focused_region = output[output_start_index:]
    expected_output = output_focused_region

    return {"prompt": prompt, "expected_output": expected_output}

add_dataset_parser

add_dataset_parser(parser: FlexibleArgumentParser)
Source code in vllm/benchmarks/datasets.py
def add_dataset_parser(parser: FlexibleArgumentParser):
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument(
        "--num-prompts",
        type=int,
        default=1000,
        help="Number of prompts to process.",
    )
    parser.add_argument(
        "--dataset-name",
        type=str,
        default="random",
        choices=["sharegpt", "burstgpt", "sonnet", "random", "hf", "custom"],
        help="Name of the dataset to benchmark on.",
    )
    parser.add_argument(
        "--dataset-path",
        type=str,
        default=None,
        help="Path to the sharegpt/sonnet dataset. "
        "Or the huggingface dataset ID if using HF dataset.",
    )

    # group for dataset specific arguments
    custom_group = parser.add_argument_group("custom dataset options")
    custom_group.add_argument(
        "--custom-output-len",
        type=int,
        default=256,
        help=
        "Number of output tokens per request, used only for custom dataset.",
    )
    custom_group.add_argument(
        "--custom-skip-chat-template",
        action="store_true",
        help=
        "Skip applying chat template to prompt, used only for custom dataset.",
    )

    sonnet_group = parser.add_argument_group("sonnet dataset options")
    sonnet_group.add_argument(
        "--sonnet-input-len",
        type=int,
        default=550,
        help=
        "Number of input tokens per request, used only for sonnet dataset.",
    )
    sonnet_group.add_argument(
        "--sonnet-output-len",
        type=int,
        default=150,
        help=
        "Number of output tokens per request, used only for sonnet dataset.",
    )
    sonnet_group.add_argument(
        "--sonnet-prefix-len",
        type=int,
        default=200,
        help=
        "Number of prefix tokens per request, used only for sonnet dataset.",
    )

    sharegpt_group = parser.add_argument_group("sharegpt dataset options")
    sharegpt_group.add_argument(
        "--sharegpt-output-len",
        type=int,
        default=None,
        help="Output length for each request. Overrides the output length "
        "from the ShareGPT dataset.",
    )

    random_group = parser.add_argument_group("random dataset options")
    random_group.add_argument(
        "--random-input-len",
        type=int,
        default=1024,
        help=
        "Number of input tokens per request, used only for random sampling.",
    )
    random_group.add_argument(
        "--random-output-len",
        type=int,
        default=128,
        help=
        "Number of output tokens per request, used only for random sampling.",
    )
    random_group.add_argument(
        "--random-range-ratio",
        type=float,
        default=0.0,
        help="Range ratio for sampling input/output length, "
        "used only for random sampling. Must be in the range [0, 1) to define "
        "a symmetric sampling range"
        "[length * (1 - range_ratio), length * (1 + range_ratio)].",
    )
    random_group.add_argument(
        "--random-prefix-len",
        type=int,
        default=0,
        help=("Number of fixed prefix tokens before the random context "
              "in a request. "
              "The total input length is the sum of `random-prefix-len` and "
              "a random "
              "context length sampled from [input_len * (1 - range_ratio), "
              "input_len * (1 + range_ratio)]."),
    )

    hf_group = parser.add_argument_group("hf dataset options")
    hf_group.add_argument("--hf-subset",
                          type=str,
                          default=None,
                          help="Subset of the HF dataset.")
    hf_group.add_argument("--hf-split",
                          type=str,
                          default=None,
                          help="Split of the HF dataset.")
    hf_group.add_argument(
        "--hf-output-len",
        type=int,
        default=None,
        help="Output length for each request. Overrides the output lengths "
        "from the sampled HF dataset.",
    )
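
An illustrative sketch of attaching these flags to a parser, assuming FlexibleArgumentParser is imported from vllm.utils as in vLLM's own benchmark scripts.

from vllm.utils import FlexibleArgumentParser

from vllm.benchmarks.datasets import add_dataset_parser

parser = FlexibleArgumentParser(description="dataset options demo")
add_dataset_parser(parser)
args = parser.parse_args([])  # every flag has a default
print(args.dataset_name, args.num_prompts)  # random 1000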

get_samples

get_samples(args, tokenizer) -> list[SampleRequest]
Source code in vllm/benchmarks/datasets.py
def get_samples(args, tokenizer) -> list[SampleRequest]:
    if args.dataset_name == "custom":
        dataset = CustomDataset(dataset_path=args.dataset_path)
        input_requests = dataset.sample(
            num_requests=args.num_prompts,
            tokenizer=tokenizer,
            output_len=args.custom_output_len,
            skip_chat_template=args.custom_skip_chat_template,
        )

    elif args.dataset_name == "sonnet":
        dataset = SonnetDataset(dataset_path=args.dataset_path)
        # For the "sonnet" dataset, formatting depends on the backend.
        if args.endpoint_type == "openai-chat":
            input_requests = dataset.sample(
                num_requests=args.num_prompts,
                input_len=args.sonnet_input_len,
                output_len=args.sonnet_output_len,
                prefix_len=args.sonnet_prefix_len,
                tokenizer=tokenizer,
                return_prompt_formatted=False,
            )
        else:
            assert tokenizer.chat_template or tokenizer.default_chat_template, (
                "Tokenizer/model must have chat template for sonnet dataset.")
            input_requests = dataset.sample(
                num_requests=args.num_prompts,
                input_len=args.sonnet_input_len,
                output_len=args.sonnet_output_len,
                prefix_len=args.sonnet_prefix_len,
                tokenizer=tokenizer,
                return_prompt_formatted=True,
            )

    elif args.dataset_name == "hf":
        # All of the following datasets are subclasses of the
        # HuggingFaceDataset base class.
        if args.dataset_path in VisionArenaDataset.SUPPORTED_DATASET_PATHS:
            dataset_class = VisionArenaDataset
            args.hf_split = "train"
            args.hf_subset = None
        elif args.dataset_path in InstructCoderDataset.SUPPORTED_DATASET_PATHS:
            dataset_class = InstructCoderDataset
            args.hf_split = "train"
        elif args.dataset_path in MTBenchDataset.SUPPORTED_DATASET_PATHS:
            dataset_class = MTBenchDataset
            args.hf_split = "train"
        elif args.dataset_path in ConversationDataset.SUPPORTED_DATASET_PATHS:
            dataset_class = ConversationDataset
        elif args.dataset_path in AIMODataset.SUPPORTED_DATASET_PATHS:
            dataset_class = AIMODataset
            args.hf_split = "train"
        elif args.dataset_path in NextEditPredictionDataset.SUPPORTED_DATASET_PATHS:  # noqa: E501
            dataset_class = NextEditPredictionDataset
            args.hf_split = "train"
        elif args.dataset_path in ASRDataset.SUPPORTED_DATASET_PATHS:
            dataset_class = ASRDataset
            args.hf_split = "train"
        else:
            supported_datasets = set([
                dataset_name for cls in HuggingFaceDataset.__subclasses__()
                for dataset_name in cls.SUPPORTED_DATASET_PATHS
            ])
            raise ValueError(
                f"Unsupported dataset path: {args.dataset_path}. "
                "Huggingface dataset only supports dataset_path"
                f" from one of following: {supported_datasets}. "
                "Please consider contributing if you would "
                "like to add support for additional dataset formats.")

        if dataset_class.IS_MULTIMODAL and args.endpoint_type not in [
                "openai-chat",
                "openai-audio",
        ]:
            # multi-modal benchmark is only available on OpenAI Chat backend.
            raise ValueError(
                "Multi-modal content is only supported on 'openai-chat' and "
                "'openai-audio' backend.")
        input_requests = dataset_class(
            dataset_path=args.dataset_path,
            dataset_subset=args.hf_subset,
            dataset_split=args.hf_split,
            random_seed=args.seed,
        ).sample(
            num_requests=args.num_prompts,
            tokenizer=tokenizer,
            output_len=args.hf_output_len,
        )

    else:
        # For datasets that follow a similar structure, use a mapping.
        dataset_mapping = {
            "sharegpt":
            lambda: ShareGPTDataset(random_seed=args.seed,
                                    dataset_path=args.dataset_path).sample(
                                        tokenizer=tokenizer,
                                        num_requests=args.num_prompts,
                                        output_len=args.sharegpt_output_len,
                                    ),
            "burstgpt":
            lambda: BurstGPTDataset(random_seed=args.seed,
                                    dataset_path=args.dataset_path).
            sample(tokenizer=tokenizer, num_requests=args.num_prompts),
            "random":
            lambda: RandomDataset(random_seed=args.seed,
                                  dataset_path=args.dataset_path).sample(
                tokenizer=tokenizer,
                num_requests=args.num_prompts,
                prefix_len=args.random_prefix_len,
                input_len=args.random_input_len,
                output_len=args.random_output_len,
                range_ratio=args.random_range_ratio,
            ),
        }

        try:
            input_requests = dataset_mapping[args.dataset_name]()
        except KeyError as err:
            raise ValueError(f"Unknown dataset: {args.dataset_name}") from err

    return input_requests
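
An illustrative end-to-end sketch using the "random" dataset, which needs no dataset file; the tokenizer name is a placeholder.

from transformers import AutoTokenizer

from vllm.benchmarks.datasets import add_dataset_parser, get_samples
from vllm.utils import FlexibleArgumentParser

parser = FlexibleArgumentParser()
add_dataset_parser(parser)
args = parser.parse_args([
    "--dataset-name", "random",
    "--num-prompts", "32",
    "--random-input-len", "256",
    "--random-output-len", "64",
])

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder
requests = get_samples(args, tokenizer)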

is_valid_sequence

is_valid_sequence(
    prompt_len: int,
    output_len: int,
    min_len: int = 4,
    max_prompt_len: int = 1024,
    max_total_len: int = 2048,
    skip_min_output_len_check: bool = False,
) -> bool

Validate a sequence based on prompt and output lengths.

Default pruning criteria are copied from the original sample_hf_requests and sample_sharegpt_requests functions in benchmark_serving.py, as well as from sample_requests in benchmark_throughput.py.

Source code in vllm/benchmarks/datasets.py
def is_valid_sequence(
    prompt_len: int,
    output_len: int,
    min_len: int = 4,
    max_prompt_len: int = 1024,
    max_total_len: int = 2048,
    skip_min_output_len_check: bool = False,
) -> bool:
    """
    Validate a sequence based on prompt and output lengths.

    Default pruning criteria are copied from the original `sample_hf_requests`
    and `sample_sharegpt_requests` functions in benchmark_serving.py, as well as
    from `sample_requests` in benchmark_throughput.py.
    """
    # Check for invalid conditions
    prompt_too_short = prompt_len < min_len
    output_too_short = (not skip_min_output_len_check) and (output_len
                                                            < min_len)
    prompt_too_long = prompt_len > max_prompt_len
    combined_too_long = (prompt_len + output_len) > max_total_len

    # Return True if none of the invalid conditions are met
    return not (prompt_too_short or output_too_short or prompt_too_long
                or combined_too_long)
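
A few illustrative checks against the default thresholds:

from vllm.benchmarks.datasets import is_valid_sequence

is_valid_sequence(prompt_len=32, output_len=64)      # True
is_valid_sequence(prompt_len=2, output_len=64)       # False: prompt shorter than min_len (4)
is_valid_sequence(prompt_len=1500, output_len=64)    # False: prompt longer than max_prompt_len (1024)
is_valid_sequence(prompt_len=1000, output_len=1500)  # False: total exceeds max_total_len (2048)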

lora_path_on_disk cached

lora_path_on_disk(lora_path: str) -> str
Source code in vllm/benchmarks/datasets.py
@cache
def lora_path_on_disk(lora_path: str) -> str:
    return get_adapter_absolute_path(lora_path)

process_image

process_image(image: Any) -> Mapping[str, Any]

Process a single image input and return a multimedia content dictionary.

Supports three input types:

  1. Dictionary with raw image bytes:
     - Expects a dict with a 'bytes' key containing raw image data.
     - Loads the bytes as a PIL.Image.Image.

  2. PIL.Image.Image input:
     - Converts the image to RGB.
     - Saves the image as a JPEG in memory.
     - Encodes the JPEG data as a base64 string.
     - Returns a dictionary with the image as a base64 data URL.

  3. String input:
     - Treats the string as a URL or local file path.
     - Prepends "file://" if the string doesn't start with "http://" or "file://".
     - Returns a dictionary with the image URL.

Raises:

    ValueError: If the input is not a supported type.

Source code in vllm/benchmarks/datasets.py
def process_image(image: Any) -> Mapping[str, Any]:
    """
    Process a single image input and return a multimedia content dictionary.

    Supports three input types:

    1. Dictionary with raw image bytes: - Expects a dict with a 'bytes' key
       containing raw image data.  - Loads the bytes as a PIL.Image.Image.

    2. PIL.Image.Image input: - Converts the image to RGB.  - Saves the image as
       a JPEG in memory.  - Encodes the JPEG data as a base64 string.  - Returns
       a dictionary with the image as a base64 data URL.

    3. String input: - Treats the string as a URL or local file path.  -
       Prepends "file://" if the string doesn't start with "http://" or
       "file://".  - Returns a dictionary with the image URL.

    Raises:
        ValueError: If the input is not a supported type.
    """
    if isinstance(image, dict) and 'bytes' in image:
        image = Image.open(BytesIO(image['bytes']))
    if isinstance(image, Image.Image):
        image = convert_image_mode(image, "RGB")
        with io.BytesIO() as image_data:
            image.save(image_data, format="JPEG")
            image_base64 = base64.b64encode(
                image_data.getvalue()).decode("utf-8")
        return {
            "type": "image_url",
            "image_url": {
                "url": f"data:image/jpeg;base64,{image_base64}"
            },
        }

    if isinstance(image, str):
        image_url = (image if image.startswith(
            ("http://", "file://")) else f"file://{image}")
        return {"type": "image_url", "image_url": {"url": image_url}}

    raise ValueError(f"Invalid image input {image}. Must be a PIL.Image.Image"
                     " or str or dictionary with raw image bytes.")