
vllm.distributed.communication_op

broadcast_tensor_dict

broadcast_tensor_dict(
    tensor_dict: Optional[
        dict[Any, Union[Tensor, Any]]
    ] = None,
    src: int = 0,
)

Broadcast a dictionary of tensors and other picklable Python objects from the source rank to all ranks in the tensor model parallel group. If torch.distributed is not initialized, the input dictionary is returned unchanged.

Source code in vllm/distributed/communication_op.py
def broadcast_tensor_dict(tensor_dict: Optional[dict[Any, Union[torch.Tensor,
                                                                Any]]] = None,
                          src: int = 0):
    if not torch.distributed.is_initialized():
        return tensor_dict
    return get_tp_group().broadcast_tensor_dict(tensor_dict, src)
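
A minimal usage sketch, assuming the distributed environment and vLLM's tensor parallel group have already been initialized (e.g. by the engine). The rank check via get_tp_group().rank_in_group, the payload keys, and the shapes are illustrative assumptions, not part of the documented API.

import torch
from vllm.distributed.communication_op import broadcast_tensor_dict
from vllm.distributed.parallel_state import get_tp_group

# Assumed: torch.distributed and the TP group are initialized; otherwise
# broadcast_tensor_dict is a no-op that returns the input dict unchanged.
if get_tp_group().rank_in_group == 0:
    # The source rank supplies the payload: tensors and plain Python objects.
    payload = {"hidden": torch.ones(4, 8, device="cuda"), "step": 7}
else:
    # Non-source ranks pass None and receive the broadcast result.
    payload = None

payload = broadcast_tensor_dict(payload, src=0)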

tensor_model_parallel_all_gather

tensor_model_parallel_all_gather(
    input_: Tensor, dim: int = -1
) -> Tensor

All-gather the input tensor across model parallel group.

Source code in vllm/distributed/communication_op.py
def tensor_model_parallel_all_gather(input_: torch.Tensor,
                                     dim: int = -1) -> torch.Tensor:
    """All-gather the input tensor across model parallel group."""
    return get_tp_group().all_gather(input_, dim)
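
A hedged sketch of the call, assuming a tensor parallel size of 2 and an already-initialized TP group; the shapes and device are illustrative only.

import torch
from vllm.distributed.communication_op import tensor_model_parallel_all_gather

# Each TP rank holds its shard of the last dimension, e.g. one half of a
# column-parallel linear layer's output.
local_out = torch.randn(16, 1024, device="cuda")

# The shards from all ranks are concatenated along `dim`; with TP size 2
# the gathered tensor has shape [16, 2048] on every rank.
full_out = tensor_model_parallel_all_gather(local_out, dim=-1)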

tensor_model_parallel_all_reduce

tensor_model_parallel_all_reduce(input_: Tensor) -> Tensor

All-reduce the input tensor across model parallel group.

Source code in vllm/distributed/communication_op.py
def tensor_model_parallel_all_reduce(input_: torch.Tensor) -> torch.Tensor:
    """All-reduce the input tensor across model parallel group."""
    return get_tp_group().all_reduce(input_)
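
A hedged sketch, again assuming an initialized TP group; the shape and the row-parallel framing are illustrative assumptions.

import torch
from vllm.distributed.communication_op import tensor_model_parallel_all_reduce

# Each rank holds a partial sum, e.g. the output of a row-parallel linear
# layer computed from its local weight shard.
partial = torch.randn(16, 4096, device="cuda")

# After the all-reduce every TP rank holds the same element-wise sum of all
# ranks' partial results; the tensor shape is unchanged.
total = tensor_model_parallel_all_reduce(partial)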

tensor_model_parallel_gather

tensor_model_parallel_gather(
    input_: Tensor, dst: int = 0, dim: int = -1
) -> Optional[Tensor]

Gather the input tensor across model parallel group.

Source code in vllm/distributed/communication_op.py
def tensor_model_parallel_gather(input_: torch.Tensor,
                                 dst: int = 0,
                                 dim: int = -1) -> Optional[torch.Tensor]:
    """Gather the input tensor across model parallel group."""
    return get_tp_group().gather(input_, dst, dim)
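
A hedged sketch, assuming tensor parallel size 2 and an initialized TP group; the vocab-parallel-logits framing and shapes are illustrative assumptions.

import torch
from vllm.distributed.communication_op import tensor_model_parallel_gather

# Typical use: collecting vocab-parallel logits onto a single rank.
local_logits = torch.randn(16, 16000, device="cuda")

# Only the destination rank receives the concatenated tensor; every other
# rank gets None, so downstream code must handle both cases.
gathered = tensor_model_parallel_gather(local_logits, dst=0, dim=-1)
if gathered is not None:
    print(gathered.shape)  # e.g. [16, 32000] with tensor parallel size 2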

tensor_model_parallel_reduce_scatter

tensor_model_parallel_reduce_scatter(
    input_: Tensor, dim: int = -1
) -> Tensor

Reduce-Scatter the input tensor across model parallel group.

Source code in vllm/distributed/communication_op.py
def tensor_model_parallel_reduce_scatter(input_: torch.Tensor,
                                         dim: int = -1) -> torch.Tensor:
    """Reduce-Scatter the input tensor across model parallel group."""
    return get_tp_group().reduce_scatter(input_, dim)
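
A hedged sketch, assuming tensor parallel size 2 and an initialized TP group; the shapes are illustrative assumptions.

import torch
from vllm.distributed.communication_op import tensor_model_parallel_reduce_scatter

# Each rank contributes a full-sized partial result.
partial = torch.randn(16, 4096, device="cuda")

# The inputs are summed across the TP group and the result is split along
# `dim`, so each rank keeps one shard: [16, 2048] with TP size 2.
shard = tensor_model_parallel_reduce_scatter(partial, dim=-1)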