
Commit

[Minor] Small fix to make distributed init logic in worker looks clea…
zhuohan123 authored Feb 18, 2024
1 parent 786b7f1 commit 537c975
Showing 1 changed file with 4 additions and 2 deletions.
vllm/worker/worker.py (4 additions, 2 deletions)
@@ -93,8 +93,6 @@ def init_model(self, cupy_port: Optional[int] = None) -> None:
         # Initialize the distributed environment.
         init_distributed_environment(self.parallel_config, self.rank,
                                      cupy_port, self.distributed_init_method)
-        if not self.parallel_config.disable_custom_all_reduce:
-            init_custom_ar()
         # Initialize the model.
         set_random_seed(self.model_config.seed)

@@ -288,6 +286,10 @@ def init_distributed_environment(
     ensure_model_parallel_initialized(parallel_config.tensor_parallel_size,
                                       parallel_config.pipeline_parallel_size)

+    # Initialize a custom fast all-reduce implementation.
+    if not parallel_config.disable_custom_all_reduce:
+        init_custom_ar()
+

 def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
     # Check if the GPU supports the dtype.
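The net effect is that Worker.init_model no longer touches the custom all-reduce path directly; that guard now lives next to the rest of the distributed setup in init_distributed_environment. A minimal sketch of init_model after this commit is below; everything not shown in the diff (the rest of the real method, decorators, error handling) is omitted and should be treated as an assumption, not the actual implementation.

    # Sketch only, based on the context lines of the diff above.
    def init_model(self, cupy_port: Optional[int] = None) -> None:
        # All distributed setup, including the optional custom all-reduce
        # kernel, now happens inside init_distributed_environment.
        init_distributed_environment(self.parallel_config, self.rank,
                                     cupy_port, self.distributed_init_method)
        # Initialize the model.
        set_random_seed(self.model_config.seed)

Grouping the disable_custom_all_reduce check with ensure_model_parallel_initialized also means any caller of init_distributed_environment gets the same behavior without repeating the guard.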
