Commit

Fix log message in scheduler (vllm-project#652)
LiuXiaoxuanPKU authored Aug 2, 2023
1 parent 64f23c2 commit 20044ca
Showing 1 changed file with 5 additions and 5 deletions.
vllm/core/scheduler.py (5 additions, 5 deletions)
@@ -190,13 +190,13 @@ def _schedule(
                     break
 
                 num_prompt_tokens = seq_group.get_seqs()[0].get_len()
-                if num_prompt_tokens > min(
-                        self.scheduler_config.max_model_len,
-                        self.scheduler_config.max_num_batched_tokens):
+                prompt_limit = min(
+                    self.scheduler_config.max_model_len,
+                    self.scheduler_config.max_num_batched_tokens)
+                if num_prompt_tokens > prompt_limit:
                     logger.warning(
                         f"Input prompt ({num_prompt_tokens} tokens) is too long"
-                        " and exceeds limit of "
-                        f"{self.scheduler_config.max_model_len}")
+                        f" and exceeds limit of {prompt_limit}")
                     for seq in seq_group.get_seqs():
                         seq.status = SequenceStatus.FINISHED_IGNORED
                     ignored_seq_groups.append(seq_group)
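
The substance of the fix: the effective prompt cap is min(max_model_len, max_num_batched_tokens), but the old warning always printed max_model_len, which is misleading whenever max_num_batched_tokens is the binding limit. A minimal standalone sketch of the corrected check follows; the SchedulerConfig stand-in and check_prompt helper here are hypothetical illustrations, not vLLM's actual classes:

import logging
from dataclasses import dataclass

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("scheduler-sketch")

# Hypothetical stand-in for vLLM's SchedulerConfig, holding only the
# two fields the prompt-length check reads.
@dataclass
class SchedulerConfig:
    max_model_len: int
    max_num_batched_tokens: int

def check_prompt(num_prompt_tokens: int, cfg: SchedulerConfig) -> bool:
    """Return True if the prompt fits, mirroring the fixed check."""
    # The effective cap is the smaller of the two limits.
    prompt_limit = min(cfg.max_model_len, cfg.max_num_batched_tokens)
    if num_prompt_tokens > prompt_limit:
        # After the fix, the warning reports the limit that was actually
        # applied, not just max_model_len.
        logger.warning(
            f"Input prompt ({num_prompt_tokens} tokens) is too long"
            f" and exceeds limit of {prompt_limit}")
        return False
    return True

# With max_num_batched_tokens < max_model_len, a 3000-token prompt is
# rejected at the 2560 cap, which the old message would have reported
# as 4096.
check_prompt(3000, SchedulerConfig(max_model_len=4096,
                                   max_num_batched_tokens=2560))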
