From 7be8e60564886490f99e64e46d4aa29ac74908a0 Mon Sep 17 00:00:00 2001
From: Enwei Zhu <21126786+syuoni@users.noreply.github.com>
Date: Thu, 20 Mar 2025 13:30:15 +0000
Subject: [PATCH] fix

Signed-off-by: Enwei Zhu <21126786+syuoni@users.noreply.github.com>
---
 examples/llm-api/llm_logits_processor.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/llm-api/llm_logits_processor.py b/examples/llm-api/llm_logits_processor.py
index 0195dddd31..933be9f49a 100644
--- a/examples/llm-api/llm_logits_processor.py
+++ b/examples/llm-api/llm_logits_processor.py
@@ -9,7 +9,7 @@
 
 
 # The recommended way to create a customized logits processor:
-# * Subclass this class and implement the processing logics in the __call__ method.
+# * Subclass LogitsProcessor and implement the processing logics in the __call__ method.
 # * Create an instance and pass to SamplingParams.
 # Alternatively, you can create any callable with the same signature with the __call__ method.
 # This simple callback will output a specific token at each step irrespective of prompt.
@@ -32,7 +32,7 @@ def __call__(self, req_id: int, logits: torch.Tensor,
 
 
 # The recommended way to create a customized batched logits processor:
-# * Subclass this class and implement the processing logics in the __call__ method.
+# * Subclass BatchedLogitsProcessor and implement the processing logics in the __call__ method.
 # * Create an instance and pass to LLM.
 # Alternatively, you can create any callable with the same signature with the __call__ method.
 # A batched logits processor's arguments for all requests in a batch are made available as lists.