diff --git a/examples/llm-api/llm_logits_processor.py b/examples/llm-api/llm_logits_processor.py
index 0195dddd31..933be9f49a 100644
--- a/examples/llm-api/llm_logits_processor.py
+++ b/examples/llm-api/llm_logits_processor.py
@@ -9,7 +9,7 @@
 # The recommended way to create a customized logits processor:
-# * Subclass this class and implement the processing logics in the __call__ method.
+# * Subclass LogitsProcessor and implement the processing logic in the __call__ method.
 # * Create an instance and pass to SamplingParams.
 # Alternatively, you can create any callable with the same signature with the __call__ method.
 # This simple callback will output a specific token at each step irrespective of prompt.
@@ -32,7 +32,7 @@ def __call__(self, req_id: int, logits: torch.Tensor,
 # The recommended way to create a customized batched logits processor:
-# * Subclass this class and implement the processing logics in the __call__ method.
+# * Subclass BatchedLogitsProcessor and implement the processing logic in the __call__ method.
 # * Create an instance and pass to LLM.
 # Alternatively, you can create any callable with the same signature with the __call__ method.
 # A batched logits processor's arguments for all requests in a batch are made available as lists.
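
For reference, a minimal sketch of the pattern the updated comments describe: subclass `LogitsProcessor`, implement `__call__`, and pass an instance via `SamplingParams`. Only the class names, the `SamplingParams`/`LLM` hand-off, and the first `__call__` parameters (`req_id`, `logits`) are visible in this diff; the import path, the trailing `__call__` parameters, and the `logits_processor` keyword are assumptions here, so treat this as illustrative rather than as the example file's actual contents.

```python
# Illustrative sketch only -- the import path and the full __call__ signature
# are assumptions; see the example file itself for the authoritative API.
from typing import List, Optional

import torch

from tensorrt_llm import SamplingParams
from tensorrt_llm.sampling_params import LogitsProcessor  # assumed import path


class ForceTokenProcessor(LogitsProcessor):
    """Per-request processor that steers generation toward one token id."""

    def __init__(self, allowed_token_id: int):
        self.allowed_token_id = allowed_token_id

    # The diff hunk only shows (self, req_id, logits, ...); the trailing
    # parameters here are assumptions added for completeness.
    def __call__(self, req_id: int, logits: torch.Tensor,
                 token_ids: List[List[int]], stream_ptr: Optional[int],
                 client_id: Optional[int]) -> None:
        # Modify logits in place: suppress every entry except the allowed token.
        mask = torch.full_like(logits, float("-inf"))
        mask[..., self.allowed_token_id] = 0.0
        logits += mask


# "Create an instance and pass to SamplingParams."
sampling_params = SamplingParams(
    max_tokens=8,
    logits_processor=ForceTokenProcessor(allowed_token_id=42),  # keyword name assumed
)
```

The batched variant follows the same pattern: subclass `BatchedLogitsProcessor`, accept the per-request arguments as lists, and pass the instance to `LLM` rather than to `SamplingParams`.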