Skip to content

Commit 4c63262

Browse files
committed
Formatting change
Signed-off-by: YuhanBai <[email protected]>
1 parent 1b1f727 commit 4c63262

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

vllm_ascend/worker/model_runner_v1.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4493,7 +4493,7 @@ def _to_list(self, sampled_token_ids: torch.Tensor) -> list[np.ndarray]:
44934493
return [row for row in pinned.numpy()]
44944494

44954495
def _do_async_exponential(self, default_stream, logits_indices):
4496-
# Calculating exponential randoms in a different stream
4496+
# Calculating exponential randoms in a different stream
44974497
# and overlapping with model executing.
44984498
with torch.npu.stream(self._async_exponential_stream):
44994499
self._async_exponential_stream.wait_stream(default_stream)
@@ -4507,4 +4507,4 @@ def _do_async_exponential(self, default_stream, logits_indices):
45074507
for i, generator in generators.items():
45084508
q[i].exponential_(generator=generator)
45094509
self._async_exponential_event.record()
4510-
self.sampler.set_q_event(q, self._async_exponential_event)
4510+
self.sampler.set_q_event(q, self._async_exponential_event)

0 commit comments

Comments
 (0)