Skip to content

Commit 0789be1

Browse files
abukhoye authored and platero97 committed
[Bug-fix:] QEFFAutoModelForImageTextToText class docstring fixing (quic#372)
This pull request addresses the issue with the code block in the class docstring of `QEFFAutoModelForImageTextToText`. Previously, the docstring was not displaying correctly on `gh-pages` due to an error in the Python code block. --------- Signed-off-by: Abukhoyer Shaik <[email protected]>
1 parent 8009d1a commit 0789be1

File tree

2 files changed

+9
-9
lines changed

2 files changed

+9
-9
lines changed

tests/transformers/spd/test_pld_inference.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -145,9 +145,9 @@ def get_padded_input_len(input_len: int, prefill_seq_len: int, ctx_len: int):
145145
"""
146146
num_chunks = -(input_len // -prefill_seq_len) # ceil divide without float
147147
input_len_padded = num_chunks * prefill_seq_len # Convert input_len to a multiple of prefill_seq_len
148-
assert (
149-
input_len_padded <= ctx_len
150-
), "input_len rounded to nearest prefill_seq_len multiple should be less than ctx_len"
148+
assert input_len_padded <= ctx_len, (
149+
"input_len rounded to nearest prefill_seq_len multiple should be less than ctx_len"
150+
)
151151
return input_len_padded
152152

153153

tests/transformers/spd/test_spd_inference.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -86,9 +86,9 @@ def get_padded_input_len(input_len: int, prefill_seq_len: int, ctx_len: int):
8686
"""
8787
num_chunks = -(input_len // -prefill_seq_len) # ceil divide without float
8888
input_len_padded = num_chunks * prefill_seq_len # Convert input_len to a multiple of prefill_seq_len
89-
assert (
90-
input_len_padded <= ctx_len
91-
), "input_len rounded to nearest prefill_seq_len multiple should be less than ctx_len"
89+
assert input_len_padded <= ctx_len, (
90+
"input_len rounded to nearest prefill_seq_len multiple should be less than ctx_len"
91+
)
9292
return input_len_padded
9393

9494

@@ -335,9 +335,9 @@ def test_spec_decode_inference(
335335
for prompt, generation in zip(prompts, batch_decode):
336336
print(f"{prompt=} {generation=}")
337337
# validation check
338-
assert mean_num_accepted_tokens == float(
339-
num_speculative_tokens + 1
340-
), f"mean number of accepted tokens is {mean_num_accepted_tokens} but should be {num_speculative_tokens + 1}"
338+
assert mean_num_accepted_tokens == float(num_speculative_tokens + 1), (
339+
f"mean number of accepted tokens is {mean_num_accepted_tokens} but should be {num_speculative_tokens + 1}"
340+
)
341341
del target_model_session
342342
del draft_model_session
343343
generated_ids = np.asarray(generated_ids[0]).flatten()

0 commit comments

Comments (0)