2 files changed: +6 -4 lines changed
Eleuther evaluation config for Qwen2.5 (qwen2_5/evaluation):

@@ -1,7 +1,7 @@
 # Config for EleutherEvalRecipe in eleuther_eval.py
 #
 # To launch, run the following command from root torchtune directory:
-# tune run eleuther_eval --config eleuther_evaluation tasks=["truthfulqa_mc2","hellaswag"]
+# tune run eleuther_eval --config qwen2_5/evaluation

 output_dir: ./ # Not needed

@@ -11,7 +11,7 @@

 checkpointer:
   _component_: torchtune.training.FullModelHFCheckpointer
-  checkpoint_dir: /tmp/Qwen2.5-0_5B-Instruct
+  checkpoint_dir: /tmp/Qwen2.5-0.5B-Instruct
   checkpoint_files: [
     model.safetensors,
   ]
@@ -21,8 +21,8 @@ checkpointer:
 # Tokenizer
 tokenizer:
   _component_: torchtune.models.qwen2_5.qwen2_5_tokenizer
-  path: /tmp/Qwen2.5-0_5B-Instruct/vocab.json
-  merges_file: /tmp/Qwen2.5-0_5B-Instruct/merges.txt
+  path: /tmp/Qwen2.5-0.5B-Instruct/vocab.json
+  merges_file: /tmp/Qwen2.5-0.5B-Instruct/merges.txt
   max_seq_len: null

 # Environment
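The launch comment now points at the packaged config name. Task selection and other fields can still be overridden on the command line in the usual torchtune way; the override below simply mirrors the tasks list from the removed comment and is optional, not required:

    tune run eleuther_eval --config qwen2_5/evaluation tasks=["truthfulqa_mc2","hellaswag"]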
eleuther_eval.py recipe, _model_generate:

@@ -408,12 +408,14 @@ def _model_generate(
             dtype=self._dtype,
             decoder_max_seq_len=self.max_length,
         ):
+
             toks, _ = generate(
                 self.model,
                 maybe_padded_context,
                 max_generated_tokens=self.max_gen_toks,
                 temperature=temperature,
                 top_k=None,
+                pad_id=self._tokenizer.pad_id,
                 stop_tokens=self._tokenizer.stop_tokens,
             )
         return toks[:bsz]
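The new pad_id=self._tokenizer.pad_id argument tells generate() which token id marks padding in maybe_padded_context, so padded positions in a batched prompt can be treated as padding rather than real context. A minimal, self-contained sketch of the idea (illustrative only, not the torchtune implementation; left_pad_prompts and the pad id value are made up for the example):

    # Why batched generation needs to know the pad id: prompts of different
    # lengths must first be padded into one rectangular tensor, and the
    # generator then uses the pad id to tell padding apart from real tokens.
    import torch

    def left_pad_prompts(prompts: list[list[int]], pad_id: int) -> torch.Tensor:
        """Left-pad variable-length token lists with pad_id into a [bsz, max_len] tensor."""
        max_len = max(len(p) for p in prompts)
        padded = torch.full((len(prompts), max_len), pad_id, dtype=torch.long)
        for i, p in enumerate(prompts):
            padded[i, max_len - len(p):] = torch.tensor(p, dtype=torch.long)
        return padded

    # Example with a hypothetical pad_id of 0 and two prompts of unequal length.
    batch = left_pad_prompts([[5, 6, 7], [8, 9]], pad_id=0)
    print(batch)       # tensor([[5, 6, 7], [0, 8, 9]])
    print(batch == 0)  # True exactly at padded positions -> usable as a padding mask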