
Commit 816be06

update evaluation_strategy to eval_strategy in all examples and transformers_doc en cases (#570)

Authored by YAO Matrix

* update `evaluation_strategy` to `eval_strategy` in all examples and transformers_doc cases
  Signed-off-by: YAO Matrix <[email protected]>
* update
  Signed-off-by: YAO Matrix <[email protected]>
* fix typo
  Signed-off-by: YAO Matrix <[email protected]>

1 parent 9743875 commit 816be06

67 files changed: +79 −79 lines changed

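The substance of the commit: recent `transformers` releases deprecate the `evaluation_strategy` keyword of `TrainingArguments` in favor of `eval_strategy`, and every example and English transformers_doc notebook here is updated accordingly. A minimal before/after sketch, assuming a `transformers` version new enough to accept the renamed keyword ("out" is a hypothetical output directory):

    from transformers import TrainingArguments

    # Before (deprecated keyword, warns on recent releases):
    #   args = TrainingArguments("out", evaluation_strategy="epoch")

    # After (the form applied throughout this commit):
    args = TrainingArguments(
        "out",
        eval_strategy="epoch",   # evaluate at the end of every epoch
        save_strategy="epoch",   # keep checkpointing aligned with evaluation
    )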

examples/audio_classification.ipynb (+1 −1)

@@ -1370,7 +1370,7 @@
 "\n",
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-ks\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=3e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/idefics/finetune_image_captioning.py (+1 −1)

@@ -104,7 +104,7 @@ def ds_transforms(example_batch):
 gradient_accumulation_steps=2,
 dataloader_pin_memory=False,
 save_total_limit=3,
-evaluation_strategy="steps",
+eval_strategy="steps",
 save_strategy="steps",
 save_steps=1000, # don't save until ready...
 eval_steps=40,

examples/idefics/finetune_image_captioning_peft.ipynb (+1 −1)

@@ -765,7 +765,7 @@
 " gradient_accumulation_steps=8,\n",
 " dataloader_pin_memory=False,\n",
 " save_total_limit=3,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " save_strategy=\"steps\",\n",
 " save_steps=40,\n",
 " eval_steps=20,\n",

examples/idefics/idefics_zero3_finetuning/idefics_zero3_finetuning.py (+1 −1)

@@ -113,7 +113,7 @@ def ds_transforms(example_batch):
 # gradient_checkpointing=True, # Uncomment if OOM
 dataloader_pin_memory=False,
 save_total_limit=3,
-evaluation_strategy="steps",
+eval_strategy="steps",
 save_strategy="steps",
 save_steps=40,
 eval_steps=20,

examples/image_classification.ipynb (+1 −1)

@@ -1221,7 +1221,7 @@
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-eurosat\",\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=5e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/image_classification_albumentations.ipynb (+1 −1)

@@ -1305,7 +1305,7 @@
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-eurosat-albumentations\",\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=5e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/image_classification_kornia.ipynb (+1 −1)

@@ -1432,7 +1432,7 @@
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-eurosat-kornia\",\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=5e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/language_modeling.ipynb (+2 −2)

@@ -709,7 +709,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "training_args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-wikitext2\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " weight_decay=0.01,\n",
 " push_to_hub=True,\n",
@@ -1064,7 +1064,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "training_args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-wikitext2\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " weight_decay=0.01,\n",
 " push_to_hub=True,\n",

examples/language_modeling_from_scratch.ipynb (+2 −2)

@@ -703,7 +703,7 @@
 "source": [
 "training_args = TrainingArguments(\n",
 " f\"{model_checkpoint}-wikitext2\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " weight_decay=0.01,\n",
 " push_to_hub=True\n",
@@ -1342,7 +1342,7 @@
 "source": [
 "training_args = TrainingArguments(\n",
 " \"test-clm\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " weight_decay=0.01,\n",
 " push_to_hub=True,\n",

examples/multi_lingual_speech_recognition.ipynb (+1 −1)

@@ -2455,7 +2455,7 @@
 " group_by_length=True,\n",
 " per_device_train_batch_size=batch_size,\n",
 " gradient_accumulation_steps=2,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " num_train_epochs=30,\n",
 " gradient_checkpointing=True,\n",
 " fp16=True,\n",

examples/multiple_choice.ipynb (+1 −1)

@@ -960,7 +960,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-swag\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=5e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",

examples/nucleotide_transformer_dna_sequence_modelling.ipynb (+2 −2)

@@ -672,7 +672,7 @@
 "args_promoter = TrainingArguments(\n",
 " f\"{model_name}-finetuned-NucleotideTransformer\",\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " save_strategy=\"steps\",\n",
 " learning_rate=1e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
@@ -1348,7 +1348,7 @@
 "args_enhancers = TrainingArguments(\n",
 " f\"{model_name}-finetuned-NucleotideTransformer\",\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " save_strategy=\"steps\",\n",
 " learning_rate=1e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/nucleotide_transformer_dna_sequence_modelling_with_peft.ipynb (+2 −2)

@@ -882,7 +882,7 @@
 "args_promoter = TrainingArguments(\n",
 " f\"{model_name}-finetuned-lora-NucleotideTransformer\",\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " save_strategy=\"steps\",\n",
 " learning_rate=5e-4,\n",
 " per_device_train_batch_size=batch_size,\n",
@@ -1901,7 +1901,7 @@
 "args_enhancers = TrainingArguments(\n",
 " f\"{model_name}-finetuned-lora-NucleotideTransformer\",\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " save_strategy=\"steps\",\n",
 " learning_rate=5e-4,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/patch_tsmixer.ipynb (+2 −2)

@@ -629,7 +629,7 @@
 " learning_rate=0.001,\n",
 " num_train_epochs=100, # For a quick test of this notebook, set it to 1\n",
 " do_eval=True,\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",
 " dataloader_num_workers=num_workers,\n",
@@ -1056,7 +1056,7 @@
 " learning_rate=0.0001,\n",
 " num_train_epochs=100,\n",
 " do_eval=True,\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",
 " dataloader_num_workers=num_workers,\n",

examples/patch_tst.ipynb (+2 −2)

@@ -796,7 +796,7 @@
 " # learning_rate=0.001,\n",
 " num_train_epochs=100,\n",
 " do_eval=True,\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",
 " dataloader_num_workers=num_workers,\n",
@@ -1120,7 +1120,7 @@
 " learning_rate=0.0001,\n",
 " num_train_epochs=100,\n",
 " do_eval=True,\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",
 " dataloader_num_workers=num_workers,\n",

examples/protein_language_modeling.ipynb (+2 −2)

@@ -1069,7 +1069,7 @@
 "\n",
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-localization\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
@@ -2084,7 +2084,7 @@
 "\n",
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-secondary-structure\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=1e-4,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/question_answering.ipynb (+1 −1)

@@ -1189,7 +1189,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-squad\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",

examples/question_answering_ort.ipynb (+2 −2)

@@ -632,7 +632,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "args = ORTTrainingArguments(\n",
 " output_dir=f\"{model_name}-finetuned-squad\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",
@@ -963,7 +963,7 @@
 " model_name = model_checkpoint.split(\"/\")[-1]\n",
 " args = ORTTrainingArguments(\n",
 " output_dir=f\"{model_name}-finetuned-squad\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",

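A note on the ORT notebooks: `ORTTrainingArguments` (and `ORTSeq2SeqTrainingArguments`, further down) come from Optimum and subclass the corresponding `transformers` argument classes, so they inherit the same rename. A sketch under that assumption, with an illustrative output directory:

    from optimum.onnxruntime import ORTTrainingArguments

    args = ORTTrainingArguments(
        output_dir="bert-finetuned-squad",  # illustrative name
        eval_strategy="epoch",              # renamed keyword, inherited from TrainingArguments
        learning_rate=2e-5,
    )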
examples/semantic_segmentation.ipynb (+1 −1)

@@ -854,7 +854,7 @@
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",
 " save_total_limit=3,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " save_strategy=\"steps\",\n",
 " save_steps=20,\n",
 " eval_steps=20,\n",

examples/speech_recognition.ipynb (+1 −1)

@@ -2005,7 +2005,7 @@
 " output_dir=repo_name,\n",
 " group_by_length=True,\n",
 " per_device_train_batch_size=32,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " num_train_epochs=30,\n",
 " fp16=True,\n",
 " gradient_checkpointing=True,\n",

examples/summarization.ipynb (+1 −1)

@@ -834,7 +834,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "args = Seq2SeqTrainingArguments(\n",
 " f\"{model_name}-finetuned-xsum\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",

examples/summarization_ort.ipynb (+1 −1)

@@ -638,7 +638,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "args = ORTSeq2SeqTrainingArguments(\n",
 " f\"{model_name}-finetuned-xsum\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=learning_rate,\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",

examples/text_classification.ipynb (+1 −1)

@@ -994,7 +994,7 @@
 "\n",
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-{task}\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/text_classification_ort.ipynb (+1 −1)

@@ -899,7 +899,7 @@
 "\n",
 "args = ORTTrainingArguments(\n",
 " f\"{model_name}-finetuned-{task}\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/text_classification_quantization_inc.ipynb (+1 −1)

@@ -506,7 +506,7 @@
 " output_dir = save_directory,\n",
 " do_train=True,\n",
 " do_eval=False,\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " save_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

examples/token_classification.ipynb (+1 −1)

@@ -1053,7 +1053,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "args = TrainingArguments(\n",
 " f\"{model_name}-finetuned-{task}\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",

examples/translation.ipynb (+1 −1)

@@ -803,7 +803,7 @@
 "model_name = model_checkpoint.split(\"/\")[-1]\n",
 "args = Seq2SeqTrainingArguments(\n",
 " f\"{model_name}-finetuned-{source_lang}-to-{target_lang}\",\n",
-" evaluation_strategy = \"epoch\",\n",
+" eval_strategy = \"epoch\",\n",
 " learning_rate=2e-5,\n",
 " per_device_train_batch_size=batch_size,\n",
 " per_device_eval_batch_size=batch_size,\n",

examples/video_classification.ipynb (+1 −1)

@@ -1086,7 +1086,7 @@
 "args = TrainingArguments(\n",
 " new_model_name,\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " save_strategy=\"epoch\",\n",
 " learning_rate=5e-5,\n",
 " per_device_train_batch_size=batch_size,\n",

transformers_doc/en/asr.ipynb (+1 −1)

@@ -526,7 +526,7 @@
 " gradient_checkpointing=True,\n",
 " fp16=True,\n",
 " group_by_length=True,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " per_device_eval_batch_size=8,\n",
 " save_steps=1000,\n",
 " eval_steps=1000,\n",

transformers_doc/en/audio_classification.ipynb (+1 −1)

@@ -471,7 +471,7 @@
 "source": [
 "training_args = TrainingArguments(\n",
 " output_dir=\"my_awesome_mind_model\",\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " save_strategy=\"epoch\",\n",
 " learning_rate=3e-5,\n",
 " per_device_train_batch_size=32,\n",

transformers_doc/en/document_question_answering.ipynb (+1 −1)

@@ -680,7 +680,7 @@
 " num_train_epochs=20,\n",
 " save_steps=200,\n",
 " logging_steps=50,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " learning_rate=5e-5,\n",
 " save_total_limit=2,\n",
 " remove_unused_columns=False,\n",

transformers_doc/en/image_captioning.ipynb (+1 −1)

@@ -315,7 +315,7 @@
 " per_device_eval_batch_size=32,\n",
 " gradient_accumulation_steps=2,\n",
 " save_total_limit=3,\n",
-" evaluation_strategy=\"steps\",\n",
+" eval_strategy=\"steps\",\n",
 " eval_steps=50,\n",
 " save_strategy=\"steps\",\n",
 " save_steps=50,\n",

transformers_doc/en/image_classification.ipynb (+1 −1)

@@ -552,7 +552,7 @@
 "training_args = TrainingArguments(\n",
 " output_dir=\"my_awesome_food_model\",\n",
 " remove_unused_columns=False,\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " save_strategy=\"epoch\",\n",
 " learning_rate=5e-5,\n",
 " per_device_train_batch_size=16,\n",

transformers_doc/en/language_modeling.ipynb (+1 −1)

@@ -467,7 +467,7 @@
 "source": [
 "training_args = TrainingArguments(\n",
 " output_dir=\"my_awesome_eli5_clm-model\",\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " learning_rate=2e-5,\n",
 " weight_decay=0.01,\n",
 " push_to_hub=True,\n",

transformers_doc/en/masked_language_modeling.ipynb (+1 −1)

@@ -458,7 +458,7 @@
 "source": [
 "training_args = TrainingArguments(\n",
 " output_dir=\"my_awesome_eli5_mlm_model\",\n",
-" evaluation_strategy=\"epoch\",\n",
+" eval_strategy=\"epoch\",\n",
 " learning_rate=2e-5,\n",
 " num_train_epochs=3,\n",
 " weight_decay=0.01,\n",

transformers_doc/en/multilingual.ipynb (+1 −1)

@@ -378,7 +378,7 @@
 }
 ],
 "source": [
-"generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id(\"en_XX\"))\n",
+"generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id[\"en_XX\"])\n",
 "tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)"
 ]
 },
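The multilingual.ipynb hunk is the "fix typo" part of the commit rather than the rename: `lang_code_to_id` on the MBart-50 tokenizer is a dict mapping language codes to token ids, so it must be indexed, not called. A sketch of the corrected pattern; the checkpoint and the French input are illustrative:

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    checkpoint = "facebook/mbart-large-50-many-to-many-mmt"  # illustrative MBart-50 checkpoint
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

    tokenizer.src_lang = "fr_XX"  # source language of the input text
    encoded = tokenizer("Ne me quitte pas.", return_tensors="pt")
    generated_tokens = model.generate(
        **encoded,
        # dict lookup, not a call: gives the token id that forces English output
        forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"],
    )
    print(tokenizer.batch_decode(generated_tokens, skip_special_tokens=True))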
