diff --git a/requirements.txt b/requirements.txt
index 6a29adb..491497f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,5 @@
 datasets>=2.11.0
+deepspeed>=0.9.2
 hydra-core>=1.3.2
 tokenizers>=0.13.2
 torch>=2.0.0
diff --git a/src/config/trainer/default.yaml b/src/config/trainer/default.yaml
index b58630f..cbd971f 100644
--- a/src/config/trainer/default.yaml
+++ b/src/config/trainer/default.yaml
@@ -19,3 +19,4 @@ fp16: false
 lr_scheduler_type: cosine
 warmup_ratio: 0.15
 evaluation_strategy: steps
+deepspeed_config: src/config/trainer/zero.json
diff --git a/src/config/trainer/zero.json b/src/config/trainer/zero.json
new file mode 100644
index 0000000..fc2ba3d
--- /dev/null
+++ b/src/config/trainer/zero.json
@@ -0,0 +1,46 @@
+{
+    "fp16": {
+        "enabled": "auto",
+        "loss_scale": 0,
+        "loss_scale_window": 1000,
+        "initial_scale_power": 16,
+        "hysteresis": 2,
+        "min_loss_scale": 1
+    },
+    "bf16": {
+        "enabled": "auto"
+    },
+    "optimizer": {
+        "type": "AdamW",
+        "params": {
+            "lr": "auto",
+            "betas": "auto",
+            "eps": "auto",
+            "weight_decay": "auto"
+        }
+    },
+    "scheduler": {
+        "type": "WarmupDecayLR",
+        "params": {
+            "warmup_min_lr": "auto",
+            "warmup_max_lr": "auto",
+            "warmup_num_steps": "auto",
+            "total_num_steps": "auto"
+        }
+    },
+    "zero_optimization": {
+        "stage": 2,
+        "allgather_partitions": true,
+        "allgather_bucket_size": 5e8,
+        "overlap_comm": false,
+        "reduce_scatter": true,
+        "reduce_bucket_size": 5e8,
+        "contiguous_gradients": true
+    },
+    "gradient_accumulation_steps": "auto",
+    "gradient_clipping": "auto",
+    "steps_per_print": 2000,
+    "train_batch_size": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+    "wall_clock_breakdown": false
+}
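
Reviewer note: below is a minimal sketch, not part of this patch, of how the new zero.json is expected to be consumed, assuming the Hydra trainer.deepspeed_config value is forwarded to transformers.TrainingArguments (whose deepspeed parameter accepts a path to a DeepSpeed JSON config). The entry-point name train.py and the output_dir value are illustrative assumptions, not code from this repository.

    # Sketch only -- assumes the Hydra config value is passed straight through
    # to TrainingArguments; script and directory names here are hypothetical.
    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="outputs",        # illustrative; not defined in this patch
        fp16=False,                  # mirrors src/config/trainer/default.yaml
        lr_scheduler_type="cosine",
        warmup_ratio=0.15,
        evaluation_strategy="steps",
        # The HF Trainer resolves every "auto" field in zero.json (lr, betas,
        # warmup steps, batch sizes, gradient accumulation and clipping) from
        # these arguments at launch time.
        deepspeed="src/config/trainer/zero.json",
    )

    # Run under the DeepSpeed launcher so ZeRO stage 2 can partition optimizer
    # state and gradients across ranks, e.g.:
    #   deepspeed --num_gpus=8 train.py trainer=default

Keeping every tunable at "auto" means the JSON never competes with the Hydra/TrainingArguments values, so there is a single source of truth for hyperparameters; only the ZeRO partitioning behavior (stage 2, bucket sizes, comm overlap) is pinned in the JSON itself.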