
[Llama 3.1 405B] RCPs #404


Merged · 8 commits · Feb 21, 2025
4 changes: 3 additions & 1 deletion mlperf_logging/benchmark_meta.py
@@ -19,6 +19,7 @@
'gnn' : 10,
'rgat': 10,
'llama2_70b_lora': 10,
+ 'llama31_405b': 3,
},

'hpc' : {
@@ -140,7 +141,8 @@
'retinanet',
'stable_diffusion',
'llama2_70b_lora',
- 'rgat'
+ 'rgat',
+ 'llama31_405b'
]
},

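For context: this table appears to pair each benchmark with the number of converged result files a submission is expected to provide, and the new entry sets that count to 3 for llama31_405b (versus 10 for most other training benchmarks), reflecting how expensive a single 405B pretraining run is. A minimal sketch of how such a table can be consulted; the REQUIRED_RUNS literal below simply restates the hunk above, and required_runs is a hypothetical helper used only for illustration:

# Restates the mapping from the hunk above; `required_runs` is a hypothetical
# helper for illustration, not an actual function in benchmark_meta.py.
REQUIRED_RUNS = {
    'training': {
        'gnn': 10,
        'rgat': 10,
        'llama2_70b_lora': 10,
        'llama31_405b': 3,  # new in this PR
    },
}

def required_runs(usage: str, benchmark: str) -> int:
    return REQUIRED_RUNS[usage][benchmark]

print(required_runs('training', 'llama31_405b'))  # 3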
@@ -2,7 +2,7 @@
- KEY:
    NAME: submission_benchmark
    REQ: EXACTLY_ONE
-   CHECK: " v['value'] in ['retinanet', 'stable_diffusion', 'dlrm_dcnv2', 'bert', 'rgat', 'llama2_70b_lora'] "
+   CHECK: " v['value'] in ['retinanet', 'stable_diffusion', 'dlrm_dcnv2', 'bert', 'rgat', 'llama2_70b_lora', 'llama31_405b'] "
    POST: " enqueue_config('training_5.0.0/closed_{}.yaml'.format(v['value'])) "

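With the benchmark added to the allowed-values list, the POST hook chains in the benchmark-specific rule file: the logged benchmark name is formatted into a per-benchmark config path that the compliance checker loads next. A quick illustration of the path produced for the new benchmark (plain string formatting, independent of the checker itself):

# The POST expression builds the per-benchmark rules path from the logged value.
benchmark = 'llama31_405b'
print('training_5.0.0/closed_{}.yaml'.format(benchmark))
# -> training_5.0.0/closed_llama31_405b.yaml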
@@ -0,0 +1,80 @@
- KEY:
    NAME: global_batch_size
    REQ: EXACTLY_ONE
    POST: >
        s['global_batch_size'] = v['value']

- KEY:
    NAME: max_sequence_length
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 8192 "

- KEY:
    NAME: opt_name
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 'adam' "

- KEY:
    NAME: opt_base_learning_rate
    REQ: EXACTLY_ONE
    CHECK: " v['value'] * 1152 == s['global_batch_size'] * 8e-5 "

- KEY:
    NAME: opt_end_learning_rate
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_learning_rate_decay_steps
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_learning_rate_warmup_steps
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_learning_rate_decay_schedule
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 'cosine with linear warmup' "

- KEY:
    NAME: opt_adam_beta_1
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 0.9 "

- KEY:
    NAME: opt_adam_beta_2
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 0.95 "

- KEY:
    NAME: opt_adam_epsilon
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 1e-05 "

- KEY:
    NAME: opt_gradient_clip_norm
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 1.0 "

- KEY:
    NAME: gradient_accumulation_steps
    REQ: EXACTLY_ONE
    CHECK: " v['value'] > 0 "

- KEY:
    NAME: eval_samples
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 5760 "

- KEY:
    NAME: eval_accuracy
    REQ: AT_LEAST_ONE
    CHECK:
        - "'epoch_num' in v['metadata']"
    ATLEAST_ONE_CHECK: "(v['value'] <= 5.6) and v['value'] > 0.0"

- KEY:
    NAME: init_checkpoint_step
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 0 "
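In the closed division most optimizer hyperparameters are pinned to fixed values (Adam betas, epsilon, gradient-clipping norm, eval_samples), while opt_base_learning_rate is tied to the global batch size through a linear scaling rule anchored at the reference point of batch size 1152 and learning rate 8e-5; the RCP configurations added later in this PR double the learning rate each time the batch size doubles. A small sketch of that scaling relationship, with the (GBS, LR) pairs taken from the RCP entries:

# Linear LR scaling anchored at the reference point (GBS=1152, lr=8e-5).
def scaled_lr(global_batch_size):
    # batch-size ratio first (1, 2, 4 for the reference configs), then scale the base LR
    return 8e-5 * (global_batch_size / 1152)

# (GBS, base LR) pairs taken from the RCP entries added in this PR.
for gbs, lr in [(1152, 8e-5), (2304, 16e-5), (4608, 32e-5)]:
    print(gbs, scaled_lr(gbs), lr == scaled_lr(gbs))  # True for all three pairs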

@@ -2,5 +2,5 @@
- KEY:
    NAME: submission_benchmark
    REQ: EXACTLY_ONE
-   CHECK: " v['value'] in ['retinanet', 'stable_diffusion', 'dlrm_dcnv2', 'bert', 'rgat', 'llama2_70b_lora'] "
+   CHECK: " v['value'] in ['retinanet', 'stable_diffusion', 'dlrm_dcnv2', 'bert', 'rgat', 'llama2_70b_lora', 'llama31_405b'] "
    POST: " enqueue_config('training_5.0.0/open_{}.yaml'.format(v['value'])) "
@@ -0,0 +1,74 @@
- KEY:
    NAME: global_batch_size
    REQ: EXACTLY_ONE
    POST: >
        s['global_batch_size'] = v['value']

- KEY:
    NAME: max_sequence_length
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 8192 "

- KEY:
    NAME: opt_name
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 'adam' "

- KEY:
    NAME: opt_base_learning_rate
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_end_learning_rate
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_learning_rate_decay_steps
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_learning_rate_warmup_steps
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_learning_rate_decay_schedule
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_adam_beta_1
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_adam_beta_2
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_adam_epsilon
    REQ: EXACTLY_ONE

- KEY:
    NAME: opt_gradient_clip_norm
    REQ: EXACTLY_ONE

- KEY:
    NAME: gradient_accumulation_steps
    REQ: EXACTLY_ONE
    CHECK: " v['value'] > 0 "

- KEY:
    NAME: eval_samples
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 5760 "

- KEY:
    NAME: eval_accuracy
    REQ: AT_LEAST_ONE
    CHECK:
        - "'epoch_num' in v['metadata']"
    ATLEAST_ONE_CHECK: "(v['value'] <= 5.6) and v['value'] > 0.0"

- KEY:
    NAME: init_checkpoint_step
    REQ: EXACTLY_ONE
    CHECK: " v['value'] == 0 "
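The open-division rules keep the same set of required keys but drop most value constraints: the learning-rate schedule and Adam hyperparameters must still be logged exactly once yet may be tuned freely, while the structural checks (sequence length, eval_samples, the 5.6 eval_accuracy target, and the zero initial checkpoint step) remain. A simplified sketch of how one such rule entry could be applied to parsed log events; the real compliance checker is more involved, so treat this only as an illustration of the REQ/CHECK semantics:

# Toy evaluation of a single rule against parsed log events (illustrative only).
rule = {
    'NAME': 'opt_base_learning_rate',
    'REQ': 'EXACTLY_ONE',
    'CHECK': None,  # open division: the key must be logged, but its value is free
}

events = [{'key': 'opt_base_learning_rate', 'value': 3e-4, 'metadata': {}}]

matches = [e for e in events if e['key'] == rule['NAME']]
assert len(matches) == 1, 'EXACTLY_ONE: key must appear exactly once'
for v in matches:
    if rule['CHECK']:
        # CHECK strings in these YAML files are Python expressions over v (and s)
        assert eval(rule['CHECK']), 'value constraint failed'
print('rule satisfied')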

1 change: 1 addition & 0 deletions mlperf_logging/mllog/constants.py
@@ -54,6 +54,7 @@
LLAMA2_70B_LORA = "llama2_70b_lora"
GNN = "gnn"
RGAT = "rgat"
+ LLAMA31_405B = "llama31_405b"

# Constant values - model info
ADAGRAD = "adagrad"
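Reference implementations and submission logs refer to the benchmark via this constant rather than a bare string. A brief sketch of how the new constant would typically be emitted with the package's mllog logger; the surrounding call pattern is standard mllog usage and is shown only to situate the new name:

from mlperf_logging import mllog
from mlperf_logging.mllog import constants

mllogger = mllog.get_mllogger()
# Tag the run with the benchmark name introduced in this PR.
mllogger.event(key=constants.SUBMISSION_BENCHMARK, value=constants.LLAMA31_405B)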
1 change: 1 addition & 0 deletions mlperf_logging/rcp_checker/rcp_checker.py
@@ -32,6 +32,7 @@
'gnn': 10,
'rgat': 10,
'llama2_70b_lora': 10,
+ 'llama31_405b': 3,
},
"hpc": {
'cosmoflow': 10,
@@ -0,0 +1,55 @@
{
"llama31_405b_ref_1152":
{
"Benchmark": "llama31_405b",
"Creator": "NVIDIA",
"When": "Reference RCPs before 5.0 submission",
"Platform": "288xDGX-H100",
"BS": 1152,
"Hyperparams": {
"opt_base_learning_rate": 8e-05,
"opt_learning_rate_warmup_steps": 8000,
"gradient_accumulation_steps": 144
},
"Epochs to converge": [
322560,322560,322560,
322560,322560,322560
]
},

"llama31_405b_ref_2304":
{
"Benchmark": "llama31_405b",
"Creator": "NVIDIA",
"When": "Reference RCPs before 5.0 submission",
"Platform": "288xDGX-H100",
"BS": 2304,
"Hyperparams": {
"opt_base_learning_rate": 16e-05,
"opt_learning_rate_warmup_steps": 4000,
"gradient_accumulation_steps": 288
},
"Epochs to converge": [
368640,368640,368640,
368640,414720,414720
]
},
"llama31_405b_ref_4608":
{
"Benchmark": "llama31_405b",
"Creator": "NVIDIA",
"When": "Reference RCPs before 5.0 submission",
"Platform": "288xDGX-H100",
"BS": 4608,
"Hyperparams": {
"opt_base_learning_rate": 32e-05,
"opt_learning_rate_warmup_steps": 2000,
"gradient_accumulation_steps": 576
},
"Epochs to converge": [
460800,460800,506880,
506880,506880,506880
]
}
}
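Each RCP entry records, for one reference configuration, the platform, global batch size, key hyperparameters, and the convergence points of six reference runs (for this benchmark the "Epochs to converge" values are sample counts rather than literal epochs). A short sketch that summarizes the entries above; the file name is illustrative and assumes the JSON has been saved locally:

import json
import statistics

# Illustrative path; assumes the RCP JSON above was saved to this file.
with open('llama31_405b_rcps.json') as f:
    rcps = json.load(f)

for name, rcp in rcps.items():
    runs = rcp['Epochs to converge']  # six reference convergence points (in samples)
    print(f"{name}: BS={rcp['BS']}, "
          f"lr={rcp['Hyperparams']['opt_base_learning_rate']}, "
          f"mean samples to converge={statistics.mean(runs):.0f}")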

1 change: 1 addition & 0 deletions mlperf_logging/result_summarizer/config.yaml
@@ -91,6 +91,7 @@ columns:
stable_diffusion: ["Benchmark results (minutes)", "Text to image", "Laion 400m and Coco-2017", "StableDiffusion"]
llama2_70b_lora: ["Benchmark results (minutes)", "LLM-Finetune", "SCROLSS Gov Report", "LLama2-70B-LoRA"]
rgat: ["Benchmark results (minutes)", "Graph node classification", "IGBH-Full", "R-GAT"]
+ llama31_405b: ["Benchmark results (minutes)", "LLM", "C4", "Llama31-405B"]
default: [" ", " ", " "]

hpc: