From 686417ebadeec668f26c834679e7a2fcfa07de6d Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Mon, 10 Feb 2025 14:30:48 -0800 Subject: [PATCH 01/12] update attention-smithy class name --- scripts/1_train_model.py | 6 +++--- scripts/1_train_model__command_lines.py | 6 +++--- scripts/2_evaluate_model__small.py | 6 +++--- scripts/model_script_for_nas.py | 6 +++--- src/machine_translation/MachineTranslationModel.py | 12 ++++++------ 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/scripts/1_train_model.py b/scripts/1_train_model.py index ae9a321..f202e90 100644 --- a/scripts/1_train_model.py +++ b/scripts/1_train_model.py @@ -6,7 +6,7 @@ from pytorch_lightning.callbacks import ModelCheckpoint from machine_translation import MachineTranslationModel from machine_translation.data import MachineTranslationDataModule -from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, NumericEmbeddingFacade +from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, NumericEmbeddingManager from attention_smithy.components import MultiheadAttention, FeedForwardNetwork from attention_smithy.attention import StandardAttentionMethod from attention_smithy.utils import seed_everything @@ -64,7 +64,7 @@ def train_model( ) sinusoidal_position_embedding = SinusoidalPositionEmbedding(embed_dim) - numeric_embedding_facade = NumericEmbeddingFacade(sinusoidal_position=sinusoidal_position_embedding) + numeric_embedding_manager = NumericEmbeddingManager(sinusoidal_position=sinusoidal_position_embedding) generic_attention = MultiheadAttention(embedding_dimension = embed_dim, number_of_heads = num_heads, attention_method = StandardAttentionMethod(dropout)) decoder_self_attention = MultiheadAttention(embedding_dimension = embed_dim, number_of_heads = num_heads, attention_method = StandardAttentionMethod(dropout, is_causal_masking=True)) feedforward_network = FeedForwardNetwork(embed_dim, dim_feedforward, 'relu', dropout) @@ -76,7 +76,7 @@ def train_model( decoder_self_attention=decoder_self_attention, decoder_cross_attention=generic_attention, feedforward_network=feedforward_network, - numeric_embedding_facade=numeric_embedding_facade, + numeric_embedding_manager=numeric_embedding_manager, tgt_padding_token=data_module.en_pad_token, embedding_dimension=embed_dim, num_encoder_layers=number_of_layers, diff --git a/scripts/1_train_model__command_lines.py b/scripts/1_train_model__command_lines.py index 7ef0c6a..8b2868f 100644 --- a/scripts/1_train_model__command_lines.py +++ b/scripts/1_train_model__command_lines.py @@ -12,7 +12,7 @@ from machine_translation import MachineTranslationModel from machine_translation.data import MachineTranslationDataModule -from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, LearnedPositionEmbedding, RotaryPositionEmbedding, ALiBiPositionEmbedding, NumericEmbeddingFacade, NoAddEmbedding, PassthroughEmbedding +from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, LearnedPositionEmbedding, RotaryPositionEmbedding, ALiBiPositionEmbedding, NumericEmbeddingManager, NoAddEmbedding, PassthroughEmbedding from attention_smithy.components import MultiheadAttention, FeedForwardNetwork from attention_smithy.attention import StandardAttentionMethod from attention_smithy.utils import seed_everything @@ -52,7 +52,7 @@ def run_training_job(parsed_args): rotary_position_embedding = RotaryPositionEmbedding(parsed_args.embedding_dimension // parsed_args.number_of_heads) if 
parsed_args.rotary_position else PassthroughEmbedding() alibi_position_embedding = ALiBiPositionEmbedding(parsed_args.number_of_heads) if parsed_args.alibi_position else NoAddEmbedding() - numeric_embedding_facade = NumericEmbeddingFacade(sinusoidal_position=sinusoidal_position_embedding, learned_position=learned_position_embedding, rotary_position=rotary_position_embedding, alibi_position=alibi_position_embedding) + numeric_embedding_manager = NumericEmbeddingManager(sinusoidal_position=sinusoidal_position_embedding, learned_position=learned_position_embedding, rotary_position=rotary_position_embedding, alibi_position=alibi_position_embedding) generic_attention = MultiheadAttention(embedding_dimension= parsed_args.embedding_dimension, number_of_heads= parsed_args.number_of_heads, attention_method= StandardAttentionMethod(parsed_args.dropout)) decoder_self_attention = MultiheadAttention(embedding_dimension= parsed_args.embedding_dimension, number_of_heads= parsed_args.number_of_heads, attention_method= StandardAttentionMethod(parsed_args.dropout, is_causal_masking=True)) feedforward_network = FeedForwardNetwork(parsed_args.embedding_dimension, parsed_args.feedforward_dimension, parsed_args.activation, parsed_args.dropout) @@ -63,7 +63,7 @@ def run_training_job(parsed_args): decoder_self_attention=decoder_self_attention, decoder_cross_attention=generic_attention, feedforward_network=feedforward_network, - numeric_embedding_facade=numeric_embedding_facade, + numeric_embedding_manager=numeric_embedding_manager, tgt_padding_token=data_module.en_pad_token, embedding_dimension=parsed_args.embedding_dimension, num_encoder_layers=parsed_args.number_of_layers, diff --git a/scripts/2_evaluate_model__small.py b/scripts/2_evaluate_model__small.py index c0b6af4..adeff72 100644 --- a/scripts/2_evaluate_model__small.py +++ b/scripts/2_evaluate_model__small.py @@ -7,7 +7,7 @@ from pytorch_lightning.callbacks import ModelCheckpoint from machine_translation import MachineTranslationModel from machine_translation.data import MachineTranslationDataModule -from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, NumericEmbeddingFacade +from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, NumericEmbeddingManager from attention_smithy.components import MultiheadAttention, FeedForwardNetwork from attention_smithy.attention import StandardAttentionMethod from attention_smithy.utils import seed_everything @@ -108,7 +108,7 @@ def on_train_epoch_end(self, pl_module, val_dataloader, **kwargs): print(bleu_score) sinusoidal_position_embedding = SinusoidalPositionEmbedding(embed_dim) - numeric_embedding_facade = NumericEmbeddingFacade(sinusoidal_position=sinusoidal_position_embedding) + numeric_embedding_manager = NumericEmbeddingManager(sinusoidal_position=sinusoidal_position_embedding) generic_attention = MultiheadAttention(embedding_dimension = embed_dim, number_of_heads = num_heads, attention_method = StandardAttentionMethod(dropout)) decoder_self_attention = MultiheadAttention(embedding_dimension = embed_dim, number_of_heads = num_heads, attention_method = StandardAttentionMethod(dropout, is_causal_masking=True)) feedforward_network = FeedForwardNetwork(embed_dim, dim_feedforward, 'relu', dropout) @@ -122,7 +122,7 @@ def on_train_epoch_end(self, pl_module, val_dataloader, **kwargs): decoder_self_attention=decoder_self_attention, decoder_cross_attention=generic_attention, feedforward_network=feedforward_network, - numeric_embedding_facade=numeric_embedding_facade, + 
numeric_embedding_manager=numeric_embedding_manager, tgt_padding_token=data_module.en_pad_token, embedding_dimension=embed_dim, num_encoder_layers=number_of_layers, diff --git a/scripts/model_script_for_nas.py b/scripts/model_script_for_nas.py index 11adcb1..13ce429 100644 --- a/scripts/model_script_for_nas.py +++ b/scripts/model_script_for_nas.py @@ -42,7 +42,7 @@ def write(self, buf): from machine_translation import MachineTranslationModel from machine_translation.data import MachineTranslationDataModule -from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, LearnedPositionEmbedding, RotaryPositionEmbedding, ALiBiPositionEmbedding, NumericEmbeddingFacade, NoAddEmbedding, PassthroughEmbedding +from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, LearnedPositionEmbedding, RotaryPositionEmbedding, ALiBiPositionEmbedding, NumericEmbeddingManager, NoAddEmbedding, PassthroughEmbedding from attention_smithy.components import MultiheadAttention, FeedForwardNetwork from attention_smithy.attention import StandardAttentionMethod from attention_smithy.utils import seed_everything @@ -84,7 +84,7 @@ def run_training_job(parsed_args): rotary_position_embedding = RotaryPositionEmbedding(parsed_args.embedding_dimension // parsed_args.number_of_heads) if parsed_args.rotary_position else PassthroughEmbedding() alibi_position_embedding = ALiBiPositionEmbedding(parsed_args.number_of_heads) if parsed_args.alibi_position else NoAddEmbedding() - numeric_embedding_facade = NumericEmbeddingFacade(sinusoidal_position=sinusoidal_position_embedding, learned_position=learned_position_embedding, rotary_position=rotary_position_embedding, alibi_position=alibi_position_embedding) + numeric_embedding_manager = NumericEmbeddingManager(sinusoidal_position=sinusoidal_position_embedding, learned_position=learned_position_embedding, rotary_position=rotary_position_embedding, alibi_position=alibi_position_embedding) generic_attention = MultiheadAttention(embedding_dimension= parsed_args.embedding_dimension, number_of_heads= parsed_args.number_of_heads, attention_method= StandardAttentionMethod(parsed_args.dropout)) decoder_self_attention = MultiheadAttention(embedding_dimension= parsed_args.embedding_dimension, number_of_heads= parsed_args.number_of_heads, attention_method= StandardAttentionMethod(parsed_args.dropout, is_causal_masking=True)) feedforward_network = FeedForwardNetwork(parsed_args.embedding_dimension, parsed_args.feedforward_dimension, parsed_args.activation, parsed_args.dropout) @@ -95,7 +95,7 @@ def run_training_job(parsed_args): decoder_self_attention=decoder_self_attention, decoder_cross_attention=generic_attention, feedforward_network=feedforward_network, - numeric_embedding_facade=numeric_embedding_facade, + numeric_embedding_manager=numeric_embedding_manager, tgt_padding_token=data_module.en_pad_token, embedding_dimension=parsed_args.embedding_dimension, num_encoder_layers=parsed_args.number_of_layers, diff --git a/src/machine_translation/MachineTranslationModel.py b/src/machine_translation/MachineTranslationModel.py index c4a6473..b7a64b8 100644 --- a/src/machine_translation/MachineTranslationModel.py +++ b/src/machine_translation/MachineTranslationModel.py @@ -20,7 +20,7 @@ def __init__(self, decoder_self_attention, decoder_cross_attention, feedforward_network, - numeric_embedding_facade, + numeric_embedding_manager, tgt_padding_token: int, scheduler_warmup_steps: int, loss_type: str, @@ -45,13 +45,13 @@ def __init__(self, feedforward_network 
(attention_smithy.components.feedforward): The class to be used in the feedforward block of both the encoder and the decoder. The class is duplicated and the weights are re-randomized for each duplicate. - numeric_embedding_facade (attention_smithy.numeric_embeddings.NumericEmbeddingFacade): + numeric_embedding_manager (attention_smithy.numeric_embeddings.NumericEmbeddingManager): The class that contains all numeric (position) embedding strategies to be used in the model. """ super().__init__() self.embedding_dimension = embedding_dimension - self.numeric_embedding_facade = numeric_embedding_facade + self.numeric_embedding_manager = numeric_embedding_manager self.scheduler_warmup_steps = scheduler_warmup_steps self.src_token_embedding = nn.Embedding(src_vocab_size, embedding_dimension) self.tgt_token_embedding = nn.Embedding(tgt_vocab_size, embedding_dimension) @@ -74,8 +74,8 @@ def forward(self, src_tensor, tgt_tensor, src_padding_mask, tgt_padding_mask): def forward_encode(self, src_tensor, src_padding_mask): src_embedding = self.src_token_embedding(src_tensor) * math.sqrt(self.embedding_dimension) - position_embedding = self.numeric_embedding_facade.calculate_sinusoidal_and_learned_tokenizations(src_embedding) - event_encoded = self.encoder(src=src_embedding + position_embedding, src_padding_mask=src_padding_mask, numeric_embedding_facade=self.numeric_embedding_facade) + position_embedding = self.numeric_embedding_manager.calculate_sinusoidal_and_learned_tokenizations(src_embedding) + event_encoded = self.encoder(src=src_embedding + position_embedding, src_padding_mask=src_padding_mask, numeric_embedding_manager=self.numeric_embedding_manager) return event_encoded def forward_decode(self, tgt_tensor, src_encoded, tgt_padding_mask, src_padding_mask): @@ -89,7 +89,7 @@ def forward_decode(self, tgt_tensor, src_encoded, tgt_padding_mask, src_padding_ src=src_encoded, tgt_padding_mask=tgt_padding_mask, src_padding_mask=src_padding_mask, - numeric_embedding_facade=self.numeric_embedding_facade, + numeric_embedding_manager=self.numeric_embedding_manager, ) vocabulary_logits = self.vocab_output_layer(output) return vocabulary_logits From 0c529973d802cf55e12118f1f8e6f7ca802272e1 Mon Sep 17 00:00:00 2001 From: Caleb Cranney <11773171+CCranney@users.noreply.github.com> Date: Wed, 12 Feb 2025 12:55:27 -0800 Subject: [PATCH 02/12] Update README.md --- README.md | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 17cbe01..f2f2506 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,31 @@ # machine-translation -Replication of the "Attention Is All you Need" machine translation model using AttentionSmithy. +Replication of the "[Attention Is All you Need](https://arxiv.org/abs/1706.03762)" machine translation model using [AttentionSmithy](https://github.com/xomicsdatascience/AttentionSmithy), a package for creating transformer models. + +# Main Files of note +## scripts/0_data_prep.py +This file downloads the WMT-14 German-English dataset and processes it for loading into the model. This is also where the train/val/test split occurs. + +Each dataset (train/val/test) consists of two files, one for English (en) and one for German (de), matched by line index. For example, line 5 of `train_en.txt` is the English translation of line 5 of `train_de.txt`, which consists of German text. + +The loaded dataset consists of sentences. This script converts those sentences into tokens, then adds them as a comma-delimited line to the relevant file. 
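A minimal sketch of that tokenization step is shown below. It assumes the same Hugging Face BERT tokenizers used elsewhere in this repository (in the BLEU validation callback); the function, variable, and path names are illustrative rather than the script's actual code.

```python
# Illustrative sketch of the data prep step described above; not the actual script.
# Assumes the Hugging Face tokenizers also used in the BLEU validation callback.
from transformers import AutoTokenizer

de_tokenizer = AutoTokenizer.from_pretrained('bert-base-german-cased')
en_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

def write_token_file(sentences, tokenizer, output_path):
    """Write one comma-delimited line of token IDs per input sentence."""
    with open(output_path, 'w') as f:
        for sentence in sentences:
            token_ids = tokenizer.encode(sentence)
            f.write(','.join(str(token_id) for token_id in token_ids) + '\n')

# Example usage (paths and variables are placeholders):
# write_token_file(train_de_sentences, de_tokenizer, 'data/train_de.txt')
# write_token_file(train_en_sentences, en_tokenizer, 'data/train_en.txt')
```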
+ +## scripts/1_train_model.py +Has much in common with the file `scripts/model_script_for_nas.py`, which was specific to use with a neural architecture search (NAS). This script assembles and trains the machine translation model. There are several arguments to be used with the script - below is an example usage. + +`python 1_train_model.py --loss_type custom --label_smoothing 0.9 --embed_dim 512 --dim_feedforward 2048 --number_of_layers=6` + +## src/machine_translation/MachineTranslationModel.py +The code for the model used in machine translation. It was written using pytorch lightning for readability, and thus outlines the construction of the model, the forward pass process, and how that looks for training and validation steps. + +## src/machine_translation/data/MachineTranslationDataModule.py +The code for preparing the data module used in training and validating the machine translation model. It is made to be used with the pytorch lightning Trainer class, as called in model training scripts. + +# Additional Files for interested readers +## scripts/run_nas.py +This code runs a neural architecture search (NAS). The code is based on the [Multi-Objective NAS with Ax](https://pytorch.org/tutorials/intermediate/ax_multiobjective_nas_tutorial.html) tutorial, and calls the `scripts/model_script_for_nas.py` in each pass with new parameters selected during the search. + +## src/machine_translation/data/LineIndexDataset.py +This code is used to extract specific lines from train, val or test datasets when forming a batch. Using this class allows the user to reference data efficiently without holding the entire dataset in memory. + +## src/machine_translation/data/LengthBatchSampler.py +This code groups samples together by context window length for efficient training. A similar strategy was employed in the original Attention Is All You Need paper. From 3212d869a0be55e65cbe617cbb37fe133e2a16b3 Mon Sep 17 00:00:00 2001 From: Caleb Cranney <11773171+CCranney@users.noreply.github.com> Date: Wed, 12 Feb 2025 13:01:50 -0800 Subject: [PATCH 03/12] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f2f2506..d901562 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # machine-translation Replication of the "[Attention Is All you Need](https://arxiv.org/abs/1706.03762)" machine translation model using [AttentionSmithy](https://github.com/xomicsdatascience/AttentionSmithy), a package for creating transformer models. -# Main Files of note +# Main Files ## scripts/0_data_prep.py This file downloads the WMT-14 German-English dataset and processes it for loading into the model. This is also where the train/val/test split occurs. 
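The `LineIndexDataset` described in the README above reads individual lines on demand instead of holding the full corpus in memory. The following is a hedged sketch of that general idea using precomputed byte offsets; the class name, constructor arguments, and details differ from the repository's actual implementation.

```python
# Hypothetical sketch of on-demand line access via byte offsets; the real
# LineIndexDataset in this repository differs (e.g., it pairs the German and
# English files and tracks sequence lengths for the batch sampler).
from torch.utils.data import Dataset

class ByteOffsetLineDataset(Dataset):
    def __init__(self, filepath):
        self.filepath = filepath
        self.offsets = []
        with open(filepath, 'rb') as f:
            offset = 0
            for line in f:
                self.offsets.append(offset)
                offset += len(line)

    def __len__(self):
        return len(self.offsets)

    def __getitem__(self, idx):
        # Seek directly to the requested line rather than loading the whole file.
        with open(self.filepath, 'rb') as f:
            f.seek(self.offsets[idx])
            return f.readline().decode('utf-8').rstrip('\n')
```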
From c55fc6798e3fe8d9e503248ec8b998f1c3c299b7 Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Mon, 17 Feb 2025 14:09:29 -0800 Subject: [PATCH 04/12] update so classes initialized in pl model --- pyproject.toml | 2 +- scripts/1_train_model__command_lines.py | 54 ++--- .../MachineTranslationModel.py | 187 +++++++++++++----- 3 files changed, 164 insertions(+), 79 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e97a573..4996181 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,7 +23,7 @@ dependencies = [ 'scikit-learn>=1.1.2', 'pandas>=1.2.2', 'sacrebleu>=2.4.3', - 'wandb>=0.19.0', + 'wandb>=0.19.2', ] [tool.setuptools.packages.find] diff --git a/scripts/1_train_model__command_lines.py b/scripts/1_train_model__command_lines.py index 8b2868f..aa902ff 100644 --- a/scripts/1_train_model__command_lines.py +++ b/scripts/1_train_model__command_lines.py @@ -12,9 +12,6 @@ from machine_translation import MachineTranslationModel from machine_translation.data import MachineTranslationDataModule -from attention_smithy.numeric_embeddings import SinusoidalPositionEmbedding, LearnedPositionEmbedding, RotaryPositionEmbedding, ALiBiPositionEmbedding, NumericEmbeddingManager, NoAddEmbedding, PassthroughEmbedding -from attention_smithy.components import MultiheadAttention, FeedForwardNetwork -from attention_smithy.attention import StandardAttentionMethod from attention_smithy.utils import seed_everything from attention_smithy.generators import GeneratorContext from transformers import AutoTokenizer @@ -36,41 +33,47 @@ def run_training_job(parsed_args): run_name_prefix = f'sinusoid-{parsed_args.sinusoidal_position}_learned-{parsed_args.learned_position}_rotary-{parsed_args.rotary_position}_alibi-{parsed_args.alibi_position}_dropout-{parsed_args.dropout}_activation-{parsed_args.activation}' logger = WandbLogger(project='NAS optimized vs. 
original', name=run_name_prefix) + # Create strategies config for multi-GPU training + strategy = 'ddp' if torch.cuda.device_count() > 1 else 'auto' + bleu_callback = BleuScoreValidationCallback() trainer = pl.Trainer( - max_epochs=40, + max_epochs=1, logger=logger, + log_every_n_steps=500, callbacks=[ bleu_callback, ], - log_every_n_steps=500, + strategy=strategy, + accelerator='auto', # Let Lightning automatically detect GPU/CPU + devices='auto' # Use all available devices ) - sinusoidal_position_embedding = SinusoidalPositionEmbedding(parsed_args.embedding_dimension) if parsed_args.sinusoidal_position else NoAddEmbedding() - learned_position_embedding = LearnedPositionEmbedding(max_sequence_length=3_000, embedding_dimension=parsed_args.embedding_dimension) if parsed_args.learned_position else NoAddEmbedding() - rotary_position_embedding = RotaryPositionEmbedding(parsed_args.embedding_dimension // parsed_args.number_of_heads) if parsed_args.rotary_position else PassthroughEmbedding() - alibi_position_embedding = ALiBiPositionEmbedding(parsed_args.number_of_heads) if parsed_args.alibi_position else NoAddEmbedding() - - numeric_embedding_manager = NumericEmbeddingManager(sinusoidal_position=sinusoidal_position_embedding, learned_position=learned_position_embedding, rotary_position=rotary_position_embedding, alibi_position=alibi_position_embedding) - generic_attention = MultiheadAttention(embedding_dimension= parsed_args.embedding_dimension, number_of_heads= parsed_args.number_of_heads, attention_method= StandardAttentionMethod(parsed_args.dropout)) - decoder_self_attention = MultiheadAttention(embedding_dimension= parsed_args.embedding_dimension, number_of_heads= parsed_args.number_of_heads, attention_method= StandardAttentionMethod(parsed_args.dropout, is_causal_masking=True)) - feedforward_network = FeedForwardNetwork(parsed_args.embedding_dimension, parsed_args.feedforward_dimension, parsed_args.activation, parsed_args.dropout) + # Convert args to kwargs dict for model initialization + model_kwargs = { + 'embedding_dimension': parsed_args.embedding_dimension, + 'number_of_heads': parsed_args.number_of_heads, + 'dropout': parsed_args.dropout, + 'activation': parsed_args.activation, + 'feedforward_dimension': parsed_args.feedforward_dimension, + 'num_encoder_layers': parsed_args.number_of_layers, + 'num_decoder_layers': parsed_args.number_of_layers, + 'scheduler_warmup_steps': parsed_args.scheduler_warmup_steps, + 'loss_type': parsed_args.loss_type, + 'label_smoothing': parsed_args.label_smoothing, + 'use_sinusoidal': parsed_args.sinusoidal_position, + 'use_learned': parsed_args.learned_position, + 'use_rotary': parsed_args.rotary_position, + 'use_alibi': parsed_args.alibi_position, + } + + # Create model with required args and kwargs model = MachineTranslationModel( src_vocab_size=data_module.de_vocab_size, tgt_vocab_size=data_module.en_vocab_size, - encoder_self_attention=generic_attention, - decoder_self_attention=decoder_self_attention, - decoder_cross_attention=generic_attention, - feedforward_network=feedforward_network, - numeric_embedding_manager=numeric_embedding_manager, tgt_padding_token=data_module.en_pad_token, - embedding_dimension=parsed_args.embedding_dimension, - num_encoder_layers=parsed_args.number_of_layers, - num_decoder_layers=parsed_args.number_of_layers, - scheduler_warmup_steps = parsed_args.scheduler_warmup_steps, - loss_type= parsed_args.loss_type, - label_smoothing = parsed_args.label_smoothing, + **model_kwargs ) trainer.fit(model, data_module) @@ -79,6 +82,7 
@@ def run_training_job(parsed_args): bleu_score = bleu_callback.bleu_score return bleu_score + class BleuScoreValidationCallback(pl.Callback): def __init__(self): self.generator = GeneratorContext(method='beam_batch') diff --git a/src/machine_translation/MachineTranslationModel.py b/src/machine_translation/MachineTranslationModel.py index b7a64b8..24e4454 100644 --- a/src/machine_translation/MachineTranslationModel.py +++ b/src/machine_translation/MachineTranslationModel.py @@ -7,65 +7,146 @@ from copy import deepcopy import math from attention_smithy.components import Encoder, Decoder, EncoderLayer, DecoderLayer +from attention_smithy.numeric_embeddings import ( + SinusoidalPositionEmbedding, LearnedPositionEmbedding, + RotaryPositionEmbedding, ALiBiPositionEmbedding, + NumericEmbeddingManager, NoAddEmbedding, PassthroughEmbedding +) +from attention_smithy.components import MultiheadAttention, FeedForwardNetwork +from attention_smithy.attention import StandardAttentionMethod from machine_translation.loss import MaskedLoss, LabelSmoothingLoss + class MachineTranslationModel(pl.LightningModule): - """ - The full transformer model that performs a machine translation task. - """ - def __init__(self, - src_vocab_size: int, - tgt_vocab_size: int, - encoder_self_attention, - decoder_self_attention, - decoder_cross_attention, - feedforward_network, - numeric_embedding_manager, - tgt_padding_token: int, - scheduler_warmup_steps: int, - loss_type: str, - label_smoothing: float, - embedding_dimension: int=512, - num_encoder_layers: int=6, - num_decoder_layers: int=6, - dropout: float=0.1, - ): + def __init__(self, src_vocab_size: int, tgt_vocab_size: int, tgt_padding_token: int, **kwargs): """ - Args: - src_vocab_size (int): The number of total possible tokens in the language being translated - FROM (in German-to-English, this would be the number of possible German tokens). - tgt_vocab_size (int): The number of total possible tokens in the language being translated - TO (in German-to-English, this would be the number of possible English tokens). - encoder_self_attention (AttentionMethod): The attention method used for the encoder self - attention block. See AttentionSmithy.attention for available attention methods. - decoder_self_attention (AttentionMethod): The attention method used for the decoder self - attention block. See AttentionSmithy.attention for available attention methods. - decoder_cross_attention (AttentionMethod): The attention method used for the decoder cross - attention block. See AttentionSmithy.attention for available attention methods. - feedforward_network (attention_smithy.components.feedforward): The class to be used in - the feedforward block of both the encoder and the decoder. The class is duplicated - and the weights are re-randomized for each duplicate. - numeric_embedding_manager (attention_smithy.numeric_embeddings.NumericEmbeddingManager): - The class that contains all numeric (position) embedding strategies to be used in - the model. + Initialize the model with required parameters and optional kwargs. 
+ + Required Args: + src_vocab_size (int): Size of source vocabulary + tgt_vocab_size (int): Size of target vocabulary + tgt_padding_token (int): Padding token ID for target vocabulary + + Optional Args (kwargs): + embedding_dimension (int): Dimension of embeddings (default: 512) + number_of_heads (int): Number of attention heads (default: 8) + dropout (float): Dropout rate (default: 0.1) + activation (str): Activation function (default: 'relu') + feedforward_dimension (int): Dimension of feedforward layer (default: 2048) + num_encoder_layers (int): Number of encoder layers (default: 6) + num_decoder_layers (int): Number of decoder layers (default: 6) + scheduler_warmup_steps (int): Warmup steps for scheduler (default: 4000) + loss_type (str): Type of loss function (default: 'custom') + label_smoothing (float): Label smoothing value (default: 0.9) + use_sinusoidal (bool): Use sinusoidal position embedding (default: False) + use_learned (bool): Use learned position embedding (default: False) + use_rotary (bool): Use rotary position embedding (default: False) + use_alibi (bool): Use ALiBi position embedding (default: False) """ super().__init__() - self.embedding_dimension = embedding_dimension - self.numeric_embedding_manager = numeric_embedding_manager - self.scheduler_warmup_steps = scheduler_warmup_steps - self.src_token_embedding = nn.Embedding(src_vocab_size, embedding_dimension) - self.tgt_token_embedding = nn.Embedding(tgt_vocab_size, embedding_dimension) - encoder_layer = EncoderLayer(embedding_dimension, encoder_self_attention, feedforward_network, dropout) - self.encoder = Encoder(encoder_layer, number_of_layers=num_encoder_layers) - decoder_layer = DecoderLayer(embedding_dimension, decoder_self_attention, decoder_cross_attention, feedforward_network, dropout) - self.decoder = Decoder(decoder_layer, number_of_layers=num_decoder_layers) - self.vocab_output_layer = VocabOutputSoftmaxLayer(embedding_dimension, tgt_vocab_size) - if loss_type == 'custom': - self.loss_method = LabelSmoothingLoss(tgt_padding_token, confidence_probability_score=label_smoothing) - elif loss_type == 'simple': - self.loss_method = MaskedLoss(tgt_padding_token, label_smoothing=label_smoothing) - else: - raise RuntimeError("not a valid loss type") + + # Set default values for kwargs + self.config = { + 'embedding_dimension': 512, + 'number_of_heads': 8, + 'dropout': 0.1, + 'activation': 'relu', + 'feedforward_dimension': 2048, + 'num_encoder_layers': 6, + 'num_decoder_layers': 6, + 'scheduler_warmup_steps': 4000, + 'loss_type': 'custom', + 'label_smoothing': 0.9, + 'use_sinusoidal': False, + 'use_learned': False, + 'use_rotary': False, + 'use_alibi': False, + } + + self.config.update(kwargs) + + self.save_hyperparameters() + + self.embedding_dimension = self.config['embedding_dimension'] + self.src_token_embedding = nn.Embedding(src_vocab_size, self.embedding_dimension) + self.tgt_token_embedding = nn.Embedding(tgt_vocab_size, self.embedding_dimension) + + self.numeric_embedding_manager = self._create_embedding_manager() + + generic_attention = MultiheadAttention( + embedding_dimension=self.embedding_dimension, + number_of_heads=self.config['number_of_heads'], + attention_method=StandardAttentionMethod(self.config['dropout']) + ) + + decoder_self_attention = MultiheadAttention( + embedding_dimension=self.embedding_dimension, + number_of_heads=self.config['number_of_heads'], + attention_method=StandardAttentionMethod(self.config['dropout'], is_causal_masking=True) + ) + + feedforward_network = 
FeedForwardNetwork( + self.embedding_dimension, + self.config['feedforward_dimension'], + self.config['activation'], + self.config['dropout'] + ) + + encoder_layer = EncoderLayer( + self.embedding_dimension, + generic_attention, + feedforward_network, + self.config['dropout'] + ) + self.encoder = Encoder(encoder_layer, number_of_layers=self.config['num_encoder_layers']) + + decoder_layer = DecoderLayer( + self.embedding_dimension, + decoder_self_attention, + generic_attention, # Cross attention + feedforward_network, + self.config['dropout'] + ) + self.decoder = Decoder(decoder_layer, number_of_layers=self.config['num_decoder_layers']) + + self.vocab_output_layer = VocabOutputSoftmaxLayer(self.embedding_dimension, tgt_vocab_size) + + self.loss_method = ( + LabelSmoothingLoss(tgt_padding_token, confidence_probability_score=self.config['label_smoothing']) + if self.config['loss_type'] == 'custom' + else MaskedLoss(tgt_padding_token, label_smoothing=self.config['label_smoothing']) + ) + + self.scheduler_warmup_steps = self.config['scheduler_warmup_steps'] + + def _create_embedding_manager(self): + """Create embedding manager with specified embedding types from config.""" + sinusoidal_position = ( + SinusoidalPositionEmbedding(self.embedding_dimension) + if self.config['use_sinusoidal'] else NoAddEmbedding() + ) + + learned_position = ( + LearnedPositionEmbedding(max_sequence_length=3_000, embedding_dimension=self.embedding_dimension) + if self.config['use_learned'] else NoAddEmbedding() + ) + + rotary_position = ( + RotaryPositionEmbedding(self.embedding_dimension // self.config['number_of_heads']) + if self.config['use_rotary'] else PassthroughEmbedding() + ) + + alibi_position = ( + ALiBiPositionEmbedding(self.config['number_of_heads']) + if self.config['use_alibi'] else NoAddEmbedding() + ) + + return NumericEmbeddingManager( + sinusoidal_position=sinusoidal_position, + learned_position=learned_position, + rotary_position=rotary_position, + alibi_position=alibi_position + ) def forward(self, src_tensor, tgt_tensor, src_padding_mask, tgt_padding_mask): src_encoded = self.forward_encode(src_tensor, src_padding_mask) From 20d60b78311b18e6ce14fb80e4bec6271999fac6 Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Mon, 17 Feb 2025 14:29:28 -0800 Subject: [PATCH 05/12] updated the batch sampler for multi-gpu training --- .../data/LengthBatchSampler.py | 72 +++++++++++-------- .../data/MachineTranslationDataModule.py | 15 ++-- 2 files changed, 51 insertions(+), 36 deletions(-) diff --git a/src/machine_translation/data/LengthBatchSampler.py b/src/machine_translation/data/LengthBatchSampler.py index 761bfa1..4b4d7d8 100644 --- a/src/machine_translation/data/LengthBatchSampler.py +++ b/src/machine_translation/data/LengthBatchSampler.py @@ -1,41 +1,53 @@ -from torch.utils.data import Sampler +import torch +from torch.utils.data import BatchSampler, Sampler import random -class LengthBatchSampler(Sampler): - def __init__(self, dataset, batch_size, shuffle=False):#, max_tokens=10_000): - self.dataset = dataset - self.batch_size = batch_size - #self.max_tokens = max_tokens - self.lengths = self.dataset.lengths - self.shuffle = shuffle - self.batches = self._setup_batches() - def __len__(self): - return len(self.batches) +class LengthBatchSampler(BatchSampler): + def __init__(self, sampler, batch_size, drop_last=False): + if not isinstance(sampler, Sampler): + raise ValueError("sampler should be an instance of " + "torch.utils.data.Sampler, but got sampler={}" + 
.format(sampler)) + if not isinstance(batch_size, int) or batch_size <= 0: + raise ValueError("batch_size should be a positive integer value, " + "but got batch_size={}".format(batch_size)) + if not isinstance(drop_last, bool): + raise ValueError("drop_last should be a boolean value, but got " + "drop_last={}".format(drop_last)) - def __iter__(self): - return iter(self.batches) + self.sampler = sampler + self.batch_size = batch_size + self.drop_last = drop_last + self.lengths = getattr(sampler.data_source, 'lengths', None) + if self.lengths is None: + raise ValueError("Dataset must have a 'lengths' attribute") - def _setup_batches(self): - indices = list(range(len(self.dataset))) + def __iter__(self): + indices = list(self.sampler) + # Sort indices by sequence length indices.sort(key=lambda i: self.lengths[i]) + batches = [] - current_batch = [] - current_tokens = 0 + batch = [] + for idx in indices: - sample_length = self.lengths[idx] - if len(current_batch) >= self.batch_size: - #if current_tokens + sample_length > self.max_tokens: - batches.append(current_batch) - current_batch = [idx] - current_tokens = sample_length - else: - current_batch.append(idx) - current_tokens += sample_length - if current_batch: - batches.append(current_batch) - if self.shuffle: + batch.append(idx) + if len(batch) == self.batch_size: + batches.append(batch) + batch = [] + + if len(batch) > 0 and not self.drop_last: + batches.append(batch) + + if isinstance(self.sampler, torch.utils.data.RandomSampler): random.shuffle(batches) - return batches + for batch in batches: + yield batch + def __len__(self): + if self.drop_last: + return len(self.sampler) // self.batch_size + else: + return (len(self.sampler) + self.batch_size - 1) // self.batch_size \ No newline at end of file diff --git a/src/machine_translation/data/MachineTranslationDataModule.py b/src/machine_translation/data/MachineTranslationDataModule.py index 7aabe0b..f4afa0c 100644 --- a/src/machine_translation/data/MachineTranslationDataModule.py +++ b/src/machine_translation/data/MachineTranslationDataModule.py @@ -32,16 +32,19 @@ def setup(self, stage=None): self.test_dataset = LineIndexDataset(f'{data_directory}/test{self.de_filepath_suffix}', f'{data_directory}/test{self.en_filepath_suffix}', self.num_training_samples) def train_dataloader(self): - sampler = LengthBatchSampler(self.train_dataset, batch_size=self.batch_size, shuffle=True) - return DataLoader(self.train_dataset, batch_sampler=sampler, collate_fn=self._collate_function) + sampler = torch.utils.data.RandomSampler(self.train_dataset) + batch_sampler = LengthBatchSampler(sampler, batch_size=self.batch_size, drop_last=False) + return DataLoader(self.train_dataset, batch_sampler=batch_sampler, collate_fn=self._collate_function) def val_dataloader(self): - sampler = LengthBatchSampler(self.val_dataset, batch_size=self.batch_size) - return DataLoader(self.val_dataset, batch_sampler=sampler, collate_fn=self._collate_function) + sampler = torch.utils.data.SequentialSampler(self.val_dataset) + batch_sampler = LengthBatchSampler(sampler, batch_size=self.batch_size, drop_last=False) + return DataLoader(self.val_dataset, batch_sampler=batch_sampler, collate_fn=self._collate_function) def test_dataloader(self): - sampler = LengthBatchSampler(self.test_dataset, batch_size=self.batch_size) - return DataLoader(self.test_dataset, batch_sampler=sampler, collate_fn=self._collate_function) + sampler = torch.utils.data.SequentialSampler(self.test_dataset) + batch_sampler = LengthBatchSampler(sampler, 
batch_size=self.batch_size, drop_last=False) + return DataLoader(self.test_dataset, batch_sampler=batch_sampler, collate_fn=self._collate_function) def _collate_function(self, batch): input_tensors, expected_output_tensors = zip(*batch) From 8bb43ad540967eeb6b8231fd9d1b3eee4bd15216 Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Mon, 17 Feb 2025 14:38:38 -0800 Subject: [PATCH 06/12] updated the batch sampler to use a distributed sampler --- .../data/LengthBatchSampler.py | 28 +++++++++++-------- .../data/MachineTranslationDataModule.py | 9 ++++-- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/src/machine_translation/data/LengthBatchSampler.py b/src/machine_translation/data/LengthBatchSampler.py index 4b4d7d8..b5580b3 100644 --- a/src/machine_translation/data/LengthBatchSampler.py +++ b/src/machine_translation/data/LengthBatchSampler.py @@ -1,30 +1,32 @@ import torch from torch.utils.data import BatchSampler, Sampler +import torch.distributed as dist import random class LengthBatchSampler(BatchSampler): - def __init__(self, sampler, batch_size, drop_last=False): + def __init__(self, sampler, batch_size, drop_last=False, dataset=None): if not isinstance(sampler, Sampler): raise ValueError("sampler should be an instance of " "torch.utils.data.Sampler, but got sampler={}" .format(sampler)) - if not isinstance(batch_size, int) or batch_size <= 0: - raise ValueError("batch_size should be a positive integer value, " - "but got batch_size={}".format(batch_size)) - if not isinstance(drop_last, bool): - raise ValueError("drop_last should be a boolean value, but got " - "drop_last={}".format(drop_last)) self.sampler = sampler self.batch_size = batch_size self.drop_last = drop_last - self.lengths = getattr(sampler.data_source, 'lengths', None) - if self.lengths is None: - raise ValueError("Dataset must have a 'lengths' attribute") + + # Handle both regular and distributed cases + if hasattr(sampler, 'data_source'): + self.lengths = getattr(sampler.data_source, 'lengths', None) + elif dataset is not None: + self.lengths = dataset.lengths + else: + raise ValueError("Either sampler must have data_source with lengths or dataset must be provided") def __iter__(self): + # Get indices from sampler (handles both distributed and non-distributed cases) indices = list(self.sampler) + # Sort indices by sequence length indices.sort(key=lambda i: self.lengths[i]) @@ -40,8 +42,10 @@ def __iter__(self): if len(batch) > 0 and not self.drop_last: batches.append(batch) - if isinstance(self.sampler, torch.utils.data.RandomSampler): - random.shuffle(batches) + # Shuffle batches if using a random sampler + if not isinstance(self.sampler, torch.utils.data.distributed.DistributedSampler): + if isinstance(self.sampler, torch.utils.data.RandomSampler): + random.shuffle(batches) for batch in batches: yield batch diff --git a/src/machine_translation/data/MachineTranslationDataModule.py b/src/machine_translation/data/MachineTranslationDataModule.py index f4afa0c..716264f 100644 --- a/src/machine_translation/data/MachineTranslationDataModule.py +++ b/src/machine_translation/data/MachineTranslationDataModule.py @@ -33,17 +33,20 @@ def setup(self, stage=None): def train_dataloader(self): sampler = torch.utils.data.RandomSampler(self.train_dataset) - batch_sampler = LengthBatchSampler(sampler, batch_size=self.batch_size, drop_last=False) + batch_sampler = LengthBatchSampler(sampler, batch_size=self.batch_size, drop_last=False, + dataset=self.train_dataset) return 
DataLoader(self.train_dataset, batch_sampler=batch_sampler, collate_fn=self._collate_function) def val_dataloader(self): sampler = torch.utils.data.SequentialSampler(self.val_dataset) - batch_sampler = LengthBatchSampler(sampler, batch_size=self.batch_size, drop_last=False) + batch_sampler = LengthBatchSampler(sampler, batch_size=self.batch_size, drop_last=False, + dataset=self.val_dataset) return DataLoader(self.val_dataset, batch_sampler=batch_sampler, collate_fn=self._collate_function) def test_dataloader(self): sampler = torch.utils.data.SequentialSampler(self.test_dataset) - batch_sampler = LengthBatchSampler(sampler, batch_size=self.batch_size, drop_last=False) + batch_sampler = LengthBatchSampler(sampler, batch_size=self.batch_size, drop_last=False, + dataset=self.test_dataset) return DataLoader(self.test_dataset, batch_sampler=batch_sampler, collate_fn=self._collate_function) def _collate_function(self, batch): From 6231bf4d17f9d003a9bbd7ea373d675d5850807a Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Wed, 19 Feb 2025 08:18:46 -0800 Subject: [PATCH 07/12] adjusting batch size and learning rate to scale with number of GPUs for multi-GPU training --- scripts/1_train_model__command_lines.py | 6 +++++- src/machine_translation/MachineTranslationModel.py | 7 ++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/scripts/1_train_model__command_lines.py b/scripts/1_train_model__command_lines.py index aa902ff..06f806a 100644 --- a/scripts/1_train_model__command_lines.py +++ b/scripts/1_train_model__command_lines.py @@ -21,11 +21,15 @@ def run_training_job(parsed_args): seed_everything(parsed_args.random_seed) torch.set_float32_matmul_precision('medium') + num_gpus = torch.cuda.device_count() + effective_batch_size = parsed_args.batch_size + per_gpu_batch_size = effective_batch_size // num_gpus if num_gpus > 1 else effective_batch_size + data_module = MachineTranslationDataModule( en_filepath_suffix='_en.txt', de_filepath_suffix='_de.txt', maximum_length=parsed_args.maximum_length, - batch_size=parsed_args.batch_size, + batch_size=per_gpu_batch_size, num_training_samples=parsed_args.num_training_samples, ) data_module.setup() diff --git a/src/machine_translation/MachineTranslationModel.py b/src/machine_translation/MachineTranslationModel.py index 24e4454..41fdcf8 100644 --- a/src/machine_translation/MachineTranslationModel.py +++ b/src/machine_translation/MachineTranslationModel.py @@ -194,9 +194,10 @@ def configure_optimizers(self): def lr_lambda(step): step = step + 1 - lr = self.embedding_dimension**(-0.5) * min(step**(-0.5), step * self.scheduler_warmup_steps**(-1.5)) - #print(f'\n\n{lr}\n') - return lr + num_gpus = torch.cuda.device_count() + base_lr = self.embedding_dimension ** (-0.5) * min(step ** (-0.5), + step * self.scheduler_warmup_steps ** (-1.5)) + return base_lr * num_gpus scheduler = LambdaLR(optimizer, lr_lambda=lr_lambda) return [optimizer], [{'scheduler': scheduler, 'interval': 'step'}] From 6f61dc17cb2b224ef0a95c187406166848f6649a Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Mon, 24 Feb 2025 10:58:49 -0800 Subject: [PATCH 08/12] further updates to learning rate calculation --- src/machine_translation/MachineTranslationModel.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/machine_translation/MachineTranslationModel.py b/src/machine_translation/MachineTranslationModel.py index 41fdcf8..e03f8e8 100644 --- 
a/src/machine_translation/MachineTranslationModel.py +++ b/src/machine_translation/MachineTranslationModel.py @@ -189,19 +189,21 @@ def validation_step(self, batch, batch_idx): self.log("val_loss", loss, prog_bar=False, batch_size=vocabulary_logits.shape[0]) return loss + def configure_optimizers(self): optimizer = Adam(self.parameters(), lr=1.0, betas=(0.9, 0.98), eps=1e-9) + num_gpus = torch.cuda.device_count() + effective_warmup_steps = self.scheduler_warmup_steps // num_gpus def lr_lambda(step): step = step + 1 - num_gpus = torch.cuda.device_count() - base_lr = self.embedding_dimension ** (-0.5) * min(step ** (-0.5), - step * self.scheduler_warmup_steps ** (-1.5)) - return base_lr * num_gpus + lr = self.embedding_dimension ** (-0.5) * min(step ** (-0.5), step * effective_warmup_steps ** (-1.5)) + return lr scheduler = LambdaLR(optimizer, lr_lambda=lr_lambda) return [optimizer], [{'scheduler': scheduler, 'interval': 'step'}] + class VocabOutputSoftmaxLayer(nn.Module): def __init__(self, embedding_dimension: int, From 8b480cd5cd76c73e0f88847d9c01585e9ea06870 Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Mon, 24 Feb 2025 12:47:21 -0800 Subject: [PATCH 09/12] update to match attention smithy update (name change) --- scripts/1_train_model__command_lines.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/1_train_model__command_lines.py b/scripts/1_train_model__command_lines.py index 06f806a..a8b1463 100644 --- a/scripts/1_train_model__command_lines.py +++ b/scripts/1_train_model__command_lines.py @@ -89,7 +89,7 @@ def run_training_job(parsed_args): class BleuScoreValidationCallback(pl.Callback): def __init__(self): - self.generator = GeneratorContext(method='beam_batch') + self.generator = GeneratorContext(method='beam') self.de_tokenizer = AutoTokenizer.from_pretrained('bert-base-german-cased') self.en_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') From 5c824a86888570e31c1a3ec311e0d0d04c1dc8d4 Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Mon, 24 Feb 2025 13:08:00 -0800 Subject: [PATCH 10/12] update batch size --- scripts/1_train_model__command_lines.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/1_train_model__command_lines.py b/scripts/1_train_model__command_lines.py index a8b1463..480c3f9 100644 --- a/scripts/1_train_model__command_lines.py +++ b/scripts/1_train_model__command_lines.py @@ -156,7 +156,7 @@ def parse_args(): parser.add_argument('--label_smoothing', type=float, default=0.9, help='Label smoothing value') parser.add_argument('--scheduler_warmup_steps', type=int, default=4000, help='Number of warmup steps for scheduler') parser.add_argument('--maximum_length', type=int, default=100, help='Maximum sequence length') - parser.add_argument('--batch_size', type=int, default=64, help='Batch size') + parser.add_argument('--batch_size', type=int, default=128, help='Batch size') parser.add_argument('--embedding_dimension', type=int, default=512, help='Embedding dimension. Original model used 512') parser.add_argument('--number_of_heads', type=int, default=8, help='Number of attention heads. Original model used 8') parser.add_argument('--feedforward_dimension', type=int, default=2048, help='Feedforward dimension. 
Original model used 2048.') From 6518f9da52033876e41384952b9b7d5a35154afd Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Sat, 15 Mar 2025 13:32:45 -0700 Subject: [PATCH 11/12] updated code to account for refactored numeric embedding manager class (for strategy design pattern) --- scripts/1_train_model__command_lines.py | 12 +- .../MachineTranslationModel.py | 161 +++++++++--------- 2 files changed, 86 insertions(+), 87 deletions(-) diff --git a/scripts/1_train_model__command_lines.py b/scripts/1_train_model__command_lines.py index 480c3f9..aca8061 100644 --- a/scripts/1_train_model__command_lines.py +++ b/scripts/1_train_model__command_lines.py @@ -12,7 +12,7 @@ from machine_translation import MachineTranslationModel from machine_translation.data import MachineTranslationDataModule -from attention_smithy.utils import seed_everything +from attention_smithy.utils import seed_everything, get_available_gpu_count from attention_smithy.generators import GeneratorContext from transformers import AutoTokenizer from sacrebleu.metrics import BLEU @@ -21,7 +21,7 @@ def run_training_job(parsed_args): seed_everything(parsed_args.random_seed) torch.set_float32_matmul_precision('medium') - num_gpus = torch.cuda.device_count() + num_gpus = get_available_gpu_count() effective_batch_size = parsed_args.batch_size per_gpu_batch_size = effective_batch_size // num_gpus if num_gpus > 1 else effective_batch_size @@ -38,7 +38,7 @@ def run_training_job(parsed_args): logger = WandbLogger(project='NAS optimized vs. original', name=run_name_prefix) # Create strategies config for multi-GPU training - strategy = 'ddp' if torch.cuda.device_count() > 1 else 'auto' + strategy = 'ddp' if num_gpus > 1 else 'auto' bleu_callback = BleuScoreValidationCallback() @@ -50,11 +50,10 @@ def run_training_job(parsed_args): bleu_callback, ], strategy=strategy, - accelerator='auto', # Let Lightning automatically detect GPU/CPU - devices='auto' # Use all available devices + accelerator='auto', + devices='auto' ) - # Convert args to kwargs dict for model initialization model_kwargs = { 'embedding_dimension': parsed_args.embedding_dimension, 'number_of_heads': parsed_args.number_of_heads, @@ -72,7 +71,6 @@ def run_training_job(parsed_args): 'use_alibi': parsed_args.alibi_position, } - # Create model with required args and kwargs model = MachineTranslationModel( src_vocab_size=data_module.de_vocab_size, tgt_vocab_size=data_module.en_vocab_size, diff --git a/src/machine_translation/MachineTranslationModel.py b/src/machine_translation/MachineTranslationModel.py index e03f8e8..5376843 100644 --- a/src/machine_translation/MachineTranslationModel.py +++ b/src/machine_translation/MachineTranslationModel.py @@ -10,13 +10,13 @@ from attention_smithy.numeric_embeddings import ( SinusoidalPositionEmbedding, LearnedPositionEmbedding, RotaryPositionEmbedding, ALiBiPositionEmbedding, - NumericEmbeddingManager, NoAddEmbedding, PassthroughEmbedding + NumericEmbeddingManager ) from attention_smithy.components import MultiheadAttention, FeedForwardNetwork from attention_smithy.attention import StandardAttentionMethod +from attention_smithy.utils import get_available_gpu_count from machine_translation.loss import MaskedLoss, LabelSmoothingLoss - class MachineTranslationModel(pl.LightningModule): def __init__(self, src_vocab_size: int, tgt_vocab_size: int, tgt_padding_token: int, **kwargs): """ @@ -67,49 +67,15 @@ def __init__(self, src_vocab_size: int, tgt_vocab_size: int, tgt_padding_token: 
self.save_hyperparameters() - self.embedding_dimension = self.config['embedding_dimension'] - self.src_token_embedding = nn.Embedding(src_vocab_size, self.embedding_dimension) - self.tgt_token_embedding = nn.Embedding(tgt_vocab_size, self.embedding_dimension) + self.src_token_embedding = nn.Embedding(src_vocab_size, self.config['embedding_dimension']) + self.tgt_token_embedding = nn.Embedding(tgt_vocab_size, self.config['embedding_dimension']) self.numeric_embedding_manager = self._create_embedding_manager() - generic_attention = MultiheadAttention( - embedding_dimension=self.embedding_dimension, - number_of_heads=self.config['number_of_heads'], - attention_method=StandardAttentionMethod(self.config['dropout']) - ) - - decoder_self_attention = MultiheadAttention( - embedding_dimension=self.embedding_dimension, - number_of_heads=self.config['number_of_heads'], - attention_method=StandardAttentionMethod(self.config['dropout'], is_causal_masking=True) - ) - - feedforward_network = FeedForwardNetwork( - self.embedding_dimension, - self.config['feedforward_dimension'], - self.config['activation'], - self.config['dropout'] - ) - - encoder_layer = EncoderLayer( - self.embedding_dimension, - generic_attention, - feedforward_network, - self.config['dropout'] - ) - self.encoder = Encoder(encoder_layer, number_of_layers=self.config['num_encoder_layers']) - - decoder_layer = DecoderLayer( - self.embedding_dimension, - decoder_self_attention, - generic_attention, # Cross attention - feedforward_network, - self.config['dropout'] - ) - self.decoder = Decoder(decoder_layer, number_of_layers=self.config['num_decoder_layers']) - - self.vocab_output_layer = VocabOutputSoftmaxLayer(self.embedding_dimension, tgt_vocab_size) + decoder_self_attention, feedforward_network, generic_attention = self._initialize_sublayer_components() + self.encoder = self._initialize_encoder(feedforward_network, generic_attention) + self.decoder = self._initialize_decoder(decoder_self_attention, feedforward_network, generic_attention) + self.vocab_output_layer = VocabOutputSoftmaxLayer(self.config['embedding_dimension'], tgt_vocab_size) self.loss_method = ( LabelSmoothingLoss(tgt_padding_token, confidence_probability_score=self.config['label_smoothing']) @@ -119,52 +85,20 @@ def __init__(self, src_vocab_size: int, tgt_vocab_size: int, tgt_padding_token: self.scheduler_warmup_steps = self.config['scheduler_warmup_steps'] - def _create_embedding_manager(self): - """Create embedding manager with specified embedding types from config.""" - sinusoidal_position = ( - SinusoidalPositionEmbedding(self.embedding_dimension) - if self.config['use_sinusoidal'] else NoAddEmbedding() - ) - - learned_position = ( - LearnedPositionEmbedding(max_sequence_length=3_000, embedding_dimension=self.embedding_dimension) - if self.config['use_learned'] else NoAddEmbedding() - ) - - rotary_position = ( - RotaryPositionEmbedding(self.embedding_dimension // self.config['number_of_heads']) - if self.config['use_rotary'] else PassthroughEmbedding() - ) - - alibi_position = ( - ALiBiPositionEmbedding(self.config['number_of_heads']) - if self.config['use_alibi'] else NoAddEmbedding() - ) - - return NumericEmbeddingManager( - sinusoidal_position=sinusoidal_position, - learned_position=learned_position, - rotary_position=rotary_position, - alibi_position=alibi_position - ) - def forward(self, src_tensor, tgt_tensor, src_padding_mask, tgt_padding_mask): src_encoded = self.forward_encode(src_tensor, src_padding_mask) vocabulary_logits = self.forward_decode(tgt_tensor, 
src_encoded, tgt_padding_mask, src_padding_mask) return vocabulary_logits def forward_encode(self, src_tensor, src_padding_mask): - src_embedding = self.src_token_embedding(src_tensor) * math.sqrt(self.embedding_dimension) - position_embedding = self.numeric_embedding_manager.calculate_sinusoidal_and_learned_tokenizations(src_embedding) + src_embedding = self.src_token_embedding(src_tensor) * math.sqrt(self.config['embedding_dimension']) + position_embedding = self.numeric_embedding_manager.create_positional_or_custom_embedding(token_embedding=src_embedding) event_encoded = self.encoder(src=src_embedding + position_embedding, src_padding_mask=src_padding_mask, numeric_embedding_manager=self.numeric_embedding_manager) return event_encoded def forward_decode(self, tgt_tensor, src_encoded, tgt_padding_mask, src_padding_mask): - if tgt_tensor.shape[0] != src_encoded.shape[0]: - beam_width = tgt_tensor.shape[0] // src_encoded.shape[0] - src_encoded = src_encoded.repeat_interleave(beam_width, dim=0) - src_padding_mask = src_padding_mask.repeat_interleave(beam_width, dim=0) - tgt_embedding = self.tgt_token_embedding(tgt_tensor) * math.sqrt(self.embedding_dimension) + src_encoded, src_padding_mask = self._expand_inputs_for_beam_generation_if_applicable(src_encoded, src_padding_mask, tgt_tensor) + tgt_embedding = self.tgt_token_embedding(tgt_tensor) * math.sqrt(self.config['embedding_dimension']) output = self.decoder( tgt=tgt_embedding, src=src_encoded, @@ -192,18 +126,85 @@ def validation_step(self, batch, batch_idx): def configure_optimizers(self): optimizer = Adam(self.parameters(), lr=1.0, betas=(0.9, 0.98), eps=1e-9) - num_gpus = torch.cuda.device_count() + num_gpus = get_available_gpu_count() effective_warmup_steps = self.scheduler_warmup_steps // num_gpus def lr_lambda(step): step = step + 1 - lr = self.embedding_dimension ** (-0.5) * min(step ** (-0.5), step * effective_warmup_steps ** (-1.5)) + lr = self.config['embedding_dimension'] ** (-0.5) * min(step ** (-0.5), step * effective_warmup_steps ** (-1.5)) return lr scheduler = LambdaLR(optimizer, lr_lambda=lr_lambda) return [optimizer], [{'scheduler': scheduler, 'interval': 'step'}] + + + + def _initialize_decoder(self, decoder_self_attention, feedforward_network, generic_attention): + decoder_layer = DecoderLayer( + self.config['embedding_dimension'], + decoder_self_attention, + generic_attention, # Cross attention + feedforward_network, + self.config['dropout'] + ) + decoder = Decoder(decoder_layer, number_of_layers=self.config['num_decoder_layers']) + return decoder + + def _initialize_encoder(self, feedforward_network, generic_attention): + encoder_layer = EncoderLayer( + self.config['embedding_dimension'], + generic_attention, + feedforward_network, + self.config['dropout'] + ) + return Encoder(encoder_layer, number_of_layers=self.config['num_encoder_layers']) + + def _initialize_sublayer_components(self): + generic_attention = MultiheadAttention( + embedding_dimension=self.config['embedding_dimension'], + number_of_heads=self.config['number_of_heads'], + attention_method=StandardAttentionMethod(self.config['dropout']) + ) + decoder_self_attention = MultiheadAttention( + embedding_dimension=self.config['embedding_dimension'], + number_of_heads=self.config['number_of_heads'], + attention_method=StandardAttentionMethod(self.config['dropout'], is_causal_masking=True) + ) + feedforward_network = FeedForwardNetwork( + self.config['embedding_dimension'], + self.config['feedforward_dimension'], + self.config['activation'], + 
self.config['dropout'] + ) + return decoder_self_attention, feedforward_network, generic_attention + + def _create_embedding_manager(self): + embedding_strategies = [] + if self.config['use_sinusoidal']: + embedding_strategies.append(SinusoidalPositionEmbedding(self.config['embedding_dimension'])) + + if self.config['use_learned']: + embedding_strategies.append(LearnedPositionEmbedding(max_sequence_length=3_000, embedding_dimension=self.config['embedding_dimension'])) + + if self.config['use_rotary']: + embedding_strategies.append(RotaryPositionEmbedding(self.config['embedding_dimension'] // self.config['number_of_heads'])) + + if self.config['use_alibi']: + embedding_strategies.append(ALiBiPositionEmbedding(self.config['number_of_heads'])) + + return NumericEmbeddingManager(embedding_strategies) + + def _expand_inputs_for_beam_generation_if_applicable(self, src_encoded, src_padding_mask, tgt_tensor): + if tgt_tensor.shape[0] != src_encoded.shape[0]: + beam_width = tgt_tensor.shape[0] // src_encoded.shape[0] + src_encoded = src_encoded.repeat_interleave(beam_width, dim=0) + src_padding_mask = src_padding_mask.repeat_interleave(beam_width, dim=0) + return src_encoded, src_padding_mask + + + class VocabOutputSoftmaxLayer(nn.Module): def __init__(self, embedding_dimension: int, From 3e76d9f942312bcf80345ab19e5a2ffd4f9342f1 Mon Sep 17 00:00:00 2001 From: CCranney <11773171+CCranney@users.noreply.github.com> Date: Tue, 15 Apr 2025 12:39:29 -0700 Subject: [PATCH 12/12] argparse description and help text update --- scripts/1_train_model__command_lines.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/1_train_model__command_lines.py b/scripts/1_train_model__command_lines.py index aca8061..0cf1996 100644 --- a/scripts/1_train_model__command_lines.py +++ b/scripts/1_train_model__command_lines.py @@ -142,8 +142,8 @@ def on_train_epoch_end(self, trainer, pl_module, **kwargs): self.bleu_score = bleu_score.score def parse_args(): - parser = argparse.ArgumentParser(description="generformer-nas") - parser.add_argument("--log_path", type=str, required=True, help="dir to place tensorboard logs from all trials") + parser = argparse.ArgumentParser(description="machine-translation") + parser.add_argument("--log_path", type=str, required=True, help="dir to place logs from all trials") parser.add_argument('--sinusoidal_position', action='store_true', default=False) parser.add_argument('--rotary_position', action='store_true', default=False) parser.add_argument('--alibi_position', action='store_true', default=False)
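After the final patch, `MachineTranslationModel` builds its own attention, feedforward, and position-embedding components from a config dict merged with constructor kwargs. Below is a minimal usage sketch of the resulting interface; the vocabulary sizes and padding token are placeholder values standing in for the data module's attributes.

```python
# Usage sketch of the post-refactor constructor. Keyword names mirror the config
# keys defined in MachineTranslationModel; the numeric values are placeholders
# (normally taken from MachineTranslationDataModule, e.g. data_module.de_vocab_size).
from machine_translation import MachineTranslationModel

model = MachineTranslationModel(
    src_vocab_size=30_000,
    tgt_vocab_size=30_000,
    tgt_padding_token=0,
    embedding_dimension=512,
    number_of_heads=8,
    feedforward_dimension=2048,
    num_encoder_layers=6,
    num_decoder_layers=6,
    dropout=0.1,
    activation='relu',
    loss_type='custom',
    label_smoothing=0.9,
    scheduler_warmup_steps=4000,
    use_sinusoidal=True,   # enable only the position-embedding strategies you want
    use_learned=False,
    use_rotary=False,
    use_alibi=False,
)
```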