diff --git a/run_python_examples.sh b/run_python_examples.sh
index e3521b0b30..96f259c7aa 100755
--- a/run_python_examples.sh
+++ b/run_python_examples.sh
@@ -153,7 +153,7 @@ function vision_transformer() {
 }
 
 function word_language_model() {
-  uv run main.py --epochs 1 --dry-run $CUDA_FLAG --mps || error "word_language_model failed"
+  uv run main.py --epochs 1 --dry-run $ACCEL_FLAG || error "word_language_model failed"
 }
 
 function gcn() {
diff --git a/word_language_model/README.md b/word_language_model/README.md
index 254b726585..1e05af9c72 100644
--- a/word_language_model/README.md
+++ b/word_language_model/README.md
@@ -4,13 +4,13 @@ This example trains a multi-layer RNN (Elman, GRU, or LSTM) or Transformer on a
 The trained model can then be used by the generate script to generate new text.
 
 ```bash
-python main.py --cuda --epochs 6           # Train a LSTM on Wikitext-2 with CUDA.
-python main.py --cuda --epochs 6 --tied    # Train a tied LSTM on Wikitext-2 with CUDA.
-python main.py --cuda --tied               # Train a tied LSTM on Wikitext-2 with CUDA for 40 epochs.
-python main.py --cuda --epochs 6 --model Transformer --lr 5
-                                           # Train a Transformer model on Wikitext-2 with CUDA.
-python generate.py                         # Generate samples from the default model checkpoint.
+python main.py --accel --epochs 6           # Train an LSTM on Wikitext-2 with the available accelerator.
+python main.py --accel --epochs 6 --tied    # Train a tied LSTM on Wikitext-2 with the available accelerator.
+python main.py --accel --tied               # Train a tied LSTM on Wikitext-2 for 40 epochs.
+python main.py --accel --epochs 6 --model Transformer --lr 5
+                                            # Train a Transformer model on Wikitext-2 with the available accelerator.
+python generate.py --accel                  # Generate samples from the default model checkpoint.
 ```
 
 The model uses the `nn.RNN` module (and its sister modules `nn.GRU` and `nn.LSTM`) or Transformer module (`nn.TransformerEncoder` and `nn.TransformerEncoderLayer`) which will automatically use the cuDNN backend if run on CUDA with cuDNN installed.
 
@@ -35,8 +35,7 @@ optional arguments:
   --dropout DROPOUT     dropout applied to layers (0 = no dropout)
   --tied                tie the word embedding and softmax weights
   --seed SEED           random seed
-  --cuda                use CUDA
-  --mps                 enable GPU on macOS
+  --accel               use accelerator
   --log-interval N      report interval
   --save SAVE           path to save the final model
   --onnx-export ONNX_EXPORT
@@ -49,8 +48,8 @@ With these arguments, a variety of models can be tested.
 As an example, the following arguments produce slower but better models:
 
 ```bash
-python main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40
-python main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied
-python main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --epochs 40
-python main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --epochs 40 --tied
+python main.py --accel --emsize 650 --nhid 650 --dropout 0.5 --epochs 40
+python main.py --accel --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied
+python main.py --accel --emsize 1500 --nhid 1500 --dropout 0.65 --epochs 40
+python main.py --accel --emsize 1500 --nhid 1500 --dropout 0.65 --epochs 40 --tied
 ```
diff --git a/word_language_model/generate.py b/word_language_model/generate.py
index 13bd8abfcd..49bdff1d52 100644
--- a/word_language_model/generate.py
+++ b/word_language_model/generate.py
@@ -21,30 +21,20 @@
                     help='number of words to generate')
 parser.add_argument('--seed', type=int, default=1111,
                     help='random seed')
-parser.add_argument('--cuda', action='store_true',
-                    help='use CUDA')
-parser.add_argument('--mps', action='store_true', default=False,
-                    help='enables macOS GPU training')
 parser.add_argument('--temperature', type=float, default=1.0,
                     help='temperature - higher will increase diversity')
 parser.add_argument('--log-interval', type=int, default=100,
                     help='reporting interval')
+parser.add_argument('--accel', action='store_true', default=False,
+                    help='use accelerator')
 args = parser.parse_args()
 
 # Set the random seed manually for reproducibility.
 torch.manual_seed(args.seed)
-if torch.cuda.is_available():
-    if not args.cuda:
-        print("WARNING: You have a CUDA device, so you should probably run with --cuda.")
-if torch.backends.mps.is_available():
-    if not args.mps:
-        print("WARNING: You have mps device, to enable macOS GPU run with --mps.")
-
-use_mps = args.mps and torch.backends.mps.is_available()
-if args.cuda:
-    device = torch.device("cuda")
-elif use_mps:
-    device = torch.device("mps")
+
+if args.accel and torch.accelerator.is_available():
+    device = torch.accelerator.current_accelerator()
+
 else:
     device = torch.device("cpu")
 
@@ -52,7 +42,7 @@
     parser.error("--temperature has to be greater or equal 1e-3.")
 
 with open(args.checkpoint, 'rb') as f:
-    model = torch.load(f, map_location=device)
+    model = torch.load(f, map_location=device, weights_only=False)
 model.eval()
 
 corpus = data.Corpus(args.data)
diff --git a/word_language_model/main.py b/word_language_model/main.py
index 23bda03e73..ba2e1dbf2c 100644
--- a/word_language_model/main.py
+++ b/word_language_model/main.py
@@ -37,10 +37,6 @@
                     help='tie the word embedding and softmax weights')
 parser.add_argument('--seed', type=int, default=1111,
                     help='random seed')
-parser.add_argument('--cuda', action='store_true', default=False,
-                    help='use CUDA')
-parser.add_argument('--mps', action='store_true', default=False,
-                    help='enables macOS GPU training')
 parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                     help='report interval')
 parser.add_argument('--save', type=str, default='model.pt',
@@ -51,25 +47,20 @@
                     help='the number of heads in the encoder/decoder of the transformer model')
 parser.add_argument('--dry-run', action='store_true',
                     help='verify the code and the model')
+parser.add_argument('--accel', action='store_true', help='use accelerator')
 args = parser.parse_args()
 
 # Set the random seed manually for reproducibility.
 torch.manual_seed(args.seed)
-if torch.cuda.is_available():
-    if not args.cuda:
-        print("WARNING: You have a CUDA device, so you should probably run with --cuda.")
-if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
-    if not args.mps:
-        print("WARNING: You have mps device, to enable macOS GPU run with --mps.")
-
-use_mps = args.mps and torch.backends.mps.is_available()
-if args.cuda:
-    device = torch.device("cuda")
-elif use_mps:
-    device = torch.device("mps")
+
+if args.accel and torch.accelerator.is_available():
+    device = torch.accelerator.current_accelerator()
+
 else:
     device = torch.device("cpu")
 
+print("Using device:", device)
+
 ###############################################################################
 # Load data
 ###############################################################################
@@ -243,11 +234,11 @@ def export_onnx(path, batch_size, seq_len):
 
 # Load the best saved model.
 with open(args.save, 'rb') as f:
-    model = torch.load(f)
+    model = torch.load(f, weights_only=False)
     # after load the rnn params are not a continuous chunk of memory
     # this makes them a continuous chunk, and will speed up forward pass
    # Currently, only rnn model supports flatten_parameters function.
-    if args.model in ['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU']:
+    if args.model in ['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU'] and device.type == 'cuda':
         model.rnn.flatten_parameters()
 
 # Run on test data.
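Note on the new device selection: the snippet below is a minimal, standalone sketch of how the `--accel` flag added to `main.py` and `generate.py` is expected to behave. It assumes PyTorch 2.6 or newer, where the `torch.accelerator` API is available; the argument parser, script shape, and the demo tensor at the end are illustrative only and are not part of this change.

```python
# Minimal sketch (not part of this diff) of the --accel device-selection
# pattern used above. Assumes PyTorch 2.6+, which provides torch.accelerator.
import argparse

import torch

parser = argparse.ArgumentParser(description='accelerator selection sketch')
parser.add_argument('--accel', action='store_true', help='use accelerator')
args = parser.parse_args()

if args.accel and torch.accelerator.is_available():
    # Returns a torch.device for the active backend (cuda, mps, xpu, ...).
    device = torch.accelerator.current_accelerator()
else:
    device = torch.device("cpu")

print("Using device:", device)

# Tensors and modules are then placed on the selected device as usual.
x = torch.randn(2, 3, device=device)
print(x.device)
```

Because `current_accelerator()` can return a non-CUDA device (e.g. MPS or XPU), the `flatten_parameters()` call in `main.py` is additionally gated on `device.type == 'cuda'`, as shown in the final hunk above.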