From cf7a027dcf4f3df1017847f22cd0ba1a25f989bb Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 09:36:45 +0800
Subject: [PATCH 01/12] add --device for word_language_model

---
 word_language_model/README.md   | 7 +++++++
 word_language_model/generate.py | 4 +++-
 word_language_model/main.py     | 6 ++++--
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/word_language_model/README.md b/word_language_model/README.md
index 254b726585..ca217399c8 100644
--- a/word_language_model/README.md
+++ b/word_language_model/README.md
@@ -37,6 +37,7 @@ optional arguments:
   --seed SEED random seed
   --cuda use CUDA
   --mps enable GPU on macOS
+  --device DEVICE backend device
   --log-interval N report interval
   --save SAVE path to save the final model
   --onnx-export ONNX_EXPORT
@@ -54,3 +55,9 @@ python main.py --cuda --emsize 650 --nhid 650 --dropout 0.5 --epochs 40 --tied
 python main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --epochs 40
 python main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --epochs 40 --tied
 ```
+
+You can also use non-CUDA devices like this:
+
+```bash
+python main.py --device npu --emsize 650 --nhid 650 --dropout 0.5 --epochs 40
+```
diff --git a/word_language_model/generate.py b/word_language_model/generate.py
index 13bd8abfcd..c201e3ff21 100644
--- a/word_language_model/generate.py
+++ b/word_language_model/generate.py
@@ -23,6 +23,8 @@
                     help='random seed')
 parser.add_argument('--cuda', action='store_true',
                     help='use CUDA')
+parser.add_argument('--device', type=str, default='cpu',
+                    help='backend device')
 parser.add_argument('--mps', action='store_true', default=False,
                     help='enables macOS GPU training')
 parser.add_argument('--temperature', type=float, default=1.0,
@@ -46,7 +48,7 @@
 elif use_mps:
     device = torch.device("mps")
 else:
-    device = torch.device("cpu")
+    device = torch.device(args.device)
 
 if args.temperature < 1e-3:
     parser.error("--temperature has to be greater or equal 1e-3.")
diff --git a/word_language_model/main.py b/word_language_model/main.py
index 23bda03e73..71c4446003 100644
--- a/word_language_model/main.py
+++ b/word_language_model/main.py
@@ -40,7 +40,9 @@
 parser.add_argument('--cuda', action='store_true', default=False,
                     help='use CUDA')
 parser.add_argument('--mps', action='store_true', default=False,
-                    help='enables macOS GPU training')
+                    help='enables macOS GPU training')
+parser.add_argument('--device', type=str, default='cpu',
+                    help='backend device')
 parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                     help='report interval')
 parser.add_argument('--save', type=str, default='model.pt',
@@ -68,7 +70,7 @@
 elif use_mps:
     device = torch.device("mps")
 else:
-    device = torch.device("cpu")
+    device = torch.device(args.device)
 
 ###############################################################################
 # Load data
From aeacb0e2654c82e441d7f9b0c053cfc36772b0c4 Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 09:37:02 +0800
Subject: [PATCH 02/12] add --device for vae

---
 vae/README.md | 15 ++++++++-------
 vae/main.py   | 4 +++-
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/vae/README.md b/vae/README.md
index cda6a33672..393776f219 100644
--- a/vae/README.md
+++ b/vae/README.md
@@ -12,10 +12,11 @@ The main.py script accepts the following arguments:
 
 ```bash
 optional arguments:
-  --batch-size input batch size for training (default: 128)
-  --epochs number of epochs to train (default: 10)
-  --no-cuda enables CUDA training
-  --mps enables GPU on macOS
-  --seed random seed (default: 1)
-  --log-interval how many batches to wait before logging training status
-```
\ No newline at end of file
+  --batch-size N input batch size for training (default: 128)
+  --epochs EPOCHS number of epochs to train (default: 10)
+  --no-cuda disables CUDA training
+  --no-mps disables macOS GPU training
+  --device DEVICE backend device
+  --seed SEED random seed (default: 1)
+  --log-interval N how many batches to wait before logging training status
+```
diff --git a/vae/main.py b/vae/main.py
index d69833fbe0..a49efe4fdd 100644
--- a/vae/main.py
+++ b/vae/main.py
@@ -17,6 +17,8 @@
                     help='disables CUDA training')
 parser.add_argument('--no-mps', action='store_true', default=False,
                     help='disables macOS GPU training')
+parser.add_argument('--device', type=str, default='cpu',
+                    help='backend device')
 parser.add_argument('--seed', type=int, default=1, metavar='S',
                     help='random seed (default: 1)')
 parser.add_argument('--log-interval', type=int, default=10, metavar='N',
@@ -32,7 +34,7 @@
 elif use_mps:
     device = torch.device("mps")
 else:
-    device = torch.device("cpu")
+    device = torch.device(args.device)
 
 kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
 train_loader = torch.utils.data.DataLoader(
From 271ae40dedb28e6f12d88de1a9065ff9ca05c9ab Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 09:37:21 +0800
Subject: [PATCH 03/12] add --device for super_resolution

---
 super_resolution/README.md        | 3 ++-
 super_resolution/main.py          | 3 ++-
 super_resolution/super_resolve.py | 4 ++++
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/super_resolution/README.md b/super_resolution/README.md
index 6b5fe831d9..883a9b7d12 100644
--- a/super_resolution/README.md
+++ b/super_resolution/README.md
@@ -5,7 +5,7 @@ This example illustrates how to use the efficient sub-pixel convolution layer de
 ```
 usage: main.py [-h] --upscale_factor UPSCALE_FACTOR [--batchSize BATCHSIZE]
                [--testBatchSize TESTBATCHSIZE] [--nEpochs NEPOCHS] [--lr LR]
-               [--cuda] [--threads THREADS] [--seed SEED]
+               [--cuda] [--device DEVICE] [--threads THREADS] [--seed SEED]
 
 PyTorch Super Res Example
 
 optional arguments:
   -h, --help show this help message and exit
@@ -18,6 +18,7 @@ optional arguments:
   --lr Learning Rate. Default=0.01
   --cuda use cuda
   --mps enable GPU on macOS
+  --device DEVICE backend device
   --threads number of threads for data loader to use Default=4
   --seed random seed to use. Default=123
 ```
diff --git a/super_resolution/main.py b/super_resolution/main.py
index 8c5519582a..8b6850706a 100644
--- a/super_resolution/main.py
+++ b/super_resolution/main.py
@@ -18,6 +18,7 @@
 parser.add_argument('--lr', type=float, default=0.01, help='Learning Rate. Default=0.01')
 parser.add_argument('--cuda', action='store_true', help='use cuda?')
 parser.add_argument('--mps', action='store_true', default=False, help='enables macOS GPU training')
+parser.add_argument('--device', type=str, default='cpu', help='backend device')
 parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use')
 parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
 opt = parser.parse_args()
@@ -37,7 +38,7 @@
 elif use_mps:
     device = torch.device("mps")
 else:
-    device = torch.device("cpu")
+    device = torch.device(opt.device)
 
 print('===> Loading datasets')
 train_set = get_training_set(opt.upscale_factor)
diff --git a/super_resolution/super_resolve.py b/super_resolution/super_resolve.py
index 750d635312..2db3d5297d 100644
--- a/super_resolution/super_resolve.py
+++ b/super_resolution/super_resolve.py
@@ -12,6 +12,7 @@
 parser.add_argument('--model', type=str, required=True, help='model file to use')
 parser.add_argument('--output_filename', type=str, help='where to save the output image')
 parser.add_argument('--cuda', action='store_true', help='use cuda')
+parser.add_argument('--device', type=str, default='cpu', help='backend device')
 opt = parser.parse_args()
 
 print(opt)
@@ -25,6 +26,9 @@
 if opt.cuda:
     model = model.cuda()
     input = input.cuda()
+elif opt.device:
+    model = model.to(opt.device)
+    input = input.to(opt.device)
 
 out = model(input)
 out = out.cpu()
From 61929977a6e1f3e3bd65b348fcbb3fb5699fb752 Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 09:37:39 +0800
Subject: [PATCH 04/12] add --device for siamese_network

---
 siamese_network/main.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/siamese_network/main.py b/siamese_network/main.py
index 8f420a9b01..1d72653a60 100644
--- a/siamese_network/main.py
+++ b/siamese_network/main.py
@@ -251,6 +251,8 @@ def main():
                         help='disables CUDA training')
     parser.add_argument('--no-mps', action='store_true', default=False,
                         help='disables macOS GPU training')
+    parser.add_argument('--device', type=str, default='cpu',
+                        help='backend device')
     parser.add_argument('--dry-run', action='store_true', default=False,
                         help='quickly check a single pass')
     parser.add_argument('--seed', type=int, default=1, metavar='S',
@@ -271,7 +273,7 @@ def main():
     elif use_mps:
         device = torch.device("mps")
     else:
-        device = torch.device("cpu")
+        device = torch.device(args.device)
 
     train_kwargs = {'batch_size': args.batch_size}
     test_kwargs = {'batch_size': args.test_batch_size}
From 2e70ddadbcdbb2f64517ab5463c8977d05ea3e26 Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 10:31:47 +0800
Subject: [PATCH 05/12] add --device

- dcgan
- gat
- gcn
- language_translation
---
 dcgan/README.md              | 3 ++-
 dcgan/main.py                | 3 ++-
 gat/README.md                | 3 ++-
 gat/main.py                  | 4 +++-
 gcn/main.py                  | 4 +++-
 language_translation/main.py | 8 ++++----
 6 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/dcgan/README.md b/dcgan/README.md
index 3f7bdef6b1..b921bb7e22 100644
--- a/dcgan/README.md
+++ b/dcgan/README.md
@@ -24,7 +24,7 @@ usage: main.py [-h] --dataset DATASET --dataroot DATAROOT [--workers WORKERS]
                [--batchSize BATCHSIZE] [--imageSize IMAGESIZE] [--nz NZ]
                [--ngf NGF] [--ndf NDF] [--niter NITER] [--lr LR]
                [--beta1 BETA1] [--cuda] [--ngpu NGPU] [--netG NETG]
-               [--netD NETD] [--mps]
+               [--netD NETD] [--mps] [--device DEVICE]
 
 optional arguments:
   -h, --help show this help message and exit
   --dataset DATASET cifar10 | lsun | mnist |imagenet | folder | lfw | fake
   --dataroot DATAROOT path to dataset
   --workers WORKERS number of data loading workers
   --batchSize BATCHSIZE input batch size
   --imageSize IMAGESIZE the height / width of the input image to network
   --nz NZ size of the latent z vector
   --ngf NGF
   --ndf NDF
   --niter NITER number of epochs to train for
   --lr LR learning rate, default=0.0002
   --beta1 BETA1 beta1 for adam. default=0.5
   --cuda enables cuda
   --mps enables macOS GPU
+  --device backend device
   --ngpu NGPU number of GPUs to use
   --netG NETG path to netG (to continue training)
   --netD NETD path to netD (to continue training)
   --outf OUTF folder to output images and model checkpoints
   --manualSeed SEED manual seed
   --classes CLASSES comma separated list of classes for the lsun data set
diff --git a/dcgan/main.py b/dcgan/main.py
index 2f45b2dbd2..9416893856 100644
--- a/dcgan/main.py
+++ b/dcgan/main.py
@@ -34,6 +34,7 @@
 parser.add_argument('--manualSeed', type=int, help='manual seed')
 parser.add_argument('--classes', default='bedroom', help='comma separated list of classes for the lsun data set')
 parser.add_argument('--mps', action='store_true', default=False, help='enables macOS GPU training')
+parser.add_argument('--device', type=str, default='cpu', help='backend device')
 
 opt = parser.parse_args()
 print(opt)
@@ -112,7 +113,7 @@
 elif use_mps:
     device = torch.device("mps")
 else:
-    device = torch.device("cpu")
+    device = torch.device(opt.device)
 
 ngpu = int(opt.ngpu)
 nz = int(opt.nz)
diff --git a/gat/README.md b/gat/README.md
index 7bb71bc17b..f4d6848331 100644
--- a/gat/README.md
+++ b/gat/README.md
@@ -69,7 +69,7 @@ python main.py --epochs 300 --lr 0.005 --l2 5e-4 --dropout-p 0.6 --num-heads 8 -
 In more detail, the `main.py` script recieves following arguments:
 ```
 usage: main.py [-h] [--epochs EPOCHS] [--lr LR] [--l2 L2] [--dropout-p DROPOUT_P] [--hidden-dim HIDDEN_DIM] [--num-heads NUM_HEADS] [--concat-heads] [--val-every VAL_EVERY]
-               [--no-cuda] [--no-mps] [--dry-run] [--seed S]
+               [--no-cuda] [--no-mps] [--dry-run] [--seed S] [--device DEVICE]
 
 PyTorch Graph Attention Network
 
@@ -89,6 +89,7 @@ options:
                        epochs to wait for print training and validation evaluation (default: 20)
   --no-cuda disables CUDA training
   --no-mps disables macOS GPU training
+  --device DEVICE backend device
   --dry-run quickly check a single pass
   --seed S random seed (default: 13)
 ```
diff --git a/gat/main.py b/gat/main.py
index 9c143af8ec..d70e380c94 100644
--- a/gat/main.py
+++ b/gat/main.py
@@ -311,6 +311,8 @@ def test(model, criterion, input, target, mask):
                         help='disables CUDA training')
     parser.add_argument('--no-mps', action='store_true', default=False,
                         help='disables macOS GPU training')
+    parser.add_argument('--device', type=str, default='cpu',
+                        help='backend device')
     parser.add_argument('--dry-run', action='store_true', default=False,
                         help='quickly check a single pass')
     parser.add_argument('--seed', type=int, default=13, metavar='S',
@@ -327,7 +329,7 @@ def test(model, criterion, input, target, mask):
     elif use_mps:
         device = torch.device('mps')
     else:
-        device = torch.device('cpu')
+        device = torch.device(args.device)
     print(f'Using {device} device')
 
     # Load the dataset
diff --git a/gcn/main.py b/gcn/main.py
index 5c8362b576..049129f64b 100644
--- a/gcn/main.py
+++ b/gcn/main.py
@@ -220,6 +220,8 @@ def test(model, criterion, input, target, mask):
                         help='disables CUDA training')
     parser.add_argument('--no-mps', action='store_true', default=False,
                         help='disables macOS GPU training')
+    parser.add_argument('--device', type=str, default='cpu',
+                        help='backend device')
     parser.add_argument('--dry-run', action='store_true', default=False,
                         help='quickly check a single pass')
     parser.add_argument('--seed', type=int, default=42, metavar='S',
@@ -236,7 +238,7 @@ def test(model, criterion, input, target, mask):
     elif use_mps:
         device = torch.device('mps')
     else:
-        device = torch.device('cpu')
+        device = torch.device(args.device)
     print(f'Using {device} device')
 
     cora_url = 'https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz'
diff --git a/language_translation/main.py b/language_translation/main.py
index 2b4fbb94c3..3e82748556 100644
--- a/language_translation/main.py
+++ b/language_translation/main.py
@@ -272,9 +272,9 @@ def main(opts):
                         help="Default learning rate")
     parser.add_argument("--batch", type=int, default=128,
                         help="Batch size")
-    parser.add_argument("--backend", type=str, default="cpu",
-                        help="Batch size")
-
+    parser.add_argument("--device", type=str, default="cpu",
+                        help="backend device")
+
     # Transformer settings
     parser.add_argument("--attn_heads", type=int, default=8,
                         help="Number of attention heads")
@@ -298,7 +298,7 @@ def main(opts):
 
     args = parser.parse_args()
 
-    DEVICE = torch.device("cuda" if args.backend == "gpu" and torch.cuda.is_available() else "cpu")
+    DEVICE = torch.device("cuda" if args.device == "gpu" and torch.cuda.is_available() else args.device)
 
     if args.inference:
         inference(args)
From e44426235a43233cc5772832b479e617ff5d014c Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 09:38:38 +0800
Subject: [PATCH 06/12] add --device for legacy/snli

---
 legacy/snli/README.md | 2 +-
 legacy/snli/train.py  | 2 +-
 legacy/snli/util.py   | 2 ++
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/legacy/snli/README.md b/legacy/snli/README.md
index 2da5104975..a7d2927c10 100644
--- a/legacy/snli/README.md
+++ b/legacy/snli/README.md
@@ -25,7 +25,7 @@ spacy
 Start the training process with:
 
 ```bash
-python train.py --lower --word-vectors [PATH_TO_WORD_VECTORS] --vector-cache [PATH_TO_VECTOR_CACHE] --epochs [NUMBER_OF_EPOCHS] --batch-size [BATCH_SIZE] --save-path [PATH_TO_SAVE_MODEL] --gpu [GPU_NUMBER]
+python train.py --lower --word-vectors [PATH_TO_WORD_VECTORS] --vector-cache [PATH_TO_VECTOR_CACHE] --epochs [NUMBER_OF_EPOCHS] --batch-size [BATCH_SIZE] --save-path [PATH_TO_SAVE_MODEL] --gpu [GPU_NUMBER] --device [BACKEND_DEVICE]
 ```
 
 ## 🏋️‍♀️ Training
diff --git a/legacy/snli/train.py b/legacy/snli/train.py
index aa70aef45a..f664ffdfbd 100644
--- a/legacy/snli/train.py
+++ b/legacy/snli/train.py
@@ -20,7 +20,7 @@
 elif torch.backends.mps.is_available():
     device = torch.device('mps')
 else:
-    device = torch.device('cpu')
+    device = torch.device(args.device)
 
 inputs = data.Field(lower=args.lower, tokenize='spacy')
 answers = data.Field(sequential=False)
diff --git a/legacy/snli/util.py b/legacy/snli/util.py
index 1bc8e0b2cc..4e2b488767 100644
--- a/legacy/snli/util.py
+++ b/legacy/snli/util.py
@@ -20,6 +20,8 @@ def makedirs(name):
 
 def get_args():
     parser = ArgumentParser(description='PyTorch/torchtext SNLI example')
+    parser.add_argument('--device', type=str, default='cpu',
+                        help='backend device')
     parser.add_argument('--epochs', type=int, default=50,
                         help='the number of total epochs to run.')
     parser.add_argument('--batch_size', type=int, default=128,
From 1cc28edc9291d38ce5752cd226c52ce76d98b55e Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 09:32:14 +0800
Subject: [PATCH 07/12] add --device for mnist

---
 mnist/main.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/mnist/main.py b/mnist/main.py
index 184dc4744f..d00cbb67c0 100644
--- a/mnist/main.py
+++ b/mnist/main.py
@@ -86,6 +86,8 @@ def main():
                         help='disables CUDA training')
     parser.add_argument('--no-mps', action='store_true', default=False,
                         help='disables macOS GPU training')
+    parser.add_argument('--device', type=str, default='cpu',
+                        help='backend device')
     parser.add_argument('--dry-run', action='store_true', default=False,
                         help='quickly check a single pass')
     parser.add_argument('--seed', type=int, default=1, metavar='S',
@@ -105,7 +107,7 @@
     elif use_mps:
         device = torch.device("mps")
     else:
-        device = torch.device("cpu")
+        device = torch.device(args.device)
 
     train_kwargs = {'batch_size': args.batch_size}
     test_kwargs = {'batch_size': args.test_batch_size}
From 6d552c321c3f3ec7630d3df150159cb7e5021979 Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Thu, 19 Sep 2024 20:18:26 +0800
Subject: [PATCH 08/12] add --device for mnist_rnn

---
 mnist_rnn/main.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/mnist_rnn/main.py b/mnist_rnn/main.py
index 2fa64c00d6..c77191ce8c 100644
--- a/mnist_rnn/main.py
+++ b/mnist_rnn/main.py
@@ -95,6 +95,8 @@ def main():
                         help='enables CUDA training')
     parser.add_argument('--mps', action="store_true", default=False,
                         help="enables MPS training")
+    parser.add_argument('--device', type=str, default='cpu',
+                        help='backend device')
     parser.add_argument('--dry-run', action='store_true', default=False,
                         help='quickly check a single pass')
     parser.add_argument('--seed', type=int, default=1, metavar='S',
@@ -110,7 +112,7 @@ def main():
     elif args.mps and not args.cuda:
         device = "mps"
     else:
-        device = "cpu"
+        device = args.device
 
     device = torch.device(device)
 
From 384ca54c4ba6637dcf465639469de4e36cb791df Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 09:28:57 +0800
Subject: [PATCH 09/12] add --device for mnist_forward_forward

---
 mnist_forward_forward/README.md | 1 +
 mnist_forward_forward/main.py   | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/mnist_forward_forward/README.md b/mnist_forward_forward/README.md
index f6ae12e56d..6a8f70aace 100644
--- a/mnist_forward_forward/README.md
+++ b/mnist_forward_forward/README.md
@@ -18,6 +18,7 @@ optional arguments:
   --lr LR learning rate (default: 0.03)
   --no_cuda disables CUDA training
   --no_mps disables MPS training
+  --device DEVICE backend device
   --seed SEED random seed (default: 1)
   --save_model For saving the current Model
   --train_size TRAIN_SIZE
diff --git a/mnist_forward_forward/main.py b/mnist_forward_forward/main.py
index f137dee48a..3702b213ea 100644
--- a/mnist_forward_forward/main.py
+++ b/mnist_forward_forward/main.py
@@ -108,6 +108,9 @@ def train(self, x_pos, x_neg):
     parser.add_argument(
         "--no_mps", action="store_true", default=False, help="disables MPS training"
     )
+    parser.add_argument(
+        '--device', type=str, default='cpu', help='backend device'
+    )
     parser.add_argument(
         "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
     )
@@ -145,7 +148,7 @@ def train(self, x_pos, x_neg):
     elif use_mps:
         device = torch.device("mps")
     else:
-        device = torch.device("cpu")
+        device = torch.device(args.device)
 
     train_kwargs = {"batch_size": args.train_size}
     test_kwargs = {"batch_size": args.test_size}
From 00b18f023ef6596261e459e4d98c32fb5aec34b4 Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Fri, 20 Sep 2024 09:23:52 +0800
Subject: [PATCH 10/12] add --device for mnist_hogwild

---
 mnist_hogwild/README.md | 1 +
 mnist_hogwild/main.py   | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/mnist_hogwild/README.md b/mnist_hogwild/README.md
index 5f12161d53..361ff43415 100644
--- a/mnist_hogwild/README.md
+++ b/mnist_hogwild/README.md
@@ -21,6 +21,7 @@ optional arguments:
   --log_interval how many batches to wait before logging training status
   --num_process how many training processes to use (default: 2)
   --cuda enables CUDA training
+  --device DEVICE backend device
   --dry-run quickly check a single pass
   --save-model For Saving the current Model
 ```
diff --git a/mnist_hogwild/main.py b/mnist_hogwild/main.py
index 6fa449233d..6af0c4da71 100644
--- a/mnist_hogwild/main.py
+++ b/mnist_hogwild/main.py
@@ -31,6 +31,8 @@
                     help='enables CUDA training')
 parser.add_argument('--mps', action='store_true', default=False,
                     help='enables macOS GPU training')
+parser.add_argument('--device', type=str, default='cpu',
+                    help='backend device')
 parser.add_argument('--save_model', action='store_true', default=False,
                     help='save the trained model to state_dict')
 parser.add_argument('--dry-run', action='store_true', default=False,
@@ -65,7 +67,7 @@ def forward(self, x):
     elif use_mps:
         device = torch.device("mps")
     else:
-        device = torch.device("cpu")
+        device = torch.device(args.device)
 
     transform=transforms.Compose([
         transforms.ToTensor(),
From d7f2f6e980fd0585875c13d683fb949d2d520f6a Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Mon, 23 Sep 2024 15:03:19 +0800
Subject: [PATCH 11/12] add --device for run_python_examples.sh

---
 run_python_examples.sh | 37 +++++++++++++++++++++++--------------
 1 file changed, 23 insertions(+), 14 deletions(-)

diff --git a/run_python_examples.sh b/run_python_examples.sh
index 0e06e4cfc0..344846e95e 100755
--- a/run_python_examples.sh
+++ b/run_python_examples.sh
@@ -13,6 +13,15 @@
 BASE_DIR="$(pwd)/$(dirname $0)"
 source $BASE_DIR/utils.sh
 
+# Run on a specific backend device with 'export BACKEND_DEVICE=cpu'. It will
+# be set to 'cpu' by default and has lower priority than '--cuda' and '--mps'.
+# See https://github.com/pytorch/examples/pull/1288 for more information.
+if [ -n "${BACKEND_DEVICE}" ]; then
+  DEVICE_FLAG="--device ${BACKEND_DEVICE}"
+else
+  DEVICE_FLAG=""
+fi
+
 USE_CUDA=$(python -c "import torchvision, torch; print(torch.cuda.is_available())")
 case $USE_CUDA in
   "True")
@@ -32,7 +41,7 @@ esac
 
 function dcgan() {
   start
-  python main.py --dataset fake $CUDA_FLAG --mps --dry-run || error "dcgan failed"
+  python main.py --dataset fake $CUDA_FLAG --mps $DEVICE_FLAG --dry-run || error "dcgan failed"
 }
 
 function fast_neural_style() {
@@ -44,7 +53,7 @@ function fast_neural_style() {
   test -d "saved_models" || { error "saved models not found"; return; }
 
   echo "running fast neural style model"
-  python neural_style/neural_style.py eval --content-image images/content-images/amber.jpg --model saved_models/candy.pth --output-image images/output-images/amber-candy.jpg --cuda $CUDA --mps || error "neural_style.py failed"
+  python neural_style/neural_style.py eval --content-image images/content-images/amber.jpg --model saved_models/candy.pth --output-image images/output-images/amber-candy.jpg --cuda $CUDA --mps $DEVICE_FLAG || error "neural_style.py failed"
 }
 
 function imagenet() {
@@ -63,36 +72,36 @@ function imagenet() {
 function language_translation() {
   start
   python -m spacy download en || error "couldn't download en package from spacy"
   python -m spacy download de || error "couldn't download de package from spacy"
-  python main.py -e 1 --enc_layers 1 --dec_layers 1 --backend cpu --logging_dir output/ --dry_run || error "language translation example failed"
+  python main.py -e 1 --enc_layers 1 --dec_layers 1 $DEVICE_FLAG --logging_dir output/ --dry_run || error "language translation example failed"
 }
 
 function mnist() {
   start
-  python main.py --epochs 1 --dry-run || error "mnist example failed"
+  python main.py --epochs 1 --dry-run $DEVICE_FLAG || error "mnist example failed"
 }
 
 function mnist_forward_forward() {
   start
-  python main.py --epochs 1 --no_mps --no_cuda || error "mnist forward forward failed"
+  python main.py --epochs 1 --no_mps --no_cuda $DEVICE_FLAG || error "mnist forward forward failed"
 }
 
 function mnist_hogwild() {
   start
-  python main.py --epochs 1 --dry-run $CUDA_FLAG || error "mnist hogwild failed"
+  python main.py --epochs 1 --dry-run $CUDA_FLAG $DEVICE_FLAG || error "mnist hogwild failed"
 }
 
 function mnist_rnn() {
   start
-  python main.py --epochs 1 --dry-run || error "mnist rnn example failed"
+  python main.py --epochs 1 --dry-run $DEVICE_FLAG || error "mnist rnn example failed"
 }
 
 function regression() {
   start
-  python main.py --epochs 1 $CUDA_FLAG || error "regression failed"
+  python main.py --epochs 1 $CUDA_FLAG $DEVICE_FLAG || error "regression failed"
 }
 
 function siamese_network() {
   start
-  python main.py --epochs 1 --dry-run || error "siamese network example failed"
+  python main.py --epochs 1 --dry-run $DEVICE_FLAG || error "siamese network example failed"
 }
 
 function reinforcement_learning() {
@@ -123,7 +132,7 @@ function fx() {
 
 function super_resolution() {
   start
-  python main.py --upscale_factor 3 --batchSize 4 --testBatchSize 100 --nEpochs 1 --lr 0.001 --mps || error "super resolution failed"
+  python main.py --upscale_factor 3 --batchSize 4 --testBatchSize 100 --nEpochs 1 --lr 0.001 --mps $DEVICE_FLAG || error "super resolution failed"
 }
 
 function time_sequence_prediction() {
@@ -134,7 +143,7 @@ function time_sequence_prediction() {
 
 function vae() {
   start
-  python main.py --epochs 1 || error "vae failed"
+  python main.py --epochs 1 $DEVICE_FLAG || error "vae failed"
 }
 
 function vision_transformer() {
@@ -144,17 +153,17 @@ function vision_transformer() {
 
 function word_language_model() {
   start
-  python main.py --epochs 1 --dry-run $CUDA_FLAG --mps || error "word_language_model failed"
+  python main.py --epochs 1 --dry-run $CUDA_FLAG --mps $DEVICE_FLAG || error "word_language_model failed"
 }
 
 function gcn() {
   start
-  python main.py --epochs 1 --dry-run || error "graph convolutional network failed"
+  python main.py --epochs 1 --dry-run $DEVICE_FLAG || error "graph convolutional network failed"
 }
 
 function gat() {
   start
-  python main.py --epochs 1 --dry-run || error "graph attention network failed"
+  python main.py --epochs 1 --dry-run $DEVICE_FLAG || error "graph attention network failed"
 }
 
 function clean() {
From 2846161e5b0eee8d55b7e4a63d30380e802fb12b Mon Sep 17 00:00:00 2001
From: Yuanhao Ji
Date: Mon, 23 Sep 2024 15:26:20 +0800
Subject: [PATCH 12/12] add --device for fast_neural_style

---
 fast_neural_style/README.md                    | 2 ++
 fast_neural_style/neural_style/neural_style.py | 9 ++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/fast_neural_style/README.md b/fast_neural_style/README.md
index 8057847214..674cf7e7f0 100644
--- a/fast_neural_style/README.md
+++ b/fast_neural_style/README.md
@@ -28,6 +28,7 @@ python neural_style/neural_style.py eval --content-image