@@ -248,7 +248,7 @@ def prepare_parser():
     help='Default location to store all weights, samples, data, and logs '
          ' (default: %(default)s)')
   parser.add_argument(
-    '--dataset_root', type=str, default='data',
+    '--data_root', type=str, default='data',
     help='Default location where data is stored (default: %(default)s)')
   parser.add_argument(
     '--weights_root', type=str, default='weights',
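
Below the hunk, a minimal usage sketch (assumed, not part of this commit) of how the renamed flag would be consumed, taking prepare_parser() to return a standard argparse.ArgumentParser:

# Hypothetical call site; only the flag name changes for callers.
parser = prepare_parser()
config = vars(parser.parse_args(['--data_root', 'data']))
print(config['data_root'])  # -> 'data'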
@@ -521,15 +521,15 @@ def __len__(self):
 
 
 # Convenience function to centralize all data loaders
-def get_data_loaders(dataset, dataset_root=None, augment=False, batch_size=64,
+def get_data_loaders(dataset, data_root=None, augment=False, batch_size=64,
                      num_workers=8, shuffle=True, load_in_mem=False, hdf5=False,
                      pin_memory=True, drop_last=True, start_itr=0,
                      num_epochs=500, use_multiepoch_sampler=False,
                      **kwargs):
 
   # Append /FILENAME.hdf5 to root if using hdf5
-  dataset_root += '/%s' % root_dict[dataset]
-  print('Using dataset root location %s' % dataset_root)
+  data_root += '/%s' % root_dict[dataset]
+  print('Using dataset root location %s' % data_root)
 
   which_dataset = dset_dict[dataset]
   norm_mean = [0.5,0.5,0.5]
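
To illustrate the renamed path logic (a sketch under assumptions, not code from this commit), root_dict is taken to map dataset keys to a subfolder or HDF5 filename, so the append yields either a directory or an .hdf5 path:

# Assumed shape of root_dict; the real mapping is defined elsewhere in this file.
root_dict = {'I128': 'ImageNet', 'I128_hdf5': 'ILSVRC128.hdf5'}
data_root = 'data'
dataset = 'I128_hdf5'
data_root += '/%s' % root_dict[dataset]
print('Using dataset root location %s' % data_root)  # data/ILSVRC128.hdf5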
@@ -562,7 +562,7 @@ def get_data_loaders(dataset, dataset_root=None, augment=False, batch_size=64,
   train_transform = transforms.Compose(train_transform + [
     transforms.ToTensor(),
     transforms.Normalize(norm_mean, norm_std)])
-  train_set = which_dataset(root=dataset_root, transform=train_transform,
+  train_set = which_dataset(root=data_root, transform=train_transform,
                             load_in_mem=load_in_mem, **dataset_kwargs)
 
   # Prepare loader; the loaders list is for forward compatibility with
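
The truncated comment above leads into the loader construction that follows the hunk; a minimal sketch of what that plausibly looks like, assuming a plain torch.utils.data.DataLoader wired to the parameters of get_data_loaders:

# Sketch only; variable names mirror the function signature above.
from torch.utils.data import DataLoader
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=shuffle,
                          num_workers=num_workers, pin_memory=pin_memory,
                          drop_last=drop_last)
loaders = [train_loader]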