# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from nvidia.dali import fn
from nvidia.dali import types

from nvidia.dali.pipeline.experimental import pipeline_def

from nvidia.dali.auto_aug import auto_augment, trivial_augment


@pipeline_def(enable_conditionals=True)
def training_pipe(data_dir, interpolation, image_size, output_layout, automatic_augmentation,
                  dali_device="gpu", rank=0, world_size=1):
    # Per-sample coin flip that drives the random horizontal flip applied further down
    rng = fn.random.coin_flip(probability=0.5)

    jpegs, labels = fn.readers.file(name="Reader", file_root=data_dir, shard_id=rank,
                                    num_shards=world_size, random_shuffle=True, pad_last_batch=True)

    if dali_device == "gpu":
        decoder_device = "mixed"
        resize_device = "gpu"
    else:
        decoder_device = "cpu"
        resize_device = "cpu"

    # This padding sets the size of the internal nvJPEG buffers to be able to handle all images
    # from full-sized ImageNet without additional reallocations
    images = fn.decoders.image_random_crop(jpegs, device=decoder_device, output_type=types.RGB,
                                           device_memory_padding=211025920,
                                           host_memory_padding=140544512,
                                           random_aspect_ratio=[0.75, 4.0 / 3.0],
                                           random_area=[0.08, 1.0])

    images = fn.resize(images, device=resize_device, size=[image_size, image_size],
                       interp_type=interpolation, antialias=False)

    # Make sure that from this point we are processing on GPU regardless of the dali_device parameter
    images = images.gpu()

    images = fn.flip(images, horizontal=rng)

    # Based on the specification, apply the automatic augmentation policy. Note that, from the
    # point of view of the pipeline definition, this `if` statement relies on a static scalar
    # parameter, so it is evaluated exactly once during build - we either include the automatic
    # augmentations in the graph or not. We pass the shape of the image after the resize, so the
    # translate operations are done relative to the image size.
    if automatic_augmentation is None:
        output = images
    elif automatic_augmentation == "autoaugment":
        output = auto_augment.auto_augment_image_net(images, shape=[image_size, image_size])
    elif automatic_augmentation == "trivialaugment":
        output = trivial_augment.trivial_augment_wide(images, shape=[image_size, image_size])
    else:
        raise ValueError(f"Automatic augmentation: '{automatic_augmentation}'"
                         f" is not supported for DALI")

    output = fn.crop_mirror_normalize(output, dtype=types.FLOAT, output_layout=output_layout,
                                      crop=(image_size, image_size),
                                      mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                      std=[0.229 * 255, 0.224 * 255, 0.225 * 255])

    return output, labels


@pipeline_def
def validation_pipe(data_dir, interpolation, image_size, image_crop, output_layout, rank=0,
                    world_size=1):
    jpegs, label = fn.readers.file(name="Reader", file_root=data_dir, shard_id=rank,
                                   num_shards=world_size, random_shuffle=False, pad_last_batch=True)

    images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB)

    # Resize so that the shorter edge matches image_size; the center crop to
    # image_crop x image_crop happens in crop_mirror_normalize below
    images = fn.resize(images, resize_shorter=image_size, interp_type=interpolation,
                       antialias=False)

    output = fn.crop_mirror_normalize(images, dtype=types.FLOAT, output_layout=output_layout,
                                      crop=(image_crop, image_crop),
                                      mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                      std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
    return output, label
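

# --- Usage sketch (illustrative, not part of the pipeline definitions above) ---
# A minimal example of building and running the training pipeline. The dataset path and the
# batch/thread/device settings below are assumptions for demonstration only; adjust them to
# your environment.
if __name__ == "__main__":
    pipe = training_pipe(data_dir="/path/to/imagenet/train",  # hypothetical dataset location
                         interpolation=types.INTERP_TRIANGULAR,
                         image_size=224,
                         output_layout="CHW",
                         automatic_augmentation="autoaugment",
                         dali_device="gpu",
                         # standard pipeline_def arguments
                         batch_size=32, num_threads=4, device_id=0, seed=42)
    pipe.build()
    images, labels = pipe.run()  # one batch of TensorLists; images reside on the GPU
    print(images.shape(), labels.shape())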