// RUN: %{build} -o %t.out
// RUN: %{run} %t.out

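// Checks the ext::oneapi::experimental kernel_queue_specific
// 'max_num_work_groups' query: its return type, that at least one work-group
// is reported for a valid configuration, that kernels launch at the reported
// limits, and that an oversized work-group size yields 0 (verified on the
// CUDA backend) and a failing launch.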
#include <sycl/detail/core.hpp>

#include <cassert>
#include <cstdint>
#include <iostream>
#include <type_traits>

namespace syclex = sycl::ext::oneapi::experimental;
using namespace sycl::info::device;
using namespace sycl::info::kernel_device_specific;

using value_type = int64_t;

namespace kernels {

template <class T, size_t Dim>
using sycl_global_accessor =
    sycl::accessor<T, Dim, sycl::access::mode::read_write,
                   sycl::access::target::global_buffer>;

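// Simple kernel without local memory: each work-item writes its global linear
// id plus 42 into the output buffer.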
class TestKernel {
public:
  static constexpr bool HasLocalMemory{false};

  TestKernel(sycl_global_accessor<value_type, 1> acc) : acc_{acc} {}

  void operator()(sycl::nd_item<1> item) const {
    const auto gtid = item.get_global_linear_id();
    acc_[gtid] = gtid + 42;
  }

private:
  sycl_global_accessor<value_type, 1> acc_;
};

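// Kernel variant that stages its values through a local accessor first; used
// to exercise the query's dynamic local-memory argument.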
class TestLocalMemoryKernel {
public:
  static constexpr bool HasLocalMemory{true};

  TestLocalMemoryKernel(sycl_global_accessor<value_type, 1> acc,
                        sycl::local_accessor<value_type, 1> loc_acc)
      : acc_{acc}, loc_acc_{loc_acc} {}

  void operator()(sycl::nd_item<1> item) const {
    const auto ltid = item.get_local_linear_id();
    const auto gtid = item.get_global_linear_id();
    if (ltid < loc_acc_.size()) {
      loc_acc_[ltid] = ltid + 42;
      item.barrier(sycl::access::fence_space::local_space);
      acc_[gtid] = loc_acc_[ltid];
    } else {
      acc_[gtid] = 0;
    }
  }

private:
  sycl_global_accessor<value_type, 1> acc_;
  sycl::local_accessor<value_type, 1> loc_acc_;
};

} // namespace kernels

namespace {

template <class KernelName>
int test_max_num_work_groups(sycl::queue &q, const sycl::device &dev) {
  const auto ctx = q.get_context();
  auto bundle = sycl::get_kernel_bundle<sycl::bundle_state::executable>(ctx);
  auto kernel = bundle.template get_kernel<KernelName>();

  const size_t maxWorkGroupSize =
      kernel.template get_info<work_group_size>(dev);
  const size_t NumWorkItems = maxWorkGroupSize * maxWorkGroupSize;

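  // Start with a small work-group size; the local-memory kernel additionally
  // requests one local element per work-item in the group.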
  size_t workGroupSize = 32;
  size_t localMemorySizeInBytes{0};
  if constexpr (KernelName::HasLocalMemory) {
    localMemorySizeInBytes = workGroupSize * sizeof(value_type);
  }

  sycl::buffer<value_type, 1> buf{sycl::range<1>{NumWorkItems}};

  // Tests

  // ==================== //
  // Test 1 - return type //
  // ==================== //
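  // The query takes the queue, the proposed work-group range, and the amount
  // of dynamic local memory (in bytes) the launch would use.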
  sycl::range<3> workGroupRange{workGroupSize, 1, 1};
  auto maxWGs = kernel.template ext_oneapi_get_info<
      syclex::info::kernel_queue_specific::max_num_work_groups>(
      q, workGroupRange, localMemorySizeInBytes);

  // Test the return type is as specified in the extension document.
  static_assert(std::is_same_v<std::remove_cv_t<decltype(maxWGs)>, size_t>,
                "max_num_work_groups query must return size_t");

  // ===================== //
  // Test 2 - return value //
  // ===================== //
  // We must have at least one active group if we are below resource limits.
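  // (The assert covers debug builds; the early return covers NDEBUG builds.)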
  assert(maxWGs > 0 && "max_num_work_groups query failed");
  if (maxWGs == 0)
    return 1;

  // Run the kernel
  auto launch_range = sycl::nd_range<1>{sycl::range<1>{NumWorkItems},
                                        sycl::range<1>{workGroupSize}};
  q.submit([&](sycl::handler &cgh) {
    auto acc = buf.get_access<sycl::access::mode::read_write>(cgh);
    if constexpr (KernelName::HasLocalMemory) {
      sycl::local_accessor<value_type, 1> loc_acc{
          sycl::range<1>{workGroupSize}, cgh};
      cgh.parallel_for(launch_range, KernelName{acc, loc_acc});
    } else {
      cgh.parallel_for(launch_range, KernelName{acc});
    }
  }).wait();
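  // Spot-check the first element on the host to confirm the kernel ran.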
  assert(sycl::host_accessor{buf}[0] == 42);

  // ========================== //
  // Test 3 - use max resources //
  // ========================== //
  // Use the maximum work-group size and, for the local-memory kernel, all of
  // the device's local memory.
  workGroupSize = maxWorkGroupSize;
  workGroupRange[0] = workGroupSize;
  size_t localSize =
      (dev.get_info<sycl::info::device::local_mem_size>() / sizeof(value_type));
  if constexpr (KernelName::HasLocalMemory) {
    localMemorySizeInBytes = localSize * sizeof(value_type);
  }
  maxWGs = kernel.template ext_oneapi_get_info<
      syclex::info::kernel_queue_specific::max_num_work_groups>(
      q, workGroupRange, localMemorySizeInBytes);

  assert(maxWGs > 0 && "max_num_work_groups query failed");
  if (maxWGs == 0)
    return 1;

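  // Relaunch with the maximal configuration and make sure it still runs.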
  launch_range = sycl::nd_range<1>{sycl::range<1>{NumWorkItems},
                                   sycl::range<1>{workGroupSize}};

  q.submit([&](sycl::handler &cgh) {
    auto acc = buf.get_access<sycl::access::mode::read_write>(cgh);
    if constexpr (KernelName::HasLocalMemory) {
      sycl::local_accessor<value_type, 1> loc_acc{sycl::range<1>{localSize},
                                                  cgh};
      cgh.parallel_for(launch_range, KernelName{acc, loc_acc});
    } else {
      cgh.parallel_for(launch_range, KernelName{acc});
    }
  }).wait();
  assert(sycl::host_accessor{buf}[0] == 42);

  // =============================== //
  // Test 4 - exceed resource limits //
  // =============================== //
  // Request a work-group size a little over the device maximum so that the
  // launch configuration exceeds the available resources.
  workGroupSize = maxWorkGroupSize + 32;
  workGroupRange[0] = workGroupSize;
  maxWGs = kernel.template ext_oneapi_get_info<
      syclex::info::kernel_queue_specific::max_num_work_groups>(
      q, workGroupRange, localMemorySizeInBytes);
  // It should not be possible to launch the kernel with this configuration:
  // workGroupSize is larger than maxWorkGroupSize, so the query must report
  // that zero work-groups can be launched.
  if (dev.get_backend() == sycl::backend::ext_oneapi_cuda) {
    assert(maxWGs == 0 && "max_num_work_groups query failed");
    if (maxWGs > 0)
      return 1;
  }

  // Since the query reports that no work-groups can be launched with this
  // configuration, the kernel launch itself is expected to fail: a work-group
  // size larger than the maximum supported one should result in an error.
  try {
    launch_range = sycl::nd_range<1>{sycl::range<1>{NumWorkItems},
                                     sycl::range<1>{workGroupSize}};

    q.submit([&](sycl::handler &cgh) {
      auto acc = buf.get_access<sycl::access::mode::read_write>(cgh);
      if constexpr (KernelName::HasLocalMemory) {
        sycl::local_accessor<value_type, 1> loc_acc{sycl::range<1>{localSize},
                                                    cgh};
        cgh.parallel_for(launch_range, KernelName{acc, loc_acc});
      } else {
        cgh.parallel_for(launch_range, KernelName{acc});
      }
    }).wait();
  } catch (const sycl::exception &e) {
    // An 'nd_range' error is the expected outcome of the above launch config.
    if (e.code() == sycl::make_error_code(sycl::errc::nd_range)) {
      return 0;
    }
    std::cerr << e.code() << ":\t";
    std::cerr << e.what() << std::endl;
    return 1;
  }

  return 0;
}

} // namespace

int main() {
  sycl::queue q{};
  sycl::device dev = q.get_device();

  using namespace kernels;

  int ret{0};
  ret |= test_max_num_work_groups<TestKernel>(q, dev);
  ret |= test_max_num_work_groups<TestLocalMemoryKernel>(q, dev);
  return ret;
}