From 1a53588ab2857eb61574cb078a1e1cc4b705dfb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Fri, 18 Feb 2022 08:29:45 +0000 Subject: [PATCH 01/18] added fma,fmax,fmin for half, bf16 and bf16x2 and approx exp2 for bf16 and bf16x2 --- libclc/ptx-nvidiacl/libspirv/math/exp2.cl | 18 +++++++ libclc/ptx-nvidiacl/libspirv/math/fma.cl | 31 +++++++++++- libclc/ptx-nvidiacl/libspirv/math/fmax.cl | 61 ++++++++++++++++++++--- libclc/ptx-nvidiacl/libspirv/math/fmin.cl | 61 ++++++++++++++++++++--- 4 files changed, 156 insertions(+), 15 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/exp2.cl b/libclc/ptx-nvidiacl/libspirv/math/exp2.cl index 6c64581da055a..0cbfdbed054f1 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/exp2.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/exp2.cl @@ -15,3 +15,21 @@ #define __CLC_BUILTIN __nv_exp2 #define __CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) #include + +_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_exp2(ushort x) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_ex2_approx_bf16(x); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_exp2, ushort) + +_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_exp2(uint x) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_ex2_approx_bf16x2(x); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_exp2, uint) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fma.cl b/libclc/ptx-nvidiacl/libspirv/math/fma.cl index f887289c03b29..96bf78240fbfa 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fma.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fma.cl @@ -11,6 +11,8 @@ #include "../../include/libdevice.h" #include +extern int __clc_nvvm_reflect_arch(); + _CLC_DEFINE_TERNARY_BUILTIN(float, __spirv_ocl_fma, __nv_fmaf, float, float, float) @@ -27,10 +29,37 @@ _CLC_DEFINE_TERNARY_BUILTIN(double, __spirv_ocl_fma, __nv_fma, double, double, #pragma OPENCL EXTENSION cl_khr_fp16 : enable -_CLC_DEFINE_TERNARY_BUILTIN(half, __spirv_ocl_fma, __nv_fmaf, half, half, half) +_CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fma(half x, half y, half z) { + if (__clc_nvvm_reflect_arch() >= 530) { + return __nvvm_fma_rn_f16(x, y, z); + } + return __nv_fmaf(x,y,z); +} +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fma, half, + half, half) #endif +_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fma(ushort x, ushort y, ushort z) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fma_rn_bf16(x, y, z); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_fma, ushort, + ushort, ushort) + +_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fma(uint x, uint y, uint z) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fma_rn_bf16x2(x, y, z); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fma, uint, + uint, uint) + #undef __CLC_BUILTIN #undef __CLC_BUILTIN_F #undef __CLC_FUNCTION diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl index 0ff8f83d6cc05..bcc81657312ed 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl @@ -6,12 +6,59 @@ // //===----------------------------------------------------------------------===// -#include - -#include "../../include/libdevice.h" #include -#define __CLC_FUNCTION __spirv_ocl_fmax -#define __CLC_BUILTIN __nv_fmax -#define 
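
// A minimal, host-side sketch of the bf16 encoding assumed by the ushort/uint
// overloads added in this patch (illustrative only, not part of libclc): a
// bf16 value is the high 16 bits of an IEEE-754 binary32, and a bf16x2 packs
// two such values into one 32-bit word. Truncating conversion, rounding ignored.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t float_to_bf16_bits(float f) {
  uint32_t u;
  memcpy(&u, &f, sizeof u);   // reinterpret the float's bits
  return (uint16_t)(u >> 16); // keep sign, exponent and top 7 mantissa bits
}

static float bf16_bits_to_float(uint16_t h) {
  uint32_t u = (uint32_t)h << 16; // the low 16 mantissa bits become zero
  float f;
  memcpy(&f, &u, sizeof f);
  return f;
}

int main(void) {
  uint16_t b = float_to_bf16_bits(1.5f);
  printf("bf16 bits 0x%04x -> %f\n", b, bf16_bits_to_float(b));
  return 0;
}
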
__CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) -#include +extern int __clc_nvvm_reflect_arch(); + +_CLC_DEF _CLC_OVERLOAD float __spirv_ocl_fmax(float x, float y) { + return __nvvm_fmax_f(x, y); +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, float, __spirv_ocl_fmax, float, + float) + +#ifdef cl_khr_fp64 + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable + +_CLC_DEF _CLC_OVERLOAD double __spirv_ocl_fmax(double x, double y) { + return __nvvm_fmax_d(x, y); +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, double, __spirv_ocl_fmax, double, + double) + +#endif + +#ifdef cl_khr_fp16 + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +_CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fmax(half x, half y) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fmax_f16(x, y); + } + return x > y ? x : y; +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmax, half, + half) + +#endif + +_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fmax(ushort x, ushort y) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_max_rn_bf16(x, y); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_fmax, ushort, + ushort) + +_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fmax(uint x, uint y) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_max_rn_bf16x2(x, y); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fmax, uint, + uint) \ No newline at end of file diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl index ef09ba1b94ec0..e99ccc6d89bac 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl @@ -6,12 +6,59 @@ // //===----------------------------------------------------------------------===// -#include - -#include "../../include/libdevice.h" #include -#define __CLC_FUNCTION __spirv_ocl_fmin -#define __CLC_BUILTIN __nv_fmin -#define __CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) -#include +extern int __clc_nvvm_reflect_arch(); + +_CLC_DEF _CLC_OVERLOAD float __spirv_ocl_fmin(float x, float y) { + return __nvvm_fmin_f(x, y); +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, float, __spirv_ocl_fmin, float, + float) + +#ifdef cl_khr_fp64 + +#pragma OPENCL EXTENSION cl_khr_fp64 : enable + +_CLC_DEF _CLC_OVERLOAD double __spirv_ocl_fmin(double x, double y) { + return __nvvm_fmin_d(x, y); +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, double, __spirv_ocl_fmin, double, + double) + +#endif + +#ifdef cl_khr_fp16 + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +_CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fmin(half x, half y) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fmin_f16(x, y); + } + return x < y ? 
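
// For reference (host-side, illustrative only): the ternary fallback used for
// the pre-sm_80 half path differs from the usual fmin NaN rule, since it
// returns y even when y is NaN. The conventional behaviour, in plain C:
#include <math.h>

static float ref_fmin(float x, float y) {
  if (isnan(x)) return y; // a NaN loses against a number
  if (isnan(y)) return x;
  return x < y ? x : y;   // ordinary comparison once NaNs are ruled out
}
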
x : y; +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmin, half, + half) + +#endif + +_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fmin(ushort x, ushort y) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_max_rn_bf16(x, y); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_fmin, ushort, + ushort) + +_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fmin(uint x, uint y) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_max_rn_bf16x2(x, y); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fmin, uint, + uint) \ No newline at end of file From 32789b422c40ed0ef71a9d1166aa782dacbefba8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Fri, 18 Feb 2022 08:40:31 +0000 Subject: [PATCH 02/18] Added missing includes and forward declarations --- libclc/ptx-nvidiacl/libspirv/math/exp2.cl | 2 ++ libclc/ptx-nvidiacl/libspirv/math/fmax.cl | 1 + libclc/ptx-nvidiacl/libspirv/math/fmin.cl | 1 + 3 files changed, 4 insertions(+) diff --git a/libclc/ptx-nvidiacl/libspirv/math/exp2.cl b/libclc/ptx-nvidiacl/libspirv/math/exp2.cl index 0cbfdbed054f1..dee3485bd7ba1 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/exp2.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/exp2.cl @@ -16,6 +16,8 @@ #define __CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) #include +extern int __clc_nvvm_reflect_arch(); + _CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_exp2(ushort x) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_ex2_approx_bf16(x); diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl index bcc81657312ed..5009b2ec163f5 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include +#include extern int __clc_nvvm_reflect_arch(); diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl index e99ccc6d89bac..fe22d2afd3837 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include +#include extern int __clc_nvvm_reflect_arch(); From f20a4bdd84c5428b38d3185bfcd81d38df4149a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Mon, 21 Feb 2022 08:08:52 +0000 Subject: [PATCH 03/18] removed exp2 bf16 implementations that do not have builtins --- libclc/ptx-nvidiacl/libspirv/math/exp2.cl | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/exp2.cl b/libclc/ptx-nvidiacl/libspirv/math/exp2.cl index dee3485bd7ba1..6c64581da055a 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/exp2.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/exp2.cl @@ -15,23 +15,3 @@ #define __CLC_BUILTIN __nv_exp2 #define __CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) #include - -extern int __clc_nvvm_reflect_arch(); - -_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_exp2(ushort x) { - if (__clc_nvvm_reflect_arch() >= 800) { - return __nvvm_ex2_approx_bf16(x); - } - __builtin_trap(); - __builtin_unreachable(); -} -_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_exp2, ushort) - -_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_exp2(uint x) { - if (__clc_nvvm_reflect_arch() >= 800) { - return __nvvm_ex2_approx_bf16x2(x); - } - __builtin_trap(); 
- __builtin_unreachable(); -} -_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_exp2, uint) From a780a06258ede2f27e958dae40537f84e531e7b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Mon, 21 Feb 2022 08:38:38 +0000 Subject: [PATCH 04/18] added optimized half2 overloads for fma, fmin and fmax --- libclc/generic/include/clcmacro.h | 44 ++++++++++++++--------- libclc/ptx-nvidiacl/libspirv/math/fma.cl | 14 ++++++-- libclc/ptx-nvidiacl/libspirv/math/fmax.cl | 10 ++++-- libclc/ptx-nvidiacl/libspirv/math/fmin.cl | 8 ++++- 4 files changed, 53 insertions(+), 23 deletions(-) diff --git a/libclc/generic/include/clcmacro.h b/libclc/generic/include/clcmacro.h index d4167a8e4529e..addb461aa047d 100644 --- a/libclc/generic/include/clcmacro.h +++ b/libclc/generic/include/clcmacro.h @@ -9,11 +9,7 @@ #ifndef __CLC_MACRO_H #define __CLC_MACRO_H -#define _CLC_UNARY_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE) \ - DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE##2 x) { \ - return (RET_TYPE##2)(FUNCTION(x.x), FUNCTION(x.y)); \ - } \ - \ +#define _CLC_UNARY_VECTORIZE_HAVE2(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE) \ DECLSPEC RET_TYPE##3 FUNCTION(ARG1_TYPE##3 x) { \ return (RET_TYPE##3)(FUNCTION(x.x), FUNCTION(x.y), FUNCTION(x.z)); \ } \ @@ -30,12 +26,14 @@ return (RET_TYPE##16)(FUNCTION(x.lo), FUNCTION(x.hi)); \ } -#define _CLC_BINARY_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ - ARG2_TYPE) \ - DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE##2 x, ARG2_TYPE##2 y) { \ - return (RET_TYPE##2)(FUNCTION(x.x, y.x), FUNCTION(x.y, y.y)); \ +#define _CLC_UNARY_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE) \ + DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE##2 x) { \ + return (RET_TYPE##2)(FUNCTION(x.x), FUNCTION(x.y)); \ } \ - \ + _CLC_UNARY_VECTORIZE_HAVE2(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE) + +#define _CLC_BINARY_VECTORIZE_HAVE2(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ + ARG2_TYPE) \ DECLSPEC RET_TYPE##3 FUNCTION(ARG1_TYPE##3 x, ARG2_TYPE##3 y) { \ return (RET_TYPE##3)(FUNCTION(x.x, y.x), FUNCTION(x.y, y.y), \ FUNCTION(x.z, y.z)); \ @@ -53,6 +51,14 @@ return (RET_TYPE##16)(FUNCTION(x.lo, y.lo), FUNCTION(x.hi, y.hi)); \ } +#define _CLC_BINARY_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ + ARG2_TYPE) \ + DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE##2 x, ARG2_TYPE##2 y) { \ + return (RET_TYPE##2)(FUNCTION(x.x, y.x), FUNCTION(x.y, y.y)); \ + } \ + _CLC_BINARY_VECTORIZE_HAVE2(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ + ARG2_TYPE) + #define _CLC_V_S_V_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ ARG2_TYPE) \ DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE x, ARG2_TYPE##2 y) { \ @@ -76,13 +82,8 @@ return (RET_TYPE##16)(FUNCTION(x, y.lo), FUNCTION(x, y.hi)); \ } -#define _CLC_TERNARY_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ - ARG2_TYPE, ARG3_TYPE) \ - DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE##2 x, ARG2_TYPE##2 y, \ - ARG3_TYPE##2 z) { \ - return (RET_TYPE##2)(FUNCTION(x.x, y.x, z.x), FUNCTION(x.y, y.y, z.y)); \ - } \ - \ +#define _CLC_TERNARY_VECTORIZE_HAVE2(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ + ARG2_TYPE, ARG3_TYPE) \ DECLSPEC RET_TYPE##3 FUNCTION(ARG1_TYPE##3 x, ARG2_TYPE##3 y, \ ARG3_TYPE##3 z) { \ return (RET_TYPE##3)(FUNCTION(x.x, y.x, z.x), FUNCTION(x.y, y.y, z.y), \ @@ -107,6 +108,15 @@ FUNCTION(x.hi, y.hi, z.hi)); \ } +#define _CLC_TERNARY_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ + ARG2_TYPE, ARG3_TYPE) \ + DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE##2 x, ARG2_TYPE##2 y, \ + ARG3_TYPE##2 z) { \ + return (RET_TYPE##2)(FUNCTION(x.x, y.x, z.x), 
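
// The *_VECTORIZE_HAVE2 split introduced here lets a file provide its own
// hand-optimised 2-element overload (for example one built on an f16x2 or
// bf16x2 builtin) while still generating the 3/4/8/16-element ones. A
// stand-alone C sketch of the same idea, with small structs standing in for
// OpenCL vector types:
#include <stdio.h>

typedef struct { float x, y; } f2;
typedef struct { f2 lo, hi; } f4;

// Generates only the 4-wide body; the 2-wide one is assumed to exist already.
#define DEFINE_VEC4_FROM_VEC2(NAME)                                  \
  static f4 NAME##_v4(f4 a, f4 b) {                                  \
    return (f4){NAME##_v2(a.lo, b.lo), NAME##_v2(a.hi, b.hi)};       \
  }

// Hand-written 2-wide version (stands in for the packed-builtin path).
static f2 mymax_v2(f2 a, f2 b) {
  return (f2){a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y};
}
DEFINE_VEC4_FROM_VEC2(mymax)

int main(void) {
  f4 r = mymax_v4((f4){{1, 5}, {2, 8}}, (f4){{3, 4}, {7, 6}});
  printf("%g %g %g %g\n", r.lo.x, r.lo.y, r.hi.x, r.hi.y); // 3 5 7 8
  return 0;
}
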
FUNCTION(x.y, y.y, z.y)); \ + } \ + _CLC_TERNARY_VECTORIZE_HAVE2(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ + ARG2_TYPE, ARG3_TYPE) + #define _CLC_V_S_S_V_VECTORIZE(DECLSPEC, RET_TYPE, FUNCTION, ARG1_TYPE, \ ARG2_TYPE, ARG3_TYPE) \ DECLSPEC RET_TYPE##2 FUNCTION(ARG1_TYPE x, ARG2_TYPE y, ARG3_TYPE##2 z) { \ diff --git a/libclc/ptx-nvidiacl/libspirv/math/fma.cl b/libclc/ptx-nvidiacl/libspirv/math/fma.cl index 96bf78240fbfa..40a883e2a8045 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fma.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fma.cl @@ -33,10 +33,18 @@ _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fma(half x, half y, half z) { if (__clc_nvvm_reflect_arch() >= 530) { return __nvvm_fma_rn_f16(x, y, z); } - return __nv_fmaf(x,y,z); + return __nv_fmaf(x, y, z); } -_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fma, half, - half, half) + +_CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fma(half2 x, half2 y, half2 z) { + if (__clc_nvvm_reflect_arch() >= 530) { + return __nvvm_fma_rn_f16x2(x, y, z); + } + return (half2)(__spirv_ocl_fma(x.x, y.x, z.x), + __spirv_ocl_fma(x.y, y.y, z.y)); +} +_CLC_TERNARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fma, + half, half, half) #endif diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl index 5009b2ec163f5..2cdc4c7005322 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl @@ -39,8 +39,14 @@ _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fmax(half x, half y) { } return x > y ? x : y; } -_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmax, half, - half) +_CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fmax(half2 x, half2 y) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fmax_f16x2(x, y); + } + return (half2)(__spirv_ocl_fmax(x.x, y.x), __spirv_ocl_fmax(x.y, y.y)); +} +_CLC_BINARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmax, + half, half) #endif diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl index fe22d2afd3837..28a2d6c5c7688 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl @@ -39,7 +39,13 @@ _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fmin(half x, half y) { } return x < y ? 
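
// The f16x2 / bf16x2 builtins used in these 2-element overloads operate on two
// 16-bit lanes packed into one 32-bit register. A host-side sketch of that
// packing (element 0 is assumed to sit in the low 16 bits, matching
// little-endian storage of a 2-element vector; illustrative only):
#include <stdint.h>

static uint32_t pack16x2(uint16_t elem0, uint16_t elem1) {
  return (uint32_t)elem0 | ((uint32_t)elem1 << 16);
}

static void unpack16x2(uint32_t v, uint16_t *elem0, uint16_t *elem1) {
  *elem0 = (uint16_t)(v & 0xFFFFu);
  *elem1 = (uint16_t)(v >> 16);
}
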
x : y; } -_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmin, half, +_CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fmin(half2 x, half2 y) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fmin_f16x2(x, y); + } + return (half2)(__spirv_ocl_fmin(x.x, y.x), __spirv_ocl_fmin(x.y, y.y)); +} +_CLC_BINARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmin, half, half) #endif From 21d98c10befa4b0e2c44fd359d50343ed0816ead Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Mon, 21 Feb 2022 13:13:14 +0000 Subject: [PATCH 05/18] added fma_relu for half, bf16 and bf16x2 --- libclc/ptx-nvidiacl/libspirv/SOURCES | 1 + libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl | 63 +++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100755 libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl diff --git a/libclc/ptx-nvidiacl/libspirv/SOURCES b/libclc/ptx-nvidiacl/libspirv/SOURCES index 788ecc0426a0a..efef64259f9d6 100644 --- a/libclc/ptx-nvidiacl/libspirv/SOURCES +++ b/libclc/ptx-nvidiacl/libspirv/SOURCES @@ -27,6 +27,7 @@ math/fabs.cl math/fdim.cl math/floor.cl math/fma.cl +math/fma_relu.cl math/fmax.cl math/fmin.cl math/fmod.cl diff --git a/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl b/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl new file mode 100755 index 0000000000000..61b19f2427160 --- /dev/null +++ b/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl @@ -0,0 +1,63 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include + +#include "../../include/libdevice.h" +#include + +extern int __clc_nvvm_reflect_arch(); + +#ifdef cl_khr_fp16 + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +_CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fma_relu(half x, half y, half z) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fma_rn_relu_f16(x, y, z); + } + __builtin_trap(); + __builtin_unreachable(); +} + +_CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fma_relu(half2 x, half2 y, half2 z) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fma_rn_relu_f16x2(x, y, z); + } + return (half2)(__spirv_ocl_fma_relu(x.x, y.x, z.x), + __spirv_ocl_fma_relu(x.y, y.y, z.y)); +} +_CLC_TERNARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fma_relu, + half, half, half) + +#endif + +_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fma_relu(ushort x, ushort y, + ushort z) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fma_rn_relu_bf16(x, y, z); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_fma_relu, + ushort, ushort, ushort) + +_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fma_relu(uint x, uint y, uint z) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fma_rn_relu_bf16x2(x, y, z); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fma_relu, uint, + uint, uint) + +#undef __CLC_BUILTIN +#undef __CLC_BUILTIN_F +#undef __CLC_FUNCTION From 59758bdf6ea88513321b709b7e7d4eac0e2d878c Mon Sep 17 00:00:00 2001 From: Jakub Chlanda Date: Thu, 24 Feb 2022 09:58:57 +0000 Subject: [PATCH 06/18] [LIBCL][NVPTX] Add support for half tys native_exp2 --- .../ptx-nvidiacl/libspirv/math/native_exp2.cl | 37 
+++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl b/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl index 93c12c0aeb067..3cecd6ca93cce 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl @@ -14,4 +14,41 @@ #define __CLC_FUNCTION __spirv_ocl_native_exp2 #define __CLC_BUILTIN __nv_exp2 #define __CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) + +#ifdef cl_khr_fp16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +int __clc_nvvm_reflect_arch(); + +_CLC_DEF _CLC_OVERLOAD half __spirv_ocl_native_exp2(half x) { + if (__clc_nvvm_reflect_arch() >= 750) { + return __nvvm_ex2_approx_f16(x); + } else { + float upcast = x; + return __spirv_ocl_native_exp2(upcast); + } +} + +_CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_native_exp2(half2 x) { + if (__clc_nvvm_reflect_arch() >= 750) { + return __nvvm_ex2_approx_f16x2(x); + } else { + float upcast0 = x[0]; + float upcast1 = x[1]; + half2 res; + res.s0 = __spirv_ocl_native_exp2(upcast0); + res.s1 = __spirv_ocl_native_exp2(upcast1); + return res; + } +} + +_CLC_UNARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, + __spirv_ocl_native_exp2, half) + +#endif // cl_khr_fp16 + +// Undef halfs before uncluding unary builtins, as they are handled above. +#ifdef cl_khr_fp16 +#undef cl_khr_fp16 +#endif // cl_khr_fp16 #include From b362369fa8acec7ff9ad0b47368586de70bb9565 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Thu, 24 Feb 2022 13:13:52 +0000 Subject: [PATCH 07/18] removed redundant undefs --- libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl | 4 ---- 1 file changed, 4 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl b/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl index 61b19f2427160..35998be1eb86a 100755 --- a/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl @@ -57,7 +57,3 @@ _CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fma_relu(uint x, uint y, uint z) { } _CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fma_relu, uint, uint, uint) - -#undef __CLC_BUILTIN -#undef __CLC_BUILTIN_F -#undef __CLC_FUNCTION From 86a6f42d303d381918076c5b0aeb1edcffd359aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Tue, 1 Mar 2022 12:35:47 +0000 Subject: [PATCH 08/18] changed prefix for fma_relu to __clc --- libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl b/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl index 35998be1eb86a..b48e25c7c628d 100755 --- a/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fma_relu.cl @@ -17,7 +17,7 @@ extern int __clc_nvvm_reflect_arch(); #pragma OPENCL EXTENSION cl_khr_fp16 : enable -_CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fma_relu(half x, half y, half z) { +_CLC_DEF _CLC_OVERLOAD half __clc_fma_relu(half x, half y, half z) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fma_rn_relu_f16(x, y, z); } @@ -25,19 +25,19 @@ _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fma_relu(half x, half y, half z) { __builtin_unreachable(); } -_CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fma_relu(half2 x, half2 y, half2 z) { +_CLC_DEF _CLC_OVERLOAD half2 __clc_fma_relu(half2 x, half2 y, half2 z) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fma_rn_relu_f16x2(x, y, z); } - return (half2)(__spirv_ocl_fma_relu(x.x, y.x, z.x), - __spirv_ocl_fma_relu(x.y, y.y, 
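
// Reference semantics of the fma_relu helpers above, written out on plain
// floats (host-side, illustrative; the device builtins additionally cover
// f16/bf16 and their packed x2 forms):
#include <math.h>

static float ref_fma_relu(float x, float y, float z) {
  float r = fmaf(x, y, z);    // one fused multiply-add, single rounding
  return r > 0.0f ? r : 0.0f; // clamp negative results to zero (the ReLU part)
}
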
z.y)); + return (half2)(__clc_fma_relu(x.x, y.x, z.x), + __clc_fma_relu(x.y, y.y, z.y)); } -_CLC_TERNARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fma_relu, +_CLC_TERNARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __clc_fma_relu, half, half, half) #endif -_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fma_relu(ushort x, ushort y, +_CLC_DEF _CLC_OVERLOAD ushort __clc_fma_relu(ushort x, ushort y, ushort z) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fma_rn_relu_bf16(x, y, z); @@ -45,15 +45,15 @@ _CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fma_relu(ushort x, ushort y, __builtin_trap(); __builtin_unreachable(); } -_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_fma_relu, +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fma_relu, ushort, ushort, ushort) -_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fma_relu(uint x, uint y, uint z) { +_CLC_DEF _CLC_OVERLOAD uint __clc_fma_relu(uint x, uint y, uint z) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fma_rn_relu_bf16x2(x, y, z); } __builtin_trap(); __builtin_unreachable(); } -_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fma_relu, uint, +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fma_relu, uint, uint, uint) From 0802831478df6492417d32fbea67f9e6fb7177d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Tue, 1 Mar 2022 12:47:16 +0000 Subject: [PATCH 09/18] changed bf16 builtins to use __clc prefix --- libclc/ptx-nvidiacl/libspirv/math/fma.cl | 8 ++++---- libclc/ptx-nvidiacl/libspirv/math/fmax.cl | 8 ++++---- libclc/ptx-nvidiacl/libspirv/math/fmin.cl | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fma.cl b/libclc/ptx-nvidiacl/libspirv/math/fma.cl index 40a883e2a8045..4cfdc2821e12e 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fma.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fma.cl @@ -48,24 +48,24 @@ _CLC_TERNARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fma, #endif -_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fma(ushort x, ushort y, ushort z) { +_CLC_DEF _CLC_OVERLOAD ushort __clc_fma(ushort x, ushort y, ushort z) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fma_rn_bf16(x, y, z); } __builtin_trap(); __builtin_unreachable(); } -_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_fma, ushort, +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fma, ushort, ushort, ushort) -_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fma(uint x, uint y, uint z) { +_CLC_DEF _CLC_OVERLOAD uint __clc_fma(uint x, uint y, uint z) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fma_rn_bf16x2(x, y, z); } __builtin_trap(); __builtin_unreachable(); } -_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fma, uint, +_CLC_TERNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fma, uint, uint, uint) #undef __CLC_BUILTIN diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl index 2cdc4c7005322..3d1e82ab1c6dd 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl @@ -50,22 +50,22 @@ _CLC_BINARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmax, #endif -_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fmax(ushort x, ushort y) { +_CLC_DEF _CLC_OVERLOAD ushort __clc_fmax(ushort x, ushort y) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_max_rn_bf16(x, y); } __builtin_trap(); __builtin_unreachable(); } -_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD 
_CLC_DEF, ushort, __spirv_ocl_fmax, ushort, +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fmax, ushort, ushort) -_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fmax(uint x, uint y) { +_CLC_DEF _CLC_OVERLOAD uint __clc_fmax(uint x, uint y) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_max_rn_bf16x2(x, y); } __builtin_trap(); __builtin_unreachable(); } -_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fmax, uint, +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fmax, uint, uint) \ No newline at end of file diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl index 28a2d6c5c7688..8796a90044f2d 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl @@ -50,22 +50,22 @@ _CLC_BINARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmin, half #endif -_CLC_DEF _CLC_OVERLOAD ushort __spirv_ocl_fmin(ushort x, ushort y) { +_CLC_DEF _CLC_OVERLOAD ushort __clc_fmin(ushort x, ushort y) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_max_rn_bf16(x, y); } __builtin_trap(); __builtin_unreachable(); } -_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __spirv_ocl_fmin, ushort, +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fmin, ushort, ushort) -_CLC_DEF _CLC_OVERLOAD uint __spirv_ocl_fmin(uint x, uint y) { +_CLC_DEF _CLC_OVERLOAD uint __clc_fmin(uint x, uint y) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_max_rn_bf16x2(x, y); } __builtin_trap(); __builtin_unreachable(); } -_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __spirv_ocl_fmin, uint, +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fmin, uint, uint) \ No newline at end of file From be46eb49e0d4af8d3deadcf0b7f11781fcaaf031 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Tue, 1 Mar 2022 12:47:50 +0000 Subject: [PATCH 10/18] added bf16 fabs builtins --- libclc/ptx-nvidiacl/libspirv/math/fabs.cl | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fabs.cl b/libclc/ptx-nvidiacl/libspirv/math/fabs.cl index 753f449fafefb..1d896d07ecae3 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fabs.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fabs.cl @@ -15,3 +15,21 @@ #define __CLC_BUILTIN __nv_fabs #define __CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) #include + +_CLC_DEF _CLC_OVERLOAD ushort __clc_fabs(ushort x) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fabs_bf16(x); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fabs, ushort) + +_CLC_DEF _CLC_OVERLOAD uint __clc_fabs(uint x) { + if (__clc_nvvm_reflect_arch() >= 800) { + return __nvvm_fabs_bf16x2(x); + } + __builtin_trap(); + __builtin_unreachable(); +} +_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fabs, uint) \ No newline at end of file From f00535ea081c433b2dc4af906e06affc550b779b Mon Sep 17 00:00:00 2001 From: Jakub Chlanda Date: Tue, 1 Mar 2022 10:15:07 -0800 Subject: [PATCH 11/18] [NVPTX] Expose float tys min, max, abs, neg as builtins Adds support for the following builtins: abs, neg: - .bf16, - .bf16x2 min, max - {.ftz}{.NaN}{.xorsign.abs}.f16 - {.ftz}{.NaN}{.xorsign.abs}.f16x2 - {.NaN}{.xorsign.abs}.bf16 - {.NaN}{.xorsign.abs}.bf16x2 - {.ftz}{.NaN}{.xorsign.abs}.f32 Differential Revision: https://reviews.llvm.org/D117887 --- clang/include/clang/Basic/BuiltinsNVPTX.def | 93 +++++++++++++- .../CodeGen/builtins-nvptx-native-half-type.c | 103 
++++++++++++++++ clang/test/CodeGen/builtins-nvptx.c | 115 ++++++++++++++++++ 3 files changed, 306 insertions(+), 5 deletions(-) create mode 100644 clang/test/CodeGen/builtins-nvptx-native-half-type.c diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.def b/clang/include/clang/Basic/BuiltinsNVPTX.def index 449e4d1256944..926f6afca3acf 100644 --- a/clang/include/clang/Basic/BuiltinsNVPTX.def +++ b/clang/include/clang/Basic/BuiltinsNVPTX.def @@ -110,13 +110,89 @@ BUILTIN(__nvvm_prmt, "UiUiUiUi", "") // Min Max -BUILTIN(__nvvm_fmax_ftz_f, "fff", "") -BUILTIN(__nvvm_fmax_f, "fff", "") -BUILTIN(__nvvm_fmin_ftz_f, "fff", "") -BUILTIN(__nvvm_fmin_f, "fff", "") +TARGET_BUILTIN(__nvvm_fmin_f16, "hhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_ftz_f16, "hhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_nan_f16, "hhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_ftz_nan_f16, "hhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_ftz_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_ftz_nan_xorsign_abs_f16, "hhh", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_ftz_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_nan_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_ftz_nan_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_f16x2, "V2hV2hV2h", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_ftz_xorsign_abs_f16x2, "V2hV2hV2h", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_f16x2, "V2hV2hV2h", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_ftz_nan_xorsign_abs_f16x2, "V2hV2hV2h", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_bf16, "UsUsUs", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_nan_bf16, "UsUsUs", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_bf16, "UsUsUs", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_bf16, "UsUsUs", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_bf16x2, "ZUiZUiZUi", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_nan_bf16x2, "ZUiZUiZUi", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_bf16x2, "ZUiZUiZUi", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_bf16x2, "ZUiZUiZUi", "", + AND(SM_86, PTX72)) +BUILTIN(__nvvm_fmin_f, "fff", "") +BUILTIN(__nvvm_fmin_ftz_f, "fff", "") +TARGET_BUILTIN(__nvvm_fmin_nan_f, "fff", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_ftz_nan_f, "fff", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_f, "fff", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_ftz_xorsign_abs_f, "fff", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_f, "fff", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmin_ftz_nan_xorsign_abs_f, "fff", "", AND(SM_86, PTX72)) +BUILTIN(__nvvm_fmin_d, "ddd", "") +TARGET_BUILTIN(__nvvm_fmax_f16, "hhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_ftz_f16, "hhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_nan_f16, "hhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_ftz_nan_f16, "hhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_ftz_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_f16, 
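
// How the signature strings in these TARGET_BUILTIN entries decode (per
// clang's builtin signature encoding; the first entry is the return type):
//   "hhh"       -> __fp16  f(__fp16, __fp16)
//   "V2hV2hV2h" -> 2-element __fp16 vectors in and out
//   "UsUsUs"    -> unsigned short, i.e. a raw bf16 bit pattern
//   "ZUiZUiZUi" -> 32-bit unsigned int, i.e. a packed bf16x2 pair
// Seen from C, the scalar and packed bf16 builtins therefore look roughly like:
unsigned short __nvvm_fmin_bf16(unsigned short, unsigned short);
unsigned int   __nvvm_fmin_bf16x2(unsigned int, unsigned int);
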
"hhh", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_ftz_nan_xorsign_abs_f16, "hhh", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_ftz_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_nan_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_ftz_nan_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_f16x2, "V2hV2hV2h", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_ftz_xorsign_abs_f16x2, "V2hV2hV2h", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_f16x2, "V2hV2hV2h", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_ftz_nan_xorsign_abs_f16x2, "V2hV2hV2h", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_bf16, "UsUsUs", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_nan_bf16, "UsUsUs", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_bf16, "UsUsUs", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_bf16, "UsUsUs", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_bf16x2, "ZUiZUiZUi", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_nan_bf16x2, "ZUiZUiZUi", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_bf16x2, "ZUiZUiZUi", "", + AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_bf16x2, "ZUiZUiZUi", "", + AND(SM_86, PTX72)) +BUILTIN(__nvvm_fmax_f, "fff", "") +BUILTIN(__nvvm_fmax_ftz_f, "fff", "") +TARGET_BUILTIN(__nvvm_fmax_nan_f, "fff", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_ftz_nan_f, "fff", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_f, "fff", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_ftz_xorsign_abs_f, "fff", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_f, "fff", "", AND(SM_86, PTX72)) +TARGET_BUILTIN(__nvvm_fmax_ftz_nan_xorsign_abs_f, "fff", "", AND(SM_86, PTX72)) BUILTIN(__nvvm_fmax_d, "ddd", "") -BUILTIN(__nvvm_fmin_d, "ddd", "") // Multiplication @@ -2301,6 +2377,13 @@ TARGET_BUILTIN(__nvvm_cp_async_commit_group, "v", "", AND(SM_80,PTX70)) TARGET_BUILTIN(__nvvm_cp_async_wait_group, "vIi", "", AND(SM_80,PTX70)) TARGET_BUILTIN(__nvvm_cp_async_wait_all, "v", "", AND(SM_80,PTX70)) + +// bf16, bf16x2 abs, neg +TARGET_BUILTIN(__nvvm_abs_bf16, "UsUs", "", AND(SM_80,PTX70)) +TARGET_BUILTIN(__nvvm_abs_bf16x2, "ZUiZUi", "", AND(SM_80,PTX70)) +TARGET_BUILTIN(__nvvm_neg_bf16, "UsUs", "", AND(SM_80,PTX70)) +TARGET_BUILTIN(__nvvm_neg_bf16x2, "ZUiZUi", "", AND(SM_80,PTX70)) + #undef BUILTIN #undef TARGET_BUILTIN #pragma pop_macro("AND") diff --git a/clang/test/CodeGen/builtins-nvptx-native-half-type.c b/clang/test/CodeGen/builtins-nvptx-native-half-type.c new file mode 100644 index 0000000000000..4440b274f670f --- /dev/null +++ b/clang/test/CodeGen/builtins-nvptx-native-half-type.c @@ -0,0 +1,103 @@ +// REQUIRES: nvptx-registered-target + +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx-unknown-unknown -target-cpu \ +// RUN: sm_80 -target-feature +ptx70 -fcuda-is-device -fnative-half-type -S \ +// RUN: -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX70_SM80 %s + +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown \ +// RUN: -target-cpu sm_80 -target-feature +ptx70 -fcuda-is-device \ +// RUN: -fnative-half-type -S -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX70_SM80 %s + +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx-unknown-unknown -target-cpu \ +// RUN: sm_86 
-target-feature +ptx72 -fcuda-is-device -fnative-half-type -S \ +// RUN: -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX72_SM86 %s + +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown \ +// RUN: -target-cpu sm_86 -target-feature +ptx72 -fcuda-is-device \ +// RUN: -fnative-half-type -S -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX72_SM86 %s + +#define __device__ __attribute__((device)) + +// CHECK-LABEL: nvvm_min_max_sm80 +__device__ void nvvm_min_max_sm80() { +#if __CUDA_ARCH__ >= 800 + // CHECK_PTX70_SM80: call half @llvm.nvvm.fmin.f16 + __nvvm_fmin_f16(0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call half @llvm.nvvm.fmin.ftz.f16 + __nvvm_fmin_ftz_f16(0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call half @llvm.nvvm.fmin.nan.f16 + __nvvm_fmin_nan_f16(0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call half @llvm.nvvm.fmin.ftz.nan.f16 + __nvvm_fmin_ftz_nan_f16(0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fmin.f16x2 + __nvvm_fmin_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fmin.ftz.f16x2 + __nvvm_fmin_ftz_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fmin.nan.f16x2 + __nvvm_fmin_nan_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fmin.ftz.nan.f16x2 + __nvvm_fmin_ftz_nan_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + + // CHECK_PTX70_SM80: call half @llvm.nvvm.fmax.f16 + __nvvm_fmax_f16(0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call half @llvm.nvvm.fmax.ftz.f16 + __nvvm_fmax_ftz_f16(0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call half @llvm.nvvm.fmax.nan.f16 + __nvvm_fmax_nan_f16(0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call half @llvm.nvvm.fmax.ftz.nan.f16 + __nvvm_fmax_ftz_nan_f16(0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fmax.f16x2 + __nvvm_fmax_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fmax.ftz.f16x2 + __nvvm_fmax_ftz_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fmax.nan.f16x2 + __nvvm_fmax_nan_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fmax.ftz.nan.f16x2 + __nvvm_fmax_ftz_nan_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); +#endif + // CHECK: ret void +} + +// CHECK-LABEL: nvvm_min_max_sm86 +__device__ void nvvm_min_max_sm86() { +#if __CUDA_ARCH__ >= 860 + // CHECK_PTX72_SM86: call half @llvm.nvvm.fmin.xorsign.abs.f16 + __nvvm_fmin_xorsign_abs_f16(0.1f16, 0.1f16); + // CHECK_PTX72_SM86: call half @llvm.nvvm.fmin.ftz.xorsign.abs.f16 + __nvvm_fmin_ftz_xorsign_abs_f16(0.1f16, 0.1f16); + // CHECK_PTX72_SM86: call half @llvm.nvvm.fmin.nan.xorsign.abs.f16 + __nvvm_fmin_nan_xorsign_abs_f16(0.1f16, 0.1f16); + // CHECK_PTX72_SM86: call half @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16 + __nvvm_fmin_ftz_nan_xorsign_abs_f16(0.1f16, 0.1f16); + // CHECK_PTX72_SM86: call <2 x half> @llvm.nvvm.fmin.xorsign.abs.f16x2 + __nvvm_fmin_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX72_SM86: call <2 x half> @llvm.nvvm.fmin.ftz.xorsign.abs.f16x2 + __nvvm_fmin_ftz_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX72_SM86: call <2 x half> @llvm.nvvm.fmin.nan.xorsign.abs.f16x2 + __nvvm_fmin_nan_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX72_SM86: call <2 x half> @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2 + 
__nvvm_fmin_ftz_nan_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + + // CHECK_PTX72_SM86: call half @llvm.nvvm.fmax.xorsign.abs.f16 + __nvvm_fmax_xorsign_abs_f16(0.1f16, 0.1f16); + // CHECK_PTX72_SM86: call half @llvm.nvvm.fmax.ftz.xorsign.abs.f16 + __nvvm_fmax_ftz_xorsign_abs_f16(0.1f16, 0.1f16); + // CHECK_PTX72_SM86: call half @llvm.nvvm.fmax.nan.xorsign.abs.f16 + __nvvm_fmax_nan_xorsign_abs_f16(0.1f16, 0.1f16); + // CHECK_PTX72_SM86: call half @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16 + __nvvm_fmax_ftz_nan_xorsign_abs_f16(0.1f16, 0.1f16); + // CHECK_PTX72_SM86: call <2 x half> @llvm.nvvm.fmax.xorsign.abs.f16x2 + __nvvm_fmax_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX72_SM86: call <2 x half> @llvm.nvvm.fmax.ftz.xorsign.abs.f16x2 + __nvvm_fmax_ftz_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX72_SM86: call <2 x half> @llvm.nvvm.fmax.nan.xorsign.abs.f16x2 + __nvvm_fmax_nan_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); + // CHECK_PTX72_SM86: call <2 x half> @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2 + __nvvm_fmax_ftz_nan_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}); +#endif + // CHECK: ret void +} diff --git a/clang/test/CodeGen/builtins-nvptx.c b/clang/test/CodeGen/builtins-nvptx.c index bbd60effc70e4..9789df8977341 100644 --- a/clang/test/CodeGen/builtins-nvptx.c +++ b/clang/test/CodeGen/builtins-nvptx.c @@ -16,6 +16,12 @@ // RUN: | FileCheck -check-prefix=CHECK -check-prefix=LP64 %s // RUN: %clang_cc1 -triple nvptx-unknown-unknown -target-cpu sm_53 \ // RUN: -DERROR_CHECK -fcuda-is-device -S -o /dev/null -x cuda -verify %s +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx-unknown-unknown -target-cpu sm_86 -target-feature +ptx72 \ +// RUN: -fcuda-is-device -S -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX72_SM86 -check-prefix=LP32 %s +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_86 -target-feature +ptx72 \ +// RUN: -fcuda-is-device -S -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX72_SM86 -check-prefix=LP64 %s #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) @@ -4231,3 +4237,112 @@ __device__ void nvvm_cvt_sm80() { #endif // CHECK: ret void } + +// CHECK-LABEL: nvvm_abs_neg_bf16_bf16x2_sm80 +__device__ void nvvm_abs_neg_bf16_bf16x2_sm80() { +#if __CUDA_ARCH__ >= 800 + + // CHECK_PTX70_SM80: call i16 @llvm.nvvm.abs.bf16(i16 -1) + __nvvm_abs_bf16(0xFFFF); + // CHECK_PTX70_SM80: call i32 @llvm.nvvm.abs.bf16x2(i32 -1) + __nvvm_abs_bf16x2(0xFFFFFFFF); + + // CHECK_PTX70_SM80: call i16 @llvm.nvvm.neg.bf16(i16 -1) + __nvvm_neg_bf16(0xFFFF); + // CHECK_PTX70_SM80: call i32 @llvm.nvvm.neg.bf16x2(i32 -1) + __nvvm_neg_bf16x2(0xFFFFFFFF); +#endif + // CHECK: ret void +} + +#define NAN32 0x7FBFFFFF +#define NAN16 0x7FBF +#define BF16 0x1234 +#define BF16_2 0x4321 +#define NANBF16 0xFFC1 +#define BF16X2 0x12341234 +#define BF16X2_2 0x32343234 +#define NANBF16X2 0xFFC1FFC1 + +// CHECK-LABEL: nvvm_min_max_sm80 +__device__ void nvvm_min_max_sm80() { +#if __CUDA_ARCH__ >= 800 + + // CHECK_PTX70_SM80: call float @llvm.nvvm.fmin.nan.f + __nvvm_fmin_nan_f(0.1f, (float)NAN32); + // CHECK_PTX70_SM80: call float @llvm.nvvm.fmin.ftz.nan.f + __nvvm_fmin_ftz_nan_f(0.1f, (float)NAN32); + + // CHECK_PTX70_SM80: call i16 @llvm.nvvm.fmin.bf16 + __nvvm_fmin_bf16(BF16, BF16_2); + // CHECK_PTX70_SM80: call i16 @llvm.nvvm.fmin.nan.bf16 + __nvvm_fmin_nan_bf16(BF16, NANBF16); + // 
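
// The hex constants defined above are raw bit patterns rather than numeric
// values: BF16 (0x1234) is a small positive bf16 number, and NANBF16 (0xFFC1)
// has an all-ones exponent with a non-zero mantissa, i.e. a bf16 NaN. A
// self-contained host-side check of that reading:
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float bf16_to_float(uint16_t h) {
  uint32_t u = (uint32_t)h << 16; // widen bf16 to binary32
  float f;
  memcpy(&f, &u, sizeof f);
  return f;
}

int main(void) {
  printf("0xFFC1 -> %s\n", isnan(bf16_to_float(0xFFC1)) ? "NaN" : "number");
  printf("0x1234 -> %g\n", (double)bf16_to_float(0x1234)); // roughly 5.7e-28
  return 0;
}
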
CHECK_PTX70_SM80: call i32 @llvm.nvvm.fmin.bf16x2 + __nvvm_fmin_bf16x2(BF16X2, BF16X2_2); + // CHECK_PTX70_SM80: call i32 @llvm.nvvm.fmin.nan.bf16x2 + __nvvm_fmin_nan_bf16x2(BF16X2, NANBF16X2); + // CHECK_PTX70_SM80: call float @llvm.nvvm.fmax.nan.f + __nvvm_fmax_nan_f(0.1f, 0.11f); + // CHECK_PTX70_SM80: call float @llvm.nvvm.fmax.ftz.nan.f + __nvvm_fmax_ftz_nan_f(0.1f, (float)NAN32); + + // CHECK_PTX70_SM80: call float @llvm.nvvm.fmax.nan.f + __nvvm_fmax_nan_f(0.1f, (float)NAN32); + // CHECK_PTX70_SM80: call float @llvm.nvvm.fmax.ftz.nan.f + __nvvm_fmax_ftz_nan_f(0.1f, (float)NAN32); + // CHECK_PTX70_SM80: call i16 @llvm.nvvm.fmax.bf16 + __nvvm_fmax_bf16(BF16, BF16_2); + // CHECK_PTX70_SM80: call i16 @llvm.nvvm.fmax.nan.bf16 + __nvvm_fmax_nan_bf16(BF16, NANBF16); + // CHECK_PTX70_SM80: call i32 @llvm.nvvm.fmax.bf16x2 + __nvvm_fmax_bf16x2(BF16X2, BF16X2_2); + // CHECK_PTX70_SM80: call i32 @llvm.nvvm.fmax.nan.bf16x2 + __nvvm_fmax_nan_bf16x2(NANBF16X2, BF16X2); + // CHECK_PTX70_SM80: call float @llvm.nvvm.fmax.nan.f + __nvvm_fmax_nan_f(0.1f, (float)NAN32); + // CHECK_PTX70_SM80: call float @llvm.nvvm.fmax.ftz.nan.f + __nvvm_fmax_ftz_nan_f(0.1f, (float)NAN32); + +#endif + // CHECK: ret void +} +// CHECK-LABEL: nvvm_min_max_sm86 +__device__ void nvvm_min_max_sm86() { +#if __CUDA_ARCH__ >= 860 + + // CHECK_PTX72_SM86: call i16 @llvm.nvvm.fmin.xorsign.abs.bf16 + __nvvm_fmin_xorsign_abs_bf16(BF16, BF16_2); + // CHECK_PTX72_SM86: call i16 @llvm.nvvm.fmin.nan.xorsign.abs.bf16 + __nvvm_fmin_nan_xorsign_abs_bf16(BF16, NANBF16); + // CHECK_PTX72_SM86: call i32 @llvm.nvvm.fmin.xorsign.abs.bf16x2 + __nvvm_fmin_xorsign_abs_bf16x2(BF16X2, BF16X2_2); + // CHECK_PTX72_SM86: call i32 @llvm.nvvm.fmin.nan.xorsign.abs.bf16x2 + __nvvm_fmin_nan_xorsign_abs_bf16x2(BF16X2, NANBF16X2); + // CHECK_PTX72_SM86: call float @llvm.nvvm.fmin.xorsign.abs.f + __nvvm_fmin_xorsign_abs_f(-0.1f, 0.1f); + // CHECK_PTX72_SM86: call float @llvm.nvvm.fmin.ftz.xorsign.abs.f + __nvvm_fmin_ftz_xorsign_abs_f(-0.1f, 0.1f); + // CHECK_PTX72_SM86: call float @llvm.nvvm.fmin.nan.xorsign.abs.f + __nvvm_fmin_nan_xorsign_abs_f(-0.1f, (float)NAN32); + // CHECK_PTX72_SM86: call float @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f + __nvvm_fmin_ftz_nan_xorsign_abs_f(-0.1f, (float)NAN32); + + // CHECK_PTX72_SM86: call i16 @llvm.nvvm.fmax.xorsign.abs.bf16 + __nvvm_fmax_xorsign_abs_bf16(BF16, BF16_2); + // CHECK_PTX72_SM86: call i16 @llvm.nvvm.fmax.nan.xorsign.abs.bf16 + __nvvm_fmax_nan_xorsign_abs_bf16(BF16, NANBF16); + // CHECK_PTX72_SM86: call i32 @llvm.nvvm.fmax.xorsign.abs.bf16x2 + __nvvm_fmax_xorsign_abs_bf16x2(BF16X2, BF16X2_2); + // CHECK_PTX72_SM86: call i32 @llvm.nvvm.fmax.nan.xorsign.abs.bf16x2 + __nvvm_fmax_nan_xorsign_abs_bf16x2(BF16X2, NANBF16X2); + // CHECK_PTX72_SM86: call float @llvm.nvvm.fmax.xorsign.abs.f + __nvvm_fmax_xorsign_abs_f(-0.1f, 0.1f); + // CHECK_PTX72_SM86: call float @llvm.nvvm.fmax.ftz.xorsign.abs.f + __nvvm_fmax_ftz_xorsign_abs_f(-0.1f, 0.1f); + // CHECK_PTX72_SM86: call float @llvm.nvvm.fmax.nan.xorsign.abs.f + __nvvm_fmax_nan_xorsign_abs_f(-0.1f, (float)NAN32); + // CHECK_PTX72_SM86: call float @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f + __nvvm_fmax_ftz_nan_xorsign_abs_f(-0.1f, (float)NAN32); +#endif + // CHECK: ret void +} From 4057339f6c60a7b240e83384d5c8cfd36a855c83 Mon Sep 17 00:00:00 2001 From: Jakub Chlanda Date: Tue, 1 Mar 2022 10:29:54 -0800 Subject: [PATCH 12/18] [NVPTX] Add more FMA intriniscs/builtins This patch adds builtins/intrinsics for the following variants of FMA: NOTE: follow-up commit with the missing 
clang-side changes. - f16, f16x2 - rn - rn_ftz - rn_sat - rn_ftz_sat - rn_relu - rn_ftz_relu - bf16, bf16x2 - rn - rn_relu ptxas (Cuda compilation tools, release 11.0, V11.0.194) is happy with the generated assembly. Differential Revision: https://reviews.llvm.org/D118977 --- clang/include/clang/Basic/BuiltinsNVPTX.def | 22 ++++++++ .../CodeGen/builtins-nvptx-native-half-type.c | 56 +++++++++++++++++++ clang/test/CodeGen/builtins-nvptx.c | 16 ++++++ 3 files changed, 94 insertions(+) diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.def b/clang/include/clang/Basic/BuiltinsNVPTX.def index 926f6afca3acf..53c0cb9a074ac 100644 --- a/clang/include/clang/Basic/BuiltinsNVPTX.def +++ b/clang/include/clang/Basic/BuiltinsNVPTX.def @@ -17,6 +17,7 @@ # define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS) #endif +#pragma push_macro("SM_53") #pragma push_macro("SM_70") #pragma push_macro("SM_72") #pragma push_macro("SM_75") @@ -30,7 +31,9 @@ #pragma push_macro("SM_60") #define SM_60 "sm_60|sm_61|sm_62|" SM_70 +#define SM_53 "sm_53|" SM_60 +#pragma push_macro("PTX42") #pragma push_macro("PTX60") #pragma push_macro("PTX61") #pragma push_macro("PTX63") @@ -53,6 +56,7 @@ #define PTX63 "ptx63|" PTX64 #define PTX61 "ptx61|" PTX63 #define PTX60 "ptx60|" PTX61 +#define PTX42 "ptx42|" PTX60 #pragma push_macro("AND") #define AND(a, b) "(" a "),(" b ")" @@ -296,6 +300,22 @@ BUILTIN(__nvvm_cos_approx_f, "ff", "") // Fma +TARGET_BUILTIN(__nvvm_fma_rn_f16, "hhhh", "", AND(SM_53, PTX42)) +TARGET_BUILTIN(__nvvm_fma_rn_ftz_f16, "hhhh", "", AND(SM_53, PTX42)) +TARGET_BUILTIN(__nvvm_fma_rn_sat_f16, "hhhh", "", AND(SM_53, PTX42)) +TARGET_BUILTIN(__nvvm_fma_rn_ftz_sat_f16, "hhhh", "", AND(SM_53, PTX42)) +TARGET_BUILTIN(__nvvm_fma_rn_relu_f16, "hhhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fma_rn_ftz_relu_f16, "hhhh", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fma_rn_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42)) +TARGET_BUILTIN(__nvvm_fma_rn_ftz_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42)) +TARGET_BUILTIN(__nvvm_fma_rn_sat_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42)) +TARGET_BUILTIN(__nvvm_fma_rn_ftz_sat_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42)) +TARGET_BUILTIN(__nvvm_fma_rn_relu_f16x2, "V2hV2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fma_rn_ftz_relu_f16x2, "V2hV2hV2hV2h", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fma_rn_bf16, "UsUsUsUs", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fma_rn_relu_bf16, "UsUsUsUs", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fma_rn_bf16x2, "ZUiZUiZUiZUi", "", AND(SM_80, PTX70)) +TARGET_BUILTIN(__nvvm_fma_rn_relu_bf16x2, "ZUiZUiZUiZUi", "", AND(SM_80, PTX70)) BUILTIN(__nvvm_fma_rn_ftz_f, "ffff", "") BUILTIN(__nvvm_fma_rn_f, "ffff", "") BUILTIN(__nvvm_fma_rz_ftz_f, "ffff", "") @@ -2387,12 +2407,14 @@ TARGET_BUILTIN(__nvvm_neg_bf16x2, "ZUiZUi", "", AND(SM_80,PTX70)) #undef BUILTIN #undef TARGET_BUILTIN #pragma pop_macro("AND") +#pragma pop_macro("SM_53") #pragma pop_macro("SM_60") #pragma pop_macro("SM_70") #pragma pop_macro("SM_72") #pragma pop_macro("SM_75") #pragma pop_macro("SM_80") #pragma pop_macro("SM_86") +#pragma pop_macro("PTX42") #pragma pop_macro("PTX60") #pragma pop_macro("PTX61") #pragma pop_macro("PTX63") diff --git a/clang/test/CodeGen/builtins-nvptx-native-half-type.c b/clang/test/CodeGen/builtins-nvptx-native-half-type.c index 4440b274f670f..c232c4de5640a 100644 --- a/clang/test/CodeGen/builtins-nvptx-native-half-type.c +++ b/clang/test/CodeGen/builtins-nvptx-native-half-type.c @@ -20,6 +20,16 @@ // RUN: -fnative-half-type 
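
// The instruction-name suffixes exercised by the new FMA builtins, written out
// as host-side reference helpers on float (illustrative only):
//   rn   - round the result to nearest-even (the only mode exposed here)
//   ftz  - flush subnormal inputs and results to zero
//   sat  - clamp the final result into [0.0, 1.0]
//   relu - clamp negative results to zero (see the fma_relu reference earlier)
#include <math.h>

static float ref_sat(float r)  { return fminf(fmaxf(r, 0.0f), 1.0f); }
static float ref_relu(float r) { return r > 0.0f ? r : 0.0f; }
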
-S -emit-llvm -o - -x cuda %s \ // RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX72_SM86 %s +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx-unknown-unknown -target-cpu \ +// RUN: sm_53 -target-feature +ptx42 -fcuda-is-device -fnative-half-type -S \ +// RUN: -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX42_SM53 %s + +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown \ +// RUN: -target-cpu sm_53 -target-feature +ptx42 -fcuda-is-device \ +// RUN: -fnative-half-type -S -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX42_SM53 %s + #define __device__ __attribute__((device)) // CHECK-LABEL: nvvm_min_max_sm80 @@ -62,6 +72,52 @@ __device__ void nvvm_min_max_sm80() { // CHECK: ret void } +// CHECK-LABEL: nvvm_fma_f16_f16x2_sm80 +__device__ void nvvm_fma_f16_f16x2_sm80() { +#if __CUDA_ARCH__ >= 800 + // CHECK_PTX70_SM80: call half @llvm.nvvm.fma.rn.relu.f16 + __nvvm_fma_rn_relu_f16(0.1f16, 0.1f16, 0.1f16); + // CHECK_PTX70_SM80: call half @llvm.nvvm.fma.rn.ftz.relu.f16 + __nvvm_fma_rn_ftz_relu_f16(0.1f16, 0.1f16, 0.1f16); + + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fma.rn.relu.f16x2 + __nvvm_fma_rn_relu_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}, + {0.1f16, 0.7f16}); + // CHECK_PTX70_SM80: call <2 x half> @llvm.nvvm.fma.rn.ftz.relu.f16x2 + __nvvm_fma_rn_ftz_relu_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}, + {0.1f16, 0.7f16}); +#endif + // CHECK: ret void +} + +// CHECK-LABEL: nvvm_fma_f16_f16x2_sm53 +__device__ void nvvm_fma_f16_f16x2_sm53() { +#if __CUDA_ARCH__ >= 530 + // CHECK_PTX42_SM53: call half @llvm.nvvm.fma.rn.f16 + __nvvm_fma_rn_f16(0.1f16, 0.1f16, 0.1f16); + // CHECK_PTX42_SM53: call half @llvm.nvvm.fma.rn.ftz.f16 + __nvvm_fma_rn_ftz_f16(0.1f16, 0.1f16, 0.1f16); + // CHECK_PTX42_SM53: call half @llvm.nvvm.fma.rn.sat.f16 + __nvvm_fma_rn_sat_f16(0.1f16, 0.1f16, 0.1f16); + // CHECK_PTX42_SM53: call half @llvm.nvvm.fma.rn.ftz.sat.f16 + __nvvm_fma_rn_ftz_sat_f16(0.1f16, 0.1f16, 0.1f16); + + // CHECK_PTX42_SM53: call <2 x half> @llvm.nvvm.fma.rn.f16x2 + __nvvm_fma_rn_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}, + {0.1f16, 0.7f16}); + // CHECK_PTX42_SM53: call <2 x half> @llvm.nvvm.fma.rn.ftz.f16x2 + __nvvm_fma_rn_ftz_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}, + {0.1f16, 0.7f16}); + // CHECK_PTX42_SM53: call <2 x half> @llvm.nvvm.fma.rn.sat.f16x2 + __nvvm_fma_rn_sat_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}, + {0.1f16, 0.7f16}); + // CHECK_PTX42_SM53: call <2 x half> @llvm.nvvm.fma.rn.ftz.sat.f16x2 + __nvvm_fma_rn_ftz_sat_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16}, + {0.1f16, 0.7f16}); +#endif + // CHECK: ret void +} + // CHECK-LABEL: nvvm_min_max_sm86 __device__ void nvvm_min_max_sm86() { #if __CUDA_ARCH__ >= 860 diff --git a/clang/test/CodeGen/builtins-nvptx.c b/clang/test/CodeGen/builtins-nvptx.c index 9789df8977341..368974095fad7 100644 --- a/clang/test/CodeGen/builtins-nvptx.c +++ b/clang/test/CodeGen/builtins-nvptx.c @@ -4306,6 +4306,22 @@ __device__ void nvvm_min_max_sm80() { #endif // CHECK: ret void } + +// CHECK-LABEL: nvvm_fma_bf16_bf16x2_sm80 +__device__ void nvvm_fma_bf16_bf16x2_sm80() { +#if __CUDA_ARCH__ >= 800 + // CHECK_PTX70_SM80: call i16 @llvm.nvvm.fma.rn.bf16 + __nvvm_fma_rn_bf16(0x1234, 0x7FBF, 0x1234); + // CHECK_PTX70_SM80: call i16 @llvm.nvvm.fma.rn.relu.bf16 + __nvvm_fma_rn_relu_bf16(0x1234, 0x7FBF, 0x1234); + // CHECK_PTX70_SM80: call i32 @llvm.nvvm.fma.rn.bf16x2 + __nvvm_fma_rn_bf16x2(0x7FBFFFFF, 0xFFFFFFFF, 0x7FBFFFFF); + // CHECK_PTX70_SM80: 
call i32 @llvm.nvvm.fma.rn.relu.bf16x2 + __nvvm_fma_rn_relu_bf16x2(0x7FBFFFFF, 0xFFFFFFFF, 0x7FBFFFFF); +#endif + // CHECK: ret void +} + // CHECK-LABEL: nvvm_min_max_sm86 __device__ void nvvm_min_max_sm86() { #if __CUDA_ARCH__ >= 860 From c0610903bf1365219a9278dfc26e26bc8da063fe Mon Sep 17 00:00:00 2001 From: Nicolas Miller Date: Tue, 1 Mar 2022 10:34:02 -0800 Subject: [PATCH 13/18] [NVPTX] Add ex2.approx.f16/f16x2 support NOTE: this is a follow-up commit with the missing clang-side changes. This patch adds builtins and intrinsics for the f16 and f16x2 variants of the ex2 instruction. These two variants were added in PTX7.0, and are supported by sm_75 and above. Note that this isn't wired with the exp2 llvm intrinsic because the ex2 instruction is only available in its approx variant. Running ptxas on the assembly generated by the test f16-ex2.ll works as expected. Differential Revision: https://reviews.llvm.org/D119157 --- clang/include/clang/Basic/BuiltinsNVPTX.def | 2 ++ .../CodeGen/builtins-nvptx-native-half-type.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.def b/clang/include/clang/Basic/BuiltinsNVPTX.def index 53c0cb9a074ac..34ba0308c6012 100644 --- a/clang/include/clang/Basic/BuiltinsNVPTX.def +++ b/clang/include/clang/Basic/BuiltinsNVPTX.def @@ -285,6 +285,8 @@ BUILTIN(__nvvm_saturate_d, "dd", "") BUILTIN(__nvvm_ex2_approx_ftz_f, "ff", "") BUILTIN(__nvvm_ex2_approx_f, "ff", "") BUILTIN(__nvvm_ex2_approx_d, "dd", "") +TARGET_BUILTIN(__nvvm_ex2_approx_f16, "hh", "", AND(SM_75, PTX70)) +TARGET_BUILTIN(__nvvm_ex2_approx_f16x2, "V2hV2h", "", AND(SM_75, PTX70)) BUILTIN(__nvvm_lg2_approx_ftz_f, "ff", "") BUILTIN(__nvvm_lg2_approx_f, "ff", "") diff --git a/clang/test/CodeGen/builtins-nvptx-native-half-type.c b/clang/test/CodeGen/builtins-nvptx-native-half-type.c index c232c4de5640a..95021f274cd0f 100644 --- a/clang/test/CodeGen/builtins-nvptx-native-half-type.c +++ b/clang/test/CodeGen/builtins-nvptx-native-half-type.c @@ -1,4 +1,9 @@ // REQUIRES: nvptx-registered-target +// +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx-unknown-unknown -target-cpu \ +// RUN: sm_75 -target-feature +ptx70 -fcuda-is-device -fnative-half-type -S \ +// RUN: -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX70_SM75 %s // RUN: %clang_cc1 -ffp-contract=off -triple nvptx-unknown-unknown -target-cpu \ // RUN: sm_80 -target-feature +ptx70 -fcuda-is-device -fnative-half-type -S \ @@ -32,6 +37,16 @@ #define __device__ __attribute__((device)) +__device__ void nvvm_ex2_sm75() { +#if __CUDA_ARCH__ >= 750 + // CHECK_PTX70_SM75: call half @llvm.nvvm.ex2.approx.f16 + __nvvm_ex2_approx_f16(0.1f16); + // CHECK_PTX70_SM75: call <2 x half> @llvm.nvvm.ex2.approx.f16x2 + __nvvm_ex2_approx_f16x2({0.1f16, 0.7f16}); +#endif + // CHECK: ret void +} + // CHECK-LABEL: nvvm_min_max_sm80 __device__ void nvvm_min_max_sm80() { #if __CUDA_ARCH__ >= 800 From 5ea7a87128161624cbbe60b36d00bffaf14a8b74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Wed, 2 Mar 2022 08:38:26 +0000 Subject: [PATCH 14/18] bugfixes --- libclc/ptx-nvidiacl/libspirv/math/fabs.cl | 6 ++++-- libclc/ptx-nvidiacl/libspirv/math/fmax.cl | 4 ++-- libclc/ptx-nvidiacl/libspirv/math/fmin.cl | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fabs.cl b/libclc/ptx-nvidiacl/libspirv/math/fabs.cl index 1d896d07ecae3..2795b6acc9fa0 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fabs.cl +++ 
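
// The CUDA tests above guard every builtin call with __CUDA_ARCH__ so one
// source file can be compiled for several SM levels. The same dispatch shape
// in C, using the generic float builtin listed in this file and a hypothetical
// helper name; in a host compile (__CUDA_ARCH__ undefined) only the portable
// branch remains:
#include <math.h>

static float fast_exp2(float x) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750
  return __nvvm_ex2_approx_f(x); // lowers to a single ex2.approx.f32
#else
  return exp2f(x);               // exact library fallback elsewhere
#endif
}
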
b/libclc/ptx-nvidiacl/libspirv/math/fabs.cl @@ -11,6 +11,8 @@ #include "../../include/libdevice.h" #include +extern int __clc_nvvm_reflect_arch(); + #define __CLC_FUNCTION __spirv_ocl_fabs #define __CLC_BUILTIN __nv_fabs #define __CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) @@ -18,7 +20,7 @@ _CLC_DEF _CLC_OVERLOAD ushort __clc_fabs(ushort x) { if (__clc_nvvm_reflect_arch() >= 800) { - return __nvvm_fabs_bf16(x); + return __nvvm_abs_bf16(x); } __builtin_trap(); __builtin_unreachable(); @@ -27,7 +29,7 @@ _CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fabs, ushort) _CLC_DEF _CLC_OVERLOAD uint __clc_fabs(uint x) { if (__clc_nvvm_reflect_arch() >= 800) { - return __nvvm_fabs_bf16x2(x); + return __nvvm_abs_bf16x2(x); } __builtin_trap(); __builtin_unreachable(); diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl index 3d1e82ab1c6dd..de89ad3ad77a1 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl @@ -52,7 +52,7 @@ _CLC_BINARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmax, _CLC_DEF _CLC_OVERLOAD ushort __clc_fmax(ushort x, ushort y) { if (__clc_nvvm_reflect_arch() >= 800) { - return __nvvm_max_rn_bf16(x, y); + return __nvvm_fmax_bf16(x, y); } __builtin_trap(); __builtin_unreachable(); @@ -62,7 +62,7 @@ _CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fmax, ushort, _CLC_DEF _CLC_OVERLOAD uint __clc_fmax(uint x, uint y) { if (__clc_nvvm_reflect_arch() >= 800) { - return __nvvm_max_rn_bf16x2(x, y); + return __nvvm_fmax_bf16x2(x, y); } __builtin_trap(); __builtin_unreachable(); diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl index 8796a90044f2d..b1674fb0cfbcf 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl @@ -52,7 +52,7 @@ _CLC_BINARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, __spirv_ocl_fmin, half _CLC_DEF _CLC_OVERLOAD ushort __clc_fmin(ushort x, ushort y) { if (__clc_nvvm_reflect_arch() >= 800) { - return __nvvm_max_rn_bf16(x, y); + return __nvvm_fmin_bf16(x, y); } __builtin_trap(); __builtin_unreachable(); @@ -62,7 +62,7 @@ _CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fmin, ushort, _CLC_DEF _CLC_OVERLOAD uint __clc_fmin(uint x, uint y) { if (__clc_nvvm_reflect_arch() >= 800) { - return __nvvm_max_rn_bf16x2(x, y); + return __nvvm_fmin_bf16x2(x, y); } __builtin_trap(); __builtin_unreachable(); From 1b490100d08ea655290d8175e47b897f6c78caef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Ciglari=C4=8D?= Date: Wed, 9 Mar 2022 11:56:36 +0100 Subject: [PATCH 15/18] Apply suggestions from code review Apply review suggestions. 
Co-authored-by: Alexey Bader --- libclc/ptx-nvidiacl/libspirv/math/fabs.cl | 2 +- libclc/ptx-nvidiacl/libspirv/math/fmax.cl | 2 +- libclc/ptx-nvidiacl/libspirv/math/fmin.cl | 2 +- .../ptx-nvidiacl/libspirv/math/native_exp2.cl | 18 ++++++++---------- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fabs.cl b/libclc/ptx-nvidiacl/libspirv/math/fabs.cl index 2795b6acc9fa0..0aac0fa4ab0f0 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fabs.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fabs.cl @@ -34,4 +34,4 @@ _CLC_DEF _CLC_OVERLOAD uint __clc_fabs(uint x) { __builtin_trap(); __builtin_unreachable(); } -_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fabs, uint) \ No newline at end of file +_CLC_UNARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fabs, uint) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl index de89ad3ad77a1..c43b6f45e22d2 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl @@ -68,4 +68,4 @@ _CLC_DEF _CLC_OVERLOAD uint __clc_fmax(uint x, uint y) { __builtin_unreachable(); } _CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fmax, uint, - uint) \ No newline at end of file + uint) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl index b1674fb0cfbcf..cc6a3e731e1d6 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl @@ -68,4 +68,4 @@ _CLC_DEF _CLC_OVERLOAD uint __clc_fmin(uint x, uint y) { __builtin_unreachable(); } _CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, uint, __clc_fmin, uint, - uint) \ No newline at end of file + uint) diff --git a/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl b/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl index 3cecd6ca93cce..aff20583ab9c8 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl @@ -23,23 +23,21 @@ int __clc_nvvm_reflect_arch(); _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_native_exp2(half x) { if (__clc_nvvm_reflect_arch() >= 750) { return __nvvm_ex2_approx_f16(x); - } else { - float upcast = x; - return __spirv_ocl_native_exp2(upcast); } + float upcast = x; + return __spirv_ocl_native_exp2(upcast); } _CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_native_exp2(half2 x) { if (__clc_nvvm_reflect_arch() >= 750) { return __nvvm_ex2_approx_f16x2(x); - } else { - float upcast0 = x[0]; - float upcast1 = x[1]; - half2 res; - res.s0 = __spirv_ocl_native_exp2(upcast0); - res.s1 = __spirv_ocl_native_exp2(upcast1); - return res; } + float upcast0 = x[0]; + float upcast1 = x[1]; + half2 res; + res.s0 = __spirv_ocl_native_exp2(upcast0); + res.s1 = __spirv_ocl_native_exp2(upcast1); + return res; } _CLC_UNARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, From 50945d068993a84b8de6f942f66ecf080269977e Mon Sep 17 00:00:00 2001 From: Tadej Ciglaric Date: Wed, 9 Mar 2022 11:02:46 +0000 Subject: [PATCH 16/18] change fmax and fmin fallback implementation back to libdevice float functions --- libclc/ptx-nvidiacl/libspirv/math/fmax.cl | 2 +- libclc/ptx-nvidiacl/libspirv/math/fmin.cl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl index c43b6f45e22d2..5ae561bf5b39b 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl @@ -37,7 +37,7 @@ _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fmax(half x, half y) { 
if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fmax_f16(x, y); } - return x > y ? x : y; + return __nv_fmaxf(x,y); } _CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fmax(half2 x, half2 y) { if (__clc_nvvm_reflect_arch() >= 800) { diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl index cc6a3e731e1d6..3c6e23a62eeee 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl @@ -37,7 +37,7 @@ _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fmin(half x, half y) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fmin_f16(x, y); } - return x < y ? x : y; + return __nv_fminf(x,y); } _CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fmin(half2 x, half2 y) { if (__clc_nvvm_reflect_arch() >= 800) { From 4658aac8216027a4d24e774ffb77961fb1284340 Mon Sep 17 00:00:00 2001 From: Tadej Ciglaric Date: Wed, 9 Mar 2022 11:39:06 +0000 Subject: [PATCH 17/18] fix libdevice builtin names --- libclc/ptx-nvidiacl/libspirv/math/fmax.cl | 3 ++- libclc/ptx-nvidiacl/libspirv/math/fmin.cl | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl index 5ae561bf5b39b..645762000ff53 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmax.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmax.cl @@ -8,6 +8,7 @@ #include #include +#include "../../include/libdevice.h" extern int __clc_nvvm_reflect_arch(); @@ -37,7 +38,7 @@ _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fmax(half x, half y) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fmax_f16(x, y); } - return __nv_fmaxf(x,y); + return __nvvm_fmax_f(x,y); } _CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fmax(half2 x, half2 y) { if (__clc_nvvm_reflect_arch() >= 800) { diff --git a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl index 3c6e23a62eeee..6bdc4b8176be5 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/fmin.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/fmin.cl @@ -8,6 +8,7 @@ #include #include +#include "../../include/libdevice.h" extern int __clc_nvvm_reflect_arch(); @@ -37,7 +38,7 @@ _CLC_DEF _CLC_OVERLOAD half __spirv_ocl_fmin(half x, half y) { if (__clc_nvvm_reflect_arch() >= 800) { return __nvvm_fmin_f16(x, y); } - return __nv_fminf(x,y); + return __nvvm_fmin_f(x,y); } _CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_fmin(half2 x, half2 y) { if (__clc_nvvm_reflect_arch() >= 800) { From cab615028bd2dc97e2116840a7d1912658b81602 Mon Sep 17 00:00:00 2001 From: Tadej Ciglaric Date: Wed, 9 Mar 2022 13:25:25 +0000 Subject: [PATCH 18/18] removed native_exp2.cl --- .../ptx-nvidiacl/libspirv/math/native_exp2.cl | 35 ------------------- 1 file changed, 35 deletions(-) diff --git a/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl b/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl index aff20583ab9c8..93c12c0aeb067 100644 --- a/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl +++ b/libclc/ptx-nvidiacl/libspirv/math/native_exp2.cl @@ -14,39 +14,4 @@ #define __CLC_FUNCTION __spirv_ocl_native_exp2 #define __CLC_BUILTIN __nv_exp2 #define __CLC_BUILTIN_F __CLC_XCONCAT(__CLC_BUILTIN, f) - -#ifdef cl_khr_fp16 -#pragma OPENCL EXTENSION cl_khr_fp16 : enable - -int __clc_nvvm_reflect_arch(); - -_CLC_DEF _CLC_OVERLOAD half __spirv_ocl_native_exp2(half x) { - if (__clc_nvvm_reflect_arch() >= 750) { - return __nvvm_ex2_approx_f16(x); - } - float upcast = x; - return __spirv_ocl_native_exp2(upcast); -} - -_CLC_DEF _CLC_OVERLOAD half2 __spirv_ocl_native_exp2(half2 x) { - if (__clc_nvvm_reflect_arch() >= 
750) { - return __nvvm_ex2_approx_f16x2(x); - } - float upcast0 = x[0]; - float upcast1 = x[1]; - half2 res; - res.s0 = __spirv_ocl_native_exp2(upcast0); - res.s1 = __spirv_ocl_native_exp2(upcast1); - return res; -} - -_CLC_UNARY_VECTORIZE_HAVE2(_CLC_OVERLOAD _CLC_DEF, half, - __spirv_ocl_native_exp2, half) - -#endif // cl_khr_fp16 - -// Undef halfs before uncluding unary builtins, as they are handled above. -#ifdef cl_khr_fp16 -#undef cl_khr_fp16 -#endif // cl_khr_fp16 #include
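
For reference, the dispatch pattern that every libclc change in this series follows is sketched below. This sketch is illustrative only and not part of any patch; the identifiers are taken from the diffs above. bf16 scalars travel through the overloads as ushort and bf16x2 pairs as uint; on sm_80 and newer the corresponding NVVM builtin is emitted, and because no bf16 fallback exists on older devices the overload traps instead of silently degrading:

extern int __clc_nvvm_reflect_arch();

_CLC_DEF _CLC_OVERLOAD ushort __clc_fmin(ushort x, ushort y) {
  if (__clc_nvvm_reflect_arch() >= 800) {
    // sm_80 and newer: use the native bf16 minimum wired up in this series
    return __nvvm_fmin_bf16(x, y);
  }
  // No bf16 hardware or libdevice fallback exists before sm_80
  __builtin_trap();
  __builtin_unreachable();
}
_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, ushort, __clc_fmin, ushort,
                      ushort)

The half (fp16) overloads in the patches use the same shape, except that the pre-sm_80 branch falls back to the float builtin (for example __nvvm_fmin_f) instead of trapping, since fp16 values can always be promoted to float.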