|
| 1 | +// compile-flags: -C no-prepopulate-passes |
| 2 | +// |
| 3 | + |
| 4 | +#![crate_type = "lib"] |
| 5 | +#![feature(repr_simd, platform_intrinsics)] |
| 6 | +#![allow(non_camel_case_types)] |
| 7 | + |
| 8 | +#[repr(simd)] |
| 9 | +#[derive(Copy, Clone)] |
| 10 | +pub struct mask32x2(i32, i32); |
| 11 | + |
| 12 | +#[repr(simd)] |
| 13 | +#[derive(Copy, Clone)] |
| 14 | +pub struct mask8x16(i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8); |
| 15 | + |
| 16 | +extern "platform-intrinsic" { |
| 17 | + fn simd_reduce_all<T>(x: T) -> bool; |
| 18 | + fn simd_reduce_any<T>(x: T) -> bool; |
| 19 | +} |
| 20 | + |
| 21 | +// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM) |
| 22 | +// SIMD vectors are passed directly, resulting in `%x` being a vector, |
| 23 | +// while on others they're passed indirectly, resulting in `%x` being |
| 24 | +// a pointer to a vector, and `%1` a vector loaded from that pointer. |
| 25 | +// This is controlled by the target spec option `simd_types_indirect`. |
| 26 | + |
| 27 | +// CHECK-LABEL: @reduce_any_32x2 |
| 28 | +#[no_mangle] |
| 29 | +pub unsafe fn reduce_any_32x2(x: mask32x2) -> bool { |
| 30 | + // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, <i32 31, i32 31> |
| 31 | + // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1> |
| 32 | + // CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.or.v2i1(<2 x i1> [[B]]) |
| 33 | + // CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8 |
| 34 | + simd_reduce_any(x) |
| 35 | +} |
| 36 | + |
| 37 | +// CHECK-LABEL: @reduce_all_32x2 |
| 38 | +#[no_mangle] |
| 39 | +pub unsafe fn reduce_all_32x2(x: mask32x2) -> bool { |
| 40 | + // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, <i32 31, i32 31> |
| 41 | + // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1> |
| 42 | + // CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> [[B]]) |
| 43 | + // CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8 |
| 44 | + simd_reduce_all(x) |
| 45 | +} |
| 46 | + |
| 47 | +// CHECK-LABEL: @reduce_any_8x16 |
| 48 | +#[no_mangle] |
| 49 | +pub unsafe fn reduce_any_8x16(x: mask8x16) -> bool { |
| 50 | + // CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> |
| 51 | + // CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1> |
| 52 | + // CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[B]]) |
| 53 | + // CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8 |
| 54 | + simd_reduce_any(x) |
| 55 | +} |
| 56 | + |
| 57 | +// CHECK-LABEL: @reduce_all_8x16 |
| 58 | +#[no_mangle] |
| 59 | +pub unsafe fn reduce_all_8x16(x: mask8x16) -> bool { |
| 60 | + // CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> |
| 61 | + // CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1> |
| 62 | + // CHECK: [[C:%[0-9]+]] = call i1 @llvm.vector.reduce.and.v16i1(<16 x i1> [[B]]) |
| 63 | + // CHECK: %{{[0-9]+}} = zext i1 [[C]] to i8 |
| 64 | + simd_reduce_all(x) |
| 65 | +} |