
Commit 55c48ee

[RISCV] Ignore interleaved accesses with non-default address spaces (#139698)
This fixes a crash introduced in #137045 (comment), where we don't have overloaded pointer types for the segmented load/store intrinsics. This should be temporary until #139634 lands and overloads the pointer type for these intrinsics.
1 parent 84c1564 commit 55c48ee
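For illustration, a minimal standalone C++ sketch of the shape the legality check takes after this change (the struct and function names below are simplified stand-ins, not the LLVM source): the address-space guard is hoisted ahead of the fixed-length/scalable split, so both kinds of interleaved access are rejected for non-default address spaces before any segment load/store intrinsic would be emitted.

#include <cstdio>

// Simplified stand-in for the information the real target hook receives.
struct InterleavedAccess {
  unsigned AddrSpace;   // address space of the pointer operand
  bool IsFixedLength;   // fixed-length vs. scalable vector access
};

// Sketch of the post-patch control flow: bail out on any non-default
// address space before the per-kind legality checks.
static bool isLegalInterleavedAccess(const InterleavedAccess &Access) {
  // The segment load/store intrinsics are not (yet) overloaded on pointer
  // type, so only the default address space (0) can be handled.
  if (Access.AddrSpace != 0)
    return false;
  // ... remaining checks (element type, EMUL * NFIELDS <= 8, ...) go here.
  return true;
}

int main() {
  // addrspace(1) access: rejected for fixed-length and scalable forms alike.
  std::printf("%d\n", isLegalInterleavedAccess({1, true}));   // prints 0
  // Default address space: falls through to the remaining checks.
  std::printf("%d\n", isLegalInterleavedAccess({0, false}));  // prints 1
  return 0;
}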

2 files changed: +39 −5 lines


llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 5 additions & 5 deletions
@@ -23417,6 +23417,11 @@ bool RISCVTargetLowering::isLegalInterleavedAccessType(
 
   MVT ContainerVT = VT.getSimpleVT();
 
+  // The intrinsics are not (yet) overloaded on pointer type and can only handle
+  // the default address space.
+  if (AddrSpace)
+    return false;
+
   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
     if (!Subtarget.useRVVForFixedLengthVectors())
       return false;
@@ -23426,11 +23431,6 @@ bool RISCVTargetLowering::isLegalInterleavedAccessType(
       return false;
 
     ContainerVT = getContainerForFixedLengthVector(VT.getSimpleVT());
-  } else {
-    // The intrinsics for scalable vectors are not overloaded on pointer type
-    // and can only handle the default address space.
-    if (AddrSpace)
-      return false;
   }
 
   // Need to make sure that EMUL * NFIELDS ≤ 8
Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -mtriple=riscv64 -mattr=+v -p interleaved-access -S | FileCheck %s
+
+; Ensure we don't crash with non-zero address spaces.
+
+define void @load_factor2(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @load_factor2(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = load <16 x i32>, ptr addrspace(1) [[PTR]], align 64
+; CHECK-NEXT:    [[V0:%.*]] = shufflevector <16 x i32> [[INTERLEAVED_VEC]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <16 x i32> [[INTERLEAVED_VEC]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+; CHECK-NEXT:    ret void
+;
+  %interleaved.vec = load <16 x i32>, ptr addrspace(1) %ptr
+  %v0 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %v1 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  ret void
+}
+
+define void @load_factor2_vscale(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @load_factor2_vscale(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = load <vscale x 16 x i32>, ptr addrspace(1) [[PTR]], align 64
+; CHECK-NEXT:    [[V:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> [[INTERLEAVED_VEC]])
+; CHECK-NEXT:    [[T0:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[V]], 0
+; CHECK-NEXT:    [[T1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[V]], 1
+; CHECK-NEXT:    ret void
+;
+  %interleaved.vec = load <vscale x 16 x i32>, ptr addrspace(1) %ptr
+  %v = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %interleaved.vec)
+  %t0 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %v, 0
+  %t1 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %v, 1
+  ret void
+}
