[GlobalISel]: G_UNMERGE_VALUES for vectors with different element sizes #133335

Open. Wants to merge 3 commits into base: main.
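A minimal example of the unmerge shape this patch legalizes, lifted from the comment added to MachineVerifier.cpp below: source and destination vectors that cover the same number of bits but have different element sizes.

%1:_(<2 x s8>), %2:_(<2 x s8>) = G_UNMERGE_VALUES %0:_(<2 x s16>)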
33 changes: 22 additions & 11 deletions llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -8281,9 +8281,8 @@ LegalizerHelper::LegalizeResult
 LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) {
   const unsigned NumDst = MI.getNumOperands() - 1;
   Register SrcReg = MI.getOperand(NumDst).getReg();
-  Register Dst0Reg = MI.getOperand(0).getReg();
-  LLT DstTy = MRI.getType(Dst0Reg);
-  if (DstTy.isPointer())
+  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+  if (DstTy.getScalarType().isPointer())
     return UnableToLegalize; // TODO
 
   SrcReg = coerceToScalar(SrcReg);
@@ -8293,14 +8292,26 @@ LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) {
   // Expand scalarizing unmerge as bitcast to integer and shift.
   LLT IntTy = MRI.getType(SrcReg);
 
-  MIRBuilder.buildTrunc(Dst0Reg, SrcReg);
-
-  const unsigned DstSize = DstTy.getSizeInBits();
-  unsigned Offset = DstSize;
-  for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
-    auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
-    auto Shift = MIRBuilder.buildLShr(IntTy, SrcReg, ShiftAmt);
-    MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
+  const unsigned DstSize = DstTy.getScalarSizeInBits();
+  SmallVector<Register> VectorElems;
+  Register Shift;
+  for (unsigned I = 0, Offset = 0; I != NumDst; Offset += DstSize) {
+    if (Offset) {
+      auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
+      Shift = MIRBuilder.buildLShr(IntTy, SrcReg, ShiftAmt).getReg(0);
+    } else {
+      Shift = SrcReg;
+    }
+    if (DstTy.isVector()) {
+      VectorElems.emplace_back(
+          MIRBuilder.buildTrunc(DstTy.getScalarType(), Shift).getReg(0));
+      if (VectorElems.size() == DstTy.getNumElements()) {
+        MIRBuilder.buildBuildVector(MI.getOperand(I++), VectorElems);
+        VectorElems.clear();
+      }
+    } else {
+      MIRBuilder.buildTrunc(MI.getOperand(I++), Shift);
+    }
   }
 
   MI.eraseFromParent();
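As a concrete illustration of the new loop, here is a hand-written sketch (illustrative register names, not actual legalizer output, and before any further legalization of the pieces) of lowering %1:_(<2 x s8>), %2:_(<2 x s8>) = G_UNMERGE_VALUES %0:_(<2 x s16>). Here DstSize is 8, Offset steps through 0, 8, 16, 24, and each group of DstTy.getNumElements() == 2 truncated elements is rebuilt with G_BUILD_VECTOR:

%src:_(s32) = G_BITCAST %0(<2 x s16>)             ; coerceToScalar
%e0:_(s8)   = G_TRUNC %src(s32)                   ; Offset == 0: source used unshifted
%c8:_(s32)  = G_CONSTANT i32 8
%s1:_(s32)  = G_LSHR %src, %c8(s32)
%e1:_(s8)   = G_TRUNC %s1(s32)
%1:_(<2 x s8>) = G_BUILD_VECTOR %e0(s8), %e1(s8)  ; first destination filled
%c16:_(s32) = G_CONSTANT i32 16
%s2:_(s32)  = G_LSHR %src, %c16(s32)
%e2:_(s8)   = G_TRUNC %s2(s32)
%c24:_(s32) = G_CONSTANT i32 24
%s3:_(s32)  = G_LSHR %src, %c24(s32)
%e3:_(s8)   = G_TRUNC %s3(s32)
%2:_(<2 x s8>) = G_BUILD_VECTOR %e2(s8), %e3(s8)  ; second destination filled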
10 changes: 5 additions & 5 deletions llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1510,11 +1510,11 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
 
     LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
-      // This case is the converse of G_CONCAT_VECTORS.
-      if (!SrcTy.isVector() ||
-          (SrcTy.getScalarType() != DstTy.getScalarType() &&
-           !SrcTy.isPointerVector()) ||
-          SrcTy.isScalableVector() != DstTy.isScalableVector() ||
+      // This case is the converse of G_CONCAT_VECTORS, but relaxed since
+      // G_UNMERGE_VALUES can handle src and dst vectors with different
+      // element sizes:
+      // %1:_(<2 x s8>), %2:_(<2 x s8>) = G_UNMERGE_VALUES %0:_(<2 x s16>)
+      if (SrcTy.isScalableVector() != DstTy.isScalableVector() ||
           SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
         report("G_UNMERGE_VALUES source operand does not match vector "
                "destination operands",
Comment on lines +1513 to +1515

Contributor:

It probably should not support this case. It's making the MIR much more complicated, forcing complexity into every piece of code that wants to do anything to a G_UNMERGE_VALUES. You should be required to unmerge by element, and insert explicit bitcasts.
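For illustration, one possible reading of that suggestion for the <2 x s16> case, as a sketch rather than anything in this patch: unmerge at the common element type, then insert explicit bitcasts to the narrow vector type.

%a:_(s16), %b:_(s16) = G_UNMERGE_VALUES %0:_(<2 x s16>)
%1:_(<2 x s8>) = G_BITCAST %a(s16)
%2:_(<2 x s8>) = G_BITCAST %b(s16)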

Contributor Author:
Hm, so I could do this in LegalizationArtifactCombiner::ArtifactValueFinder::tryCombineMergeLike. But wouldn't that just move the complexity into other code, such as tryCombineMergeLike itself? I think this method currently relies on unmerge to do the right thing:

  // Recognize UnmergeSrc that can be unmerged to DstTy directly.
  // Types have to be either both vector or both non-vector types.
  // Merge-like opcodes are combined one at the time. First one creates new
  // unmerge, following should use the same unmerge (builder performs CSE).
  //
  // %0:_(EltTy), %1, %2, %3 = G_UNMERGE_VALUES %UnmergeSrc:_(UnmergeSrcTy)
  // %Dst:_(DstTy) = G_merge_like_opcode %0:_(EltTy), %1
  // %AnotherDst:_(DstTy) = G_merge_like_opcode %2:_(EltTy), %3
  //
  // %Dst:_(DstTy), %AnotherDst = G_UNMERGE_VALUES %UnmergeSrc
  if ((DstTy.isVector() == UnmergeSrcTy.isVector()) &&
      (Elt0UnmergeIdx % NumMIElts == 0) &&
      getCoverTy(UnmergeSrcTy, DstTy) == UnmergeSrcTy) {
    if (!isSequenceFromUnmerge(MI, 0, Unmerge, Elt0UnmergeIdx, NumMIElts,
                               EltSize, false))
      return false;
    MIB.setInstrAndDebugLoc(MI);
    auto NewUnmerge = MIB.buildUnmerge(DstTy, Unmerge->getSourceReg());
    unsigned DstIdx = (Elt0UnmergeIdx * EltSize) / DstTy.getSizeInBits();
    replaceRegOrBuildCopy(Dst, NewUnmerge.getReg(DstIdx), MRI, MIB,
                          UpdatedDefs, Observer);
    DeadInsts.push_back(&MI);
    return true;
  }

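Note that the relaxed check only pins down the totals: in the %1/%2 example above, SrcTy.getSizeInBits() is 32 and NumDsts * DstTy.getSizeInBits() is 2 * 16 = 32, so the unmerge verifies even though the element types (s16 vs. s8) differ.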
55 changes: 55 additions & 0 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
@@ -6508,3 +6508,58 @@ entry:
%insert = insertelement <5 x double> %vec, double %val, i32 %idx
ret <5 x double> %insert
}

; Found by fuzzer, reduced with llvm-reduce.
define amdgpu_kernel void @insert_very_small_from_very_large(<32 x i16> %L3, ptr %ptr) {
; GPRIDX-LABEL: insert_very_small_from_very_large:
; GPRIDX: ; %bb.0: ; %bb
; GPRIDX-NEXT: s_load_dwordx16 s[12:27], s[8:9], 0x0
; GPRIDX-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x40
; GPRIDX-NEXT: s_waitcnt lgkmcnt(0)
; GPRIDX-NEXT: s_lshr_b32 s2, s12, 1
; GPRIDX-NEXT: s_and_b32 s2, s2, 1
; GPRIDX-NEXT: s_lshl_b32 s2, s2, 1
; GPRIDX-NEXT: v_mov_b32_e32 v0, s0
; GPRIDX-NEXT: v_mov_b32_e32 v2, s2
; GPRIDX-NEXT: v_mov_b32_e32 v1, s1
; GPRIDX-NEXT: flat_store_byte v[0:1], v2
; GPRIDX-NEXT: s_endpgm
;
; GFX10-LABEL: insert_very_small_from_very_large:
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx16 s[12:27], s[8:9], 0x0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x40
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_lshr_b32 s2, s12, 1
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: s_and_b32 s2, s2, 1
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: s_lshl_b32 s2, s2, 1
; GFX10-NEXT: v_mov_b32_e32 v2, s2
; GFX10-NEXT: flat_store_byte v[0:1], v2
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: insert_very_small_from_very_large:
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b512 s[8:23], s[4:5], 0x0
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x40
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_lshr_b32 s2, s8, 1
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_and_b32 s2, s2, 1
; GFX11-NEXT: v_mov_b32_e32 v1, s1
; GFX11-NEXT: s_lshl_b32 s2, s2, 1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: flat_store_b8 v[0:1], v2
; GFX11-NEXT: s_endpgm
bb:
%a = bitcast <32 x i16> %L3 to i512
%b = trunc i512 %a to i8
%c = trunc i8 %b to i2
%d = bitcast i2 %c to <2 x i1>
%insert = insertelement <2 x i1> %d, i1 false, i32 0
store <2 x i1> %insert, ptr %ptr, align 1
ret void
}
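; As far as the generated code can be read, all three run lines reduce this to the same store: s_lshr_b32/s_and_b32 extract bit 1 of the first dword of %L3 (element 1 of the <2 x i1>), s_lshl_b32 puts it back at bit position 1 (element 0 is the inserted false, i.e. 0), and the result is stored as a single byte.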
155 changes: 155 additions & 0 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir
@@ -96,6 +96,41 @@ body: |
$vgpr1 = COPY %4
...

---
name: test_unmerge_v2s8_v2s16
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: test_unmerge_v2s8_v2s16
; CHECK: liveins: $vgpr0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C1]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C2]](s32)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[BITCAST]], [[C3]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C3]]
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LSHR2]], [[C1]](s32)
; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[LSHR1]], [[SHL1]]
; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
; CHECK-NEXT: $vgpr0 = COPY [[BITCAST1]](<2 x s16>)
; CHECK-NEXT: $vgpr1 = COPY [[BITCAST2]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s8>), %2:_(<2 x s8>) = G_UNMERGE_VALUES %0:_(<2 x s16>)
%3:_(<2 x s16>) = G_ANYEXT %1
%4:_(<2 x s16>) = G_ANYEXT %2
$vgpr0 = COPY %3
$vgpr1 = COPY %4
...

---
name: test_unmerge_s16_v3s16
body: |
@@ -120,6 +155,50 @@
$vgpr2 = COPY %6
...

---
name: test_unmerge_v2s8_v3s16
body: |
bb.0:
; CHECK-LABEL: name: test_unmerge_v2s8_v3s16
; CHECK: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C2]](s32)
; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[BITCAST]], [[C3]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C3]]
; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[LSHR2]], [[C1]](s32)
; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[LSHR1]], [[SHL1]]
; CHECK-NEXT: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[BITCAST2]], [[C3]]
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[LSHR3]], [[C3]]
; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL2]]
; CHECK-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
; CHECK-NEXT: $vgpr0 = COPY [[BITCAST3]](<2 x s16>)
; CHECK-NEXT: $vgpr1 = COPY [[BITCAST4]](<2 x s16>)
; CHECK-NEXT: $vgpr2 = COPY [[BITCAST5]](<2 x s16>)
%0:_(<3 x s16>) = G_IMPLICIT_DEF
%1:_(<2 x s8>), %2:_(<2 x s8>), %3:_(<2 x s8>) = G_UNMERGE_VALUES %0
%4:_(<2 x s16>) = G_ANYEXT %1
%5:_(<2 x s16>) = G_ANYEXT %2
%6:_(<2 x s16>) = G_ANYEXT %3
$vgpr0 = COPY %4
$vgpr1 = COPY %5
$vgpr2 = COPY %6
...

---

name: test_unmerge_s16_v4s16
@@ -191,6 +270,62 @@
$vgpr5 = COPY %12
...

---
name: test_unmerge_v4s8_v6s16
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2
; CHECK-LABEL: name: test_unmerge_v4s8_v6s16
; CHECK: liveins: $vgpr0_vgpr1_vgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32), [[ANYEXT3]](s32)
; CHECK-NEXT: [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8), [[UV16:%[0-9]+]]:_(s8), [[UV17:%[0-9]+]]:_(s8), [[UV18:%[0-9]+]]:_(s8), [[UV19:%[0-9]+]]:_(s8), [[UV20:%[0-9]+]]:_(s8), [[UV21:%[0-9]+]]:_(s8), [[UV22:%[0-9]+]]:_(s8), [[UV23:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV16]](s8)
; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV17]](s8)
; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV18]](s8)
; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV19]](s8)
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ANYEXT4]](s32), [[ANYEXT5]](s32), [[ANYEXT6]](s32), [[ANYEXT7]](s32)
; CHECK-NEXT: [[UV24:%[0-9]+]]:_(s8), [[UV25:%[0-9]+]]:_(s8), [[UV26:%[0-9]+]]:_(s8), [[UV27:%[0-9]+]]:_(s8), [[UV28:%[0-9]+]]:_(s8), [[UV29:%[0-9]+]]:_(s8), [[UV30:%[0-9]+]]:_(s8), [[UV31:%[0-9]+]]:_(s8), [[UV32:%[0-9]+]]:_(s8), [[UV33:%[0-9]+]]:_(s8), [[UV34:%[0-9]+]]:_(s8), [[UV35:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
; CHECK-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV32]](s8)
; CHECK-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV33]](s8)
; CHECK-NEXT: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV34]](s8)
; CHECK-NEXT: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV35]](s8)
; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ANYEXT8]](s32), [[ANYEXT9]](s32), [[ANYEXT10]](s32), [[ANYEXT11]](s32)
; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
; CHECK-NEXT: $vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR1]](<4 x s32>)
; CHECK-NEXT: $vgpr8_vgpr9_vgpr10_vgpr11 = COPY [[BUILD_VECTOR2]](<4 x s32>)
%0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<4 x s8>), %2:_(<4 x s8>), %3:_(<4 x s8>) = G_UNMERGE_VALUES %0
%4:_(<4 x s32>) = G_ANYEXT %1
%5:_(<4 x s32>) = G_ANYEXT %2
%6:_(<4 x s32>) = G_ANYEXT %3
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %4
$vgpr4_vgpr5_vgpr6_vgpr7 = COPY %5
$vgpr8_vgpr9_vgpr10_vgpr11 = COPY %6
...

---
name: test_unmerge_v3s32_v6s16
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2
; CHECK-LABEL: name: test_unmerge_v3s32_v6s16
; CHECK: liveins: $vgpr0_vgpr1_vgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; CHECK-NEXT: [[UV:%[0-9]+]]:_(<3 x s32>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
; CHECK-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[UV]](<3 x s32>)
%0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s32>) = G_UNMERGE_VALUES %0
$vgpr0_vgpr1_vgpr2 = COPY %1
...

---

name: test_unmerge_s8_s16
@@ -1090,3 +1225,23 @@
$vgpr9_vgpr10_vgpr11 = COPY %8

...

---
name: test_unmerge_v3s32_v12s16
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5

; CHECK-LABEL: name: test_unmerge_v3s32_v12s16
; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<12 x s16>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5
; CHECK-NEXT: [[UV:%[0-9]+]]:_(<3 x s32>), [[UV1:%[0-9]+]]:_(<3 x s32>) = G_UNMERGE_VALUES [[COPY]](<12 x s16>)
; CHECK-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[UV]](<3 x s32>)
; CHECK-NEXT: $vgpr3_vgpr4_vgpr5 = COPY [[UV1]](<3 x s32>)
%0:_(<12 x s16>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5
%1:_(<3 x s32>), %2:_(<3 x s32>) = G_UNMERGE_VALUES %0
$vgpr0_vgpr1_vgpr2 = COPY %1
$vgpr3_vgpr4_vgpr5 = COPY %2

...