[CodeGen] Avoid sinking vector comparisons during CodeGenPrepare
Whilst reviewing PR llvm#109289 and doing some analysis with various
tests involving predicated blocks, I noticed that we're making
codegen and performance worse by sinking vector comparisons
multiple times into blocks. It looks like sinkCmpExpression in
CodeGenPrepare was written for scalar comparisons, where there is
only a single condition register, whereas vector comparisons
typically produce a vector result. For some targets, such as NEON
or SVE, there are multiple allocatable vector registers that can
store the result, so we should avoid sinking in that case.
david-arm committed Oct 25, 2024
1 parent 6b93bd0 commit 4de421e
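
As a standalone illustration of the policy the commit message describes (a
minimal sketch using hypothetical mock types, not LLVM's real classes):
scalar compare results contend for a single condition register, so sinking
the compare next to each user can pay off, while vector results occupy
ordinary allocatable registers, so the compare should be computed once.

    #include <cassert>

    // Mock of the decision this commit teaches CodeGenPrepare to make.
    // MockVT and MockTargetLowering are illustrative stand-ins only.
    struct MockVT {
      bool IsVector;
      bool isVector() const { return IsVector; }
    };

    struct MockTargetLowering {
      // Mirrors the kind of override a target adds in this commit: vector
      // compare results live in ordinary allocatable vector registers,
      // scalar results share one condition register.
      bool hasMultiplePredicateRegisters(MockVT VT) const {
        return VT.isVector();
      }
    };

    // Mirrors the new early-exit in sinkCmpExpression.
    static bool shouldSinkCmp(const MockTargetLowering &TLI, MockVT ResVT) {
      return !TLI.hasMultiplePredicateRegisters(ResVT);
    }

    int main() {
      MockTargetLowering TLI;
      assert(shouldSinkCmp(TLI, MockVT{false}));  // scalar: sinking allowed
      assert(!shouldSinkCmp(TLI, MockVT{true}));  // vector: keep one compare
    }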
Showing 9 changed files with 72 additions and 79 deletions.
26 changes: 5 additions & 21 deletions llvm/include/llvm/CodeGen/TargetLowering.h
@@ -497,10 +497,10 @@ class TargetLoweringBase {
return true;
}

/// Return true if multiple condition registers are available.
bool hasMultipleConditionRegisters() const {
return HasMultipleConditionRegisters;
}
/// Return true if multiple (allocatable) predicate registers are available
/// for \p VT. If there is only a single register the code generator will
/// sink comparisons into the blocks of their users.
virtual bool hasMultiplePredicateRegisters(EVT VT) const { return false; }

/// Return true if the target has BitExtract instructions.
bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
@@ -2389,7 +2389,7 @@ class TargetLoweringBase {
EVT VT) const {
// If a target has multiple condition registers, then it likely has logical
// operations on those registers.
if (hasMultipleConditionRegisters())
if (hasMultiplePredicateRegisters(VT))
return false;
// Only do the transform if the value won't be split into multiple
// registers.
@@ -2496,15 +2496,6 @@ class TargetLoweringBase {
StackPointerRegisterToSaveRestore = R;
}

/// Tells the code generator that the target has multiple (allocatable)
/// condition registers that can be used to store the results of comparisons
/// for use by selects and conditional branches. With multiple condition
/// registers, the code generator will not aggressively sink comparisons into
/// the blocks of their users.
void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
HasMultipleConditionRegisters = hasManyRegs;
}

/// Tells the code generator that the target has BitExtract instructions.
/// The code generator will aggressively sink "shift"s into the blocks of
/// their users if the users will generate "and" instructions which can be
@@ -3470,13 +3461,6 @@ class TargetLoweringBase {
private:
const TargetMachine &TM;

/// Tells the code generator that the target has multiple (allocatable)
/// condition registers that can be used to store the results of comparisons
/// for use by selects and conditional branches. With multiple condition
/// registers, the code generator will not aggressively sink comparisons into
/// the blocks of their users.
bool HasMultipleConditionRegisters;

/// Tells the code generator that the target has BitExtract instructions.
/// The code generator will aggressively sink "shift"s into the blocks of
/// their users if the users will generate "and" instructions which can be
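
For a downstream target, migrating from the removed setter to the new hook
is a one-line override. A minimal sketch, assuming a hypothetical
MyTargetLowering whose usual target boilerplate lives elsewhere:

    #include "llvm/CodeGen/TargetLowering.h"

    namespace llvm {

    // Hypothetical target lowering (illustrative sketch only). Instead of
    // calling the removed setHasMultipleConditionRegisters(true), the
    // target now answers per value type.
    class MyTargetLowering : public TargetLowering {
    public:
      explicit MyTargetLowering(const TargetMachine &TM)
          : TargetLowering(TM) {}

      bool hasMultiplePredicateRegisters(EVT VT) const override {
        // Vector compare results occupy allocatable vector registers;
        // scalar results still share the single condition register.
        return VT.isVector();
      }
    };

    } // end namespace llvm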
8 changes: 5 additions & 3 deletions llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1771,8 +1771,10 @@ bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
if (TLI.hasMultipleConditionRegisters())
static bool sinkCmpExpression(const DataLayout &DL, CmpInst *Cmp,
const TargetLowering &TLI) {
EVT ResVT = TLI.getValueType(DL, Cmp->getType());
if (TLI.hasMultiplePredicateRegisters(ResVT))
return false;

// Avoid sinking soft-FP comparisons, since this can move them into a loop.
@@ -2137,7 +2139,7 @@ static bool adjustIsPower2Test(CmpInst *Cmp, const TargetLowering &TLI,
}

bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
if (sinkCmpExpression(Cmp, *TLI))
if (sinkCmpExpression(*DL, Cmp, *TLI))
return true;

if (combineToUAddWithOverflow(Cmp, ModifiedDT))
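
Note that the gate now keys off the compare's IR result type rather than a
global flag. Restated in isolation (a sketch against the commit's API, not
the patch itself): an icmp on <4 x i32> produces <4 x i1>, which maps to a
vector EVT, so a target like AArch64 vetoes the sink.

    #include "llvm/CodeGen/TargetLowering.h"
    #include "llvm/IR/InstrTypes.h"

    using namespace llvm;

    // Sketch of the new early-exit: derive the EVT of the compare's i1 or
    // <N x i1> result and ask the target whether predicate registers are
    // plentiful for that type.
    static bool worthSinkingCmp(const DataLayout &DL, const CmpInst *Cmp,
                                const TargetLowering &TLI) {
      EVT ResVT = TLI.getValueType(DL, Cmp->getType());
      return !TLI.hasMultiplePredicateRegisters(ResVT);
    }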
1 change: 0 additions & 1 deletion llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -625,7 +625,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm)
MaxGluedStoresPerMemcpy = 0;
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
HasMultipleConditionRegisters = false;
HasExtractBitsInsn = false;
JumpIsExpensive = JumpIsExpensiveOverride;
PredictableSelectIsExpensive = false;
4 changes: 4 additions & 0 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1358,6 +1358,10 @@ class AArch64TargetLowering : public TargetLowering {
unsigned getMinimumJumpTableEntries() const override;

bool softPromoteHalfType() const override { return true; }

virtual bool hasMultiplePredicateRegisters(EVT VT) const {
return VT.isVector();
}
};

namespace AArch64 {
8 changes: 0 additions & 8 deletions llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -583,14 +583,6 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setSchedulingPreference(Sched::RegPressure);
setJumpIsExpensive(true);

// FIXME: This is only partially true. If we have to do vector compares, any
// SGPR pair can be a condition register. If we have a uniform condition, we
// are better off doing SALU operations, where there is only one SCC. For now,
// we don't have a way of knowing during instruction selection if a condition
// will be uniform and we always use vector compares. Assume we are using
// vector compares until that is fixed.
setHasMultipleConditionRegisters(true);

setMinCmpXchgSizeInBits(32);
setSupportsUnalignedAtomics(false);

10 changes: 10 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -387,6 +387,16 @@ class AMDGPUTargetLowering : public TargetLowering {
MVT getFenceOperandTy(const DataLayout &DL) const override {
return MVT::i32;
}

virtual bool hasMultiplePredicateRegisters(EVT VT) const override {
// FIXME: This is only partially true. If we have to do vector compares,
// any SGPR pair can be a condition register. If we have a uniform
// condition, we are better off doing SALU operations, where there is only
// one SCC. For now, we don't have a way of knowing during instruction
// selection if a condition will be uniform and we always use vector
// compares. Assume we are using vector compares until that is fixed.
return true;
}
};

namespace AMDGPUISD {
10 changes: 7 additions & 3 deletions llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1454,10 +1454,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,

// With 32 condition bits, we don't need to sink (and duplicate) compares
// aggressively in CodeGenPrep.
if (Subtarget.useCRBits()) {
setHasMultipleConditionRegisters();
if (Subtarget.useCRBits())
setJumpIsExpensive();
}

// TODO: The default entry number is set to 64. This stops most jump table
// generation on PPC. But it is good for current PPC HWs because the indirect
@@ -19044,3 +19042,9 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
return Builder.CreateOr(
Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}

bool PPCTargetLowering::hasMultiplePredicateRegisters(EVT VT) const {
// With 32 condition bits, we don't need to sink (and duplicate) compares
// aggressively in CodeGenPrep.
return Subtarget.useCRBits();
}
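
Contrasting the two in-tree policies (an illustrative mock, not LLVM code):
with CR bits enabled, PowerPC's condition register provides 32 individually
allocatable bits, so it reports multiple predicate registers for every
type, whereas AArch64 does so only for vector results.

    #include <cassert>

    // VT and the two predicates below are hypothetical stand-ins for the
    // overrides added in this commit.
    struct VT { bool Vector; };

    static bool ppcHasMultiplePredRegs(VT) { return true; } // useCRBits()
    static bool aarch64HasMultiplePredRegs(VT V) { return V.Vector; }

    int main() {
      VT Scalar{false}, Vec{true};
      assert(ppcHasMultiplePredRegs(Scalar));       // PPC: scalars stay put
      assert(!aarch64HasMultiplePredRegs(Scalar));  // AArch64: may sink
      assert(aarch64HasMultiplePredRegs(Vec));      // both: vectors stay put
    }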
2 changes: 2 additions & 0 deletions llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1490,6 +1490,8 @@ namespace llvm {
/// through to determine the optimal load/store instruction format.
unsigned computeMOFlags(const SDNode *Parent, SDValue N,
SelectionDAG &DAG) const;

virtual bool hasMultiplePredicateRegisters(EVT VT) const override;
}; // end class PPCTargetLowering

namespace PPC {
82 changes: 39 additions & 43 deletions llvm/test/CodeGen/AArch64/no-sink-vector-cmp.ll
@@ -6,68 +6,64 @@ target triple = "aarch64-unknown-linux-gnu"
define void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) {
; CHECK-LABEL: vector_loop_with_icmp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #15 // =0xf
; CHECK-NEXT: mov w9, #15 // =0xf
; CHECK-NEXT: mov w10, #4 // =0x4
; CHECK-NEXT: adrp x9, .LCPI0_0
; CHECK-NEXT: adrp x8, .LCPI0_0
; CHECK-NEXT: adrp x11, .LCPI0_1
; CHECK-NEXT: dup v0.2d, x8
; CHECK-NEXT: dup v0.2d, x9
; CHECK-NEXT: dup v1.2d, x10
; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI0_0]
; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI0_0]
; CHECK-NEXT: ldr q3, [x11, :lo12:.LCPI0_1]
; CHECK-NEXT: add x9, x0, #8
; CHECK-NEXT: mov w10, #16 // =0x10
; CHECK-NEXT: mov w11, #1 // =0x1
; CHECK-NEXT: add x8, x0, #8
; CHECK-NEXT: mov w9, #16 // =0x10
; CHECK-NEXT: mov w10, #1 // =0x1
; CHECK-NEXT: b .LBB0_2
; CHECK-NEXT: .LBB0_1: // %pred.store.continue18
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: add v2.2d, v2.2d, v1.2d
; CHECK-NEXT: add v3.2d, v3.2d, v1.2d
; CHECK-NEXT: subs x10, x10, #4
; CHECK-NEXT: add x9, x9, #16
; CHECK-NEXT: subs x9, x9, #4
; CHECK-NEXT: add x8, x8, #16
; CHECK-NEXT: b.eq .LBB0_10
; CHECK-NEXT: .LBB0_2: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: cmhi v4.2d, v0.2d, v3.2d
; CHECK-NEXT: xtn v4.2s, v4.2d
; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
; CHECK-NEXT: umov w12, v4.h[0]
; CHECK-NEXT: tbz w12, #0, .LBB0_4
; CHECK-NEXT: // %bb.3: // %pred.store.if
; CHECK-NEXT: cmhi v4.2d, v0.2d, v2.2d
; CHECK-NEXT: cmhi v5.2d, v0.2d, v3.2d
; CHECK-NEXT: uzp1 v4.4s, v5.4s, v4.4s
; CHECK-NEXT: xtn v4.4h, v4.4s
; CHECK-NEXT: umov w11, v4.h[0]
; CHECK-NEXT: tbnz w11, #0, .LBB0_6
; CHECK-NEXT: // %bb.3: // %pred.store.continue
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: stur w11, [x9, #-8]
; CHECK-NEXT: .LBB0_4: // %pred.store.continue
; CHECK-NEXT: umov w11, v4.h[1]
; CHECK-NEXT: tbnz w11, #0, .LBB0_7
; CHECK-NEXT: .LBB0_4: // %pred.store.continue6
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: dup v4.2d, x8
; CHECK-NEXT: cmhi v4.2d, v4.2d, v3.2d
; CHECK-NEXT: xtn v4.2s, v4.2d
; CHECK-NEXT: uzp1 v4.4h, v4.4h, v0.4h
; CHECK-NEXT: umov w12, v4.h[1]
; CHECK-NEXT: tbz w12, #0, .LBB0_6
; CHECK-NEXT: // %bb.5: // %pred.store.if5
; CHECK-NEXT: umov w11, v4.h[2]
; CHECK-NEXT: tbnz w11, #0, .LBB0_8
; CHECK-NEXT: .LBB0_5: // %pred.store.continue8
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: stur w11, [x9, #-4]
; CHECK-NEXT: .LBB0_6: // %pred.store.continue6
; CHECK-NEXT: umov w11, v4.h[3]
; CHECK-NEXT: tbz w11, #0, .LBB0_1
; CHECK-NEXT: b .LBB0_9
; CHECK-NEXT: .LBB0_6: // %pred.store.if
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: dup v4.2d, x8
; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
; CHECK-NEXT: xtn v4.2s, v4.2d
; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
; CHECK-NEXT: umov w12, v4.h[2]
; CHECK-NEXT: tbz w12, #0, .LBB0_8
; CHECK-NEXT: // %bb.7: // %pred.store.if7
; CHECK-NEXT: stur w10, [x8, #-8]
; CHECK-NEXT: umov w11, v4.h[1]
; CHECK-NEXT: tbz w11, #0, .LBB0_4
; CHECK-NEXT: .LBB0_7: // %pred.store.if5
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: str w11, [x9]
; CHECK-NEXT: .LBB0_8: // %pred.store.continue8
; CHECK-NEXT: stur w10, [x8, #-4]
; CHECK-NEXT: umov w11, v4.h[2]
; CHECK-NEXT: tbz w11, #0, .LBB0_5
; CHECK-NEXT: .LBB0_8: // %pred.store.if7
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: dup v4.2d, x8
; CHECK-NEXT: cmhi v4.2d, v4.2d, v2.2d
; CHECK-NEXT: xtn v4.2s, v4.2d
; CHECK-NEXT: uzp1 v4.4h, v0.4h, v4.4h
; CHECK-NEXT: umov w12, v4.h[3]
; CHECK-NEXT: tbz w12, #0, .LBB0_1
; CHECK-NEXT: // %bb.9: // %pred.store.if9
; CHECK-NEXT: str w10, [x8]
; CHECK-NEXT: umov w11, v4.h[3]
; CHECK-NEXT: tbz w11, #0, .LBB0_1
; CHECK-NEXT: .LBB0_9: // %pred.store.if9
; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: str w11, [x9, #4]
; CHECK-NEXT: str w10, [x8, #4]
; CHECK-NEXT: b .LBB0_1
; CHECK-NEXT: .LBB0_10: // %for.cond.cleanup
; CHECK-NEXT: ret
