[KnownBits] Add KnownBits::add and KnownBits::sub helper wrappers. (#99468)
This commit is contained in:
parent db0603cb7b
commit 11ba72e651
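The new wrappers simply forward to computeForAddSub, defaulting the NSW/NUW flags to false, so call sites no longer spell out /*Add=*/ and the wrap flags for the common case. A minimal sketch (not part of the patch) of how a call site changes; the helper and variable names below are illustrative only, and it assumes an LLVM tree that already contains this commit:

// Sketch only: old spelling next to the new wrapper.
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Hypothetical helper, not from the patch: known bits of LHS + RHS
// with no wrap flags.
KnownBits knownBitsOfSum(const KnownBits &LHS, const KnownBits &RHS) {
  // Old spelling: every argument is passed explicitly.
  KnownBits Old = KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/false,
                                              /*NUW=*/false, LHS, RHS);
  // New spelling: NSW/NUW default to false, so the common case is one call.
  KnownBits New = KnownBits::add(LHS, RHS);
  (void)Old; // Both spellings compute the same result.
  return New;
}

KnownBits::sub mirrors this with /*Add=*/false.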
@@ -329,6 +329,18 @@ public:
   static KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS,
                                        const KnownBits &Borrow);
 
+  /// Compute knownbits resulting from addition of LHS and RHS.
+  static KnownBits add(const KnownBits &LHS, const KnownBits &RHS,
+                       bool NSW = false, bool NUW = false) {
+    return computeForAddSub(/*Add=*/true, NSW, NUW, LHS, RHS);
+  }
+
+  /// Compute knownbits resulting from subtraction of LHS and RHS.
+  static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS,
+                       bool NSW = false, bool NUW = false) {
+    return computeForAddSub(/*Add=*/false, NSW, NUW, LHS, RHS);
+  }
+
   /// Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
   static KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS);
 
@@ -1399,13 +1399,11 @@ static void computeKnownBitsFromOperator(const Operator *I,
 
       // Note that inbounds does *not* guarantee nsw for the addition, as only
       // the offset is signed, while the base address is unsigned.
-      Known = KnownBits::computeForAddSub(
-          /*Add=*/true, /*NSW=*/false, /* NUW=*/false, Known, IndexBits);
+      Known = KnownBits::add(Known, IndexBits);
     }
     if (!Known.isUnknown() && !AccConstIndices.isZero()) {
       KnownBits Index = KnownBits::makeConstant(AccConstIndices);
-      Known = KnownBits::computeForAddSub(
-          /*Add=*/true, /*NSW=*/false, /* NUW=*/false, Known, Index);
+      Known = KnownBits::add(Known, Index);
     }
     break;
   }
@@ -1802,9 +1800,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
     Known = computeKnownBitsForHorizontalOperation(
         I, DemandedElts, Depth, Q,
         [](const KnownBits &KnownLHS, const KnownBits &KnownRHS) {
-          return KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/false,
-                                             /*NUW=*/false, KnownLHS,
-                                             KnownRHS);
+          return KnownBits::add(KnownLHS, KnownRHS);
         });
     break;
   }
@@ -1821,9 +1817,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
     Known = computeKnownBitsForHorizontalOperation(
         I, DemandedElts, Depth, Q,
         [](const KnownBits &KnownLHS, const KnownBits &KnownRHS) {
-          return KnownBits::computeForAddSub(/*Add=*/false, /*NSW=*/false,
-                                             /*NUW=*/false, KnownLHS,
-                                             KnownRHS);
+          return KnownBits::sub(KnownLHS, KnownRHS);
         });
     break;
   }
@@ -2642,8 +2636,7 @@ static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
       isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
     return true;
 
-  return KnownBits::computeForAddSub(/*Add=*/true, NSW, NUW, XKnown, YKnown)
-      .isNonZero();
+  return KnownBits::add(XKnown, YKnown, NSW, NUW).isNonZero();
 }
 
 static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
@@ -271,8 +271,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
                          Depth + 1);
     computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
                          Depth + 1);
-    Known = KnownBits::computeForAddSub(/*Add=*/false, /*NSW=*/false,
-                                        /* NUW=*/false, Known, Known2);
+    Known = KnownBits::sub(Known, Known2);
     break;
   }
   case TargetOpcode::G_XOR: {
@@ -298,8 +297,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
                          Depth + 1);
     computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
                          Depth + 1);
-    Known = KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/false,
-                                        /* NUW=*/false, Known, Known2);
+    Known = KnownBits::add(Known, Known2);
     break;
   }
   case TargetOpcode::G_AND: {
@@ -571,8 +569,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
     // Sign extend the extracted value using shift left and arithmetic shift
     // right.
     KnownBits ExtKnown = KnownBits::makeConstant(APInt(BitWidth, BitWidth));
-    KnownBits ShiftKnown = KnownBits::computeForAddSub(
-        /*Add=*/false, /*NSW=*/false, /* NUW=*/false, ExtKnown, WidthKnown);
+    KnownBits ShiftKnown = KnownBits::sub(ExtKnown, WidthKnown);
     Known = KnownBits::ashr(KnownBits::shl(Known, ShiftKnown), ShiftKnown);
     break;
   }
@@ -1928,10 +1928,9 @@ bool AMDGPUDAGToDAGISel::checkFlatScratchSVSSwizzleBug(
   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
   // voffset to (soffset + inst_offset).
   KnownBits VKnown = CurDAG->computeKnownBits(VAddr);
-  KnownBits SKnown = KnownBits::computeForAddSub(
-      /*Add=*/true, /*NSW=*/false, /*NUW=*/false,
-      CurDAG->computeKnownBits(SAddr),
-      KnownBits::makeConstant(APInt(32, ImmOffset)));
+  KnownBits SKnown =
+      KnownBits::add(CurDAG->computeKnownBits(SAddr),
+                     KnownBits::makeConstant(APInt(32, ImmOffset)));
   uint64_t VMax = VKnown.getMaxValue().getZExtValue();
   uint64_t SMax = SKnown.getMaxValue().getZExtValue();
   return (VMax & 3) + (SMax & 3) >= 4;
@@ -4466,9 +4466,8 @@ bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
   // voffset to (soffset + inst_offset).
   auto VKnown = KB->getKnownBits(VAddr);
-  auto SKnown = KnownBits::computeForAddSub(
-      /*Add=*/true, /*NSW=*/false, /*NUW=*/false, KB->getKnownBits(SAddr),
-      KnownBits::makeConstant(APInt(32, ImmOffset)));
+  auto SKnown = KnownBits::add(KB->getKnownBits(SAddr),
+                               KnownBits::makeConstant(APInt(32, ImmOffset)));
  uint64_t VMax = VKnown.getMaxValue().getZExtValue();
  uint64_t SMax = SKnown.getMaxValue().getZExtValue();
  return (VMax & 3) + (SMax & 3) >= 4;
@@ -20186,9 +20186,8 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     // CSINV: KnownOp0 or ~KnownOp1
     // CSNEG: KnownOp0 or KnownOp1 * -1
     if (Op.getOpcode() == ARMISD::CSINC)
-      KnownOp1 = KnownBits::computeForAddSub(
-          /*Add=*/true, /*NSW=*/false, /*NUW=*/false, KnownOp1,
-          KnownBits::makeConstant(APInt(32, 1)));
+      KnownOp1 =
+          KnownBits::add(KnownOp1, KnownBits::makeConstant(APInt(32, 1)));
     else if (Op.getOpcode() == ARMISD::CSINV)
       std::swap(KnownOp1.Zero, KnownOp1.One);
     else if (Op.getOpcode() == ARMISD::CSNEG)
@@ -37135,12 +37135,9 @@ static void computeKnownBitsForPSADBW(SDValue LHS, SDValue RHS,
   Known2 = DAG.computeKnownBits(LHS, DemandedSrcElts, Depth + 1);
   Known = KnownBits::abdu(Known, Known2).zext(16);
   // Known = (((D0 + D1) + (D2 + D3)) + ((D4 + D5) + (D6 + D7)))
-  Known = KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/true, /*NUW=*/true,
-                                      Known, Known);
-  Known = KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/true, /*NUW=*/true,
-                                      Known, Known);
-  Known = KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/true, /*NUW=*/true,
-                                      Known, Known);
+  Known = KnownBits::add(Known, Known, /*NSW=*/true, /*NUW=*/true);
+  Known = KnownBits::add(Known, Known, /*NSW=*/true, /*NUW=*/true);
+  Known = KnownBits::add(Known, Known, /*NSW=*/true, /*NUW=*/true);
   Known = Known.zext(64);
 }
 
@@ -37163,8 +37160,7 @@ static void computeKnownBitsForPMADDWD(SDValue LHS, SDValue RHS,
   KnownBits RHSHi = DAG.computeKnownBits(RHS, DemandedHiElts, Depth + 1);
   KnownBits Lo = KnownBits::mul(LHSLo.sext(32), RHSLo.sext(32));
   KnownBits Hi = KnownBits::mul(LHSHi.sext(32), RHSHi.sext(32));
-  Known = KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/false,
-                                      /*NUW=*/false, Lo, Hi);
+  Known = KnownBits::add(Lo, Hi, /*NSW=*/false, /*NUW=*/false);
 }
 
 static void computeKnownBitsForPMADDUBSW(SDValue LHS, SDValue RHS,
@@ -561,7 +561,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     // Otherwise just compute the known bits of the result.
     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
     bool NUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
-    Known = KnownBits::computeForAddSub(true, NSW, NUW, LHSKnown, RHSKnown);
+    Known = KnownBits::add(LHSKnown, RHSKnown, NSW, NUW);
     break;
   }
   case Instruction::Sub: {
@@ -595,7 +595,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     // Otherwise just compute the known bits of the result.
     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
     bool NUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
-    Known = KnownBits::computeForAddSub(false, NSW, NUW, LHSKnown, RHSKnown);
+    Known = KnownBits::sub(LHSKnown, RHSKnown, NSW, NUW);
     break;
   }
   case Instruction::Mul: {
@@ -1232,8 +1232,7 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
 
     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
     bool NUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
-    Known =
-        KnownBits::computeForAddSub(/*Add=*/true, NSW, NUW, LHSKnown, RHSKnown);
+    Known = KnownBits::add(LHSKnown, RHSKnown, NSW, NUW);
     computeKnownBitsFromContext(I, Known, Depth, Q);
     break;
   }
@@ -1250,8 +1249,7 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
     bool NUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
     llvm::computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, Q);
-    Known = KnownBits::computeForAddSub(/*Add=*/false, NSW, NUW, LHSKnown,
-                                        RHSKnown);
+    Known = KnownBits::sub(LHSKnown, RHSKnown, NSW, NUW);
     computeKnownBitsFromContext(I, Known, Depth, Q);
     break;
   }
@@ -300,6 +300,20 @@ TEST(KnownBitsTest, BinaryExhaustive) {
         return Known1 ^ Known2;
       },
       [](const APInt &N1, const APInt &N2) { return N1 ^ N2; });
+  testBinaryOpExhaustive(
+      "add",
+      [](const KnownBits &Known1, const KnownBits &Known2) {
+        return KnownBits::add(Known1, Known2);
+      },
+      [](const APInt &N1, const APInt &N2) { return N1 + N2; },
+      /*CheckOptimality=*/false);
+  testBinaryOpExhaustive(
+      "sub",
+      [](const KnownBits &Known1, const KnownBits &Known2) {
+        return KnownBits::sub(Known1, Known2);
+      },
+      [](const APInt &N1, const APInt &N2) { return N1 - N2; },
+      /*CheckOptimality=*/false);
   testBinaryOpExhaustive("umax", KnownBits::umax, APIntOps::umax);
   testBinaryOpExhaustive("umin", KnownBits::umin, APIntOps::umin);
   testBinaryOpExhaustive("smax", KnownBits::smax, APIntOps::smax);