
[ValueTracking] Fix bit width handling in computeKnownBits() for GEPs ()

For GEPs, we have three bit widths involved: The pointer bit width, the
index bit width, and the bit width of the GEP operands.

The correct behavior here is:
* We need to sextOrTrunc the GEP operand to the index width *before*
multiplying by the scale.
* If the index width and pointer width differ, GEP only ever modifies
the low bits. Adds should not overflow into the high bits.
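
As a standalone illustration of the first point (plain C++ integers rather than LLVM's APInt/KnownBits; the i8 operand value 80, the i32 element type, and the 16-bit index width are assumptions chosen to mirror the 64 -> 320 expectation change in the unit-test diff below), extending before scaling and scaling before extending disagree as soon as the product overflows the narrow operand type:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical example: something like "getelementptr i32, ptr %p, i8 80"
    // on a target with 16-bit pointers and a 16-bit index width ("p:16:16:16").
    // The element stride is sizeof(i32) == 4 bytes.
    int main() {
      int8_t Operand = 80;  // GEP operand, narrower than the index type
      uint16_t Stride = 4;  // sizeof(i32)

      // Correct order: sign-extend the operand to the index width, then scale.
      int16_t Extended = Operand;                            // i8 80 -> i16 80
      uint16_t OffsetGood = (uint16_t)(Extended * Stride);   // 320

      // Old order: scale in the operand's width, then extend the product.
      int8_t NarrowProduct = (int8_t)(Operand * 4);          // 320 wraps to 64 in i8
      uint16_t OffsetBad = (uint16_t)(int16_t)NarrowProduct; // 64

      printf("extend first: %u, multiply first: %u\n", OffsetGood, OffsetBad);
      return 0;
    }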

I'm testing this via unit tests because it's a bit tricky to test in IR
with InstCombine canonicalization getting in the way.
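
The second point is what the new AddIndexToKnown helper in the diff models by adding into Known.trunc(IndexWidth) and reinserting the result at bit 0. A minimal standalone sketch of the underlying address arithmetic, assuming the "p:16:16:16:8" layout used by the ComputeKnownBitsGEPOnlyIndexBits test below (16-bit pointers, 8-bit index width) and an arbitrary base address:

    #include <cstdint>
    #include <cstdio>

    // With an 8-bit index width, GEP offset arithmetic happens in the low
    // 8 bits only; the high byte of the 16-bit address comes from the base.
    int main() {
      uint16_t Base = 0xAB10;  // assumed base address (high byte 0xAB)
      int8_t Offset = -0x20;   // byte offset contributed by the GEP indices

      // Add within the index width, then splice the low bits back into the pointer.
      uint8_t Low = (uint8_t)((uint8_t)Base + (uint8_t)Offset); // 0x10 - 0x20 -> 0xF0
      uint16_t Result = (uint16_t)((Base & 0xFF00) | Low);      // 0xABF0

      // A full 16-bit addition would have borrowed into the high byte (0xAAF0);
      // with an 8-bit index width the high byte stays 0xAB.
      printf("base 0x%04X -> result 0x%04X\n", (unsigned)Base, (unsigned)Result);
      return 0;
    }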
Nikita Popov, 2025-02-04 14:29:58 +01:00 (committed by GitHub)
commit 3bd11b502c, parent 6fc66d322b
2 changed files with 42 additions and 36 deletions:
llvm/lib/Analysis
llvm/unittests/Analysis

@@ -1426,7 +1426,22 @@ static void computeKnownBitsFromOperator(const Operator *I,
     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
     // Accumulate the constant indices in a separate variable
     // to minimize the number of calls to computeForAddSub.
-    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
+    unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType());
+    APInt AccConstIndices(IndexWidth, 0);
+
+    auto AddIndexToKnown = [&](KnownBits IndexBits) {
+      if (IndexWidth == BitWidth) {
+        // Note that inbounds does *not* guarantee nsw for the addition, as only
+        // the offset is signed, while the base address is unsigned.
+        Known = KnownBits::add(Known, IndexBits);
+      } else {
+        // If the index width is smaller than the pointer width, only add the
+        // value to the low bits.
+        assert(IndexWidth < BitWidth &&
+               "Index width can't be larger than pointer width");
+        Known.insertBits(KnownBits::add(Known.trunc(IndexWidth), IndexBits), 0);
+      }
+    };
 
     gep_type_iterator GTI = gep_type_begin(I);
     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
@@ -1464,43 +1479,34 @@ static void computeKnownBitsFromOperator(const Operator *I,
         break;
       }
 
-      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
-      KnownBits IndexBits(IndexBitWidth);
-      computeKnownBits(Index, IndexBits, Depth + 1, Q);
-      TypeSize IndexTypeSize = GTI.getSequentialElementStride(Q.DL);
-      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
-      KnownBits ScalingFactor(IndexBitWidth);
+      TypeSize Stride = GTI.getSequentialElementStride(Q.DL);
+      uint64_t StrideInBytes = Stride.getKnownMinValue();
+      if (!Stride.isScalable()) {
+        // Fast path for constant offset.
+        if (auto *CI = dyn_cast<ConstantInt>(Index)) {
+          AccConstIndices +=
+              CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
+          continue;
+        }
+      }
+
+      KnownBits IndexBits =
+          computeKnownBits(Index, Depth + 1, Q).sextOrTrunc(IndexWidth);
+      KnownBits ScalingFactor(IndexWidth);
       // Multiply by current sizeof type.
       // &A[i] == A + i * sizeof(*A[i]).
-      if (IndexTypeSize.isScalable()) {
+      if (Stride.isScalable()) {
         // For scalable types the only thing we know about sizeof is
         // that this is a multiple of the minimum size.
-        ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
-      } else if (IndexBits.isConstant()) {
-        APInt IndexConst = IndexBits.getConstant();
-        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
-        IndexConst *= ScalingFactor;
-        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
-        continue;
+        ScalingFactor.Zero.setLowBits(llvm::countr_zero(StrideInBytes));
       } else {
         ScalingFactor =
-            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
+            KnownBits::makeConstant(APInt(IndexWidth, StrideInBytes));
       }
-      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
-
-      // If the offsets have a different width from the pointer, according
-      // to the language reference we need to sign-extend or truncate them
-      // to the width of the pointer.
-      IndexBits = IndexBits.sextOrTrunc(BitWidth);
-
-      // Note that inbounds does *not* guarantee nsw for the addition, as only
-      // the offset is signed, while the base address is unsigned.
-      Known = KnownBits::add(Known, IndexBits);
+      AddIndexToKnown(KnownBits::mul(IndexBits, ScalingFactor));
     }
-    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
-      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
-      Known = KnownBits::add(Known, Index);
-    }
+    if (!Known.isUnknown() && !AccConstIndices.isZero())
+      AddIndexToKnown(KnownBits::makeConstant(AccConstIndices));
     break;
   }
   case Instruction::PHI: {

@@ -2680,7 +2680,7 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAbsoluteSymbol) {
 }
 
 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPExtendBeforeMul) {
-  // FIXME: The index should be extended before multiplying with the scale.
+  // The index should be extended before multiplying with the scale.
   parseAssembly(R"(
     target datalayout = "p:16:16:16"
@@ -2692,12 +2692,12 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPExtendBeforeMul) {
     }
   )");
   KnownBits Known = computeKnownBits(A, M->getDataLayout());
-  EXPECT_EQ(~64 & 0x7fff, Known.Zero);
-  EXPECT_EQ(64, Known.One);
+  EXPECT_EQ(~320 & 0x7fff, Known.Zero);
+  EXPECT_EQ(320, Known.One);
 }
 
 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPOnlyIndexBits) {
-  // FIXME: GEP should only affect the index width.
+  // GEP should only affect the index width.
   parseAssembly(R"(
     target datalayout = "p:16:16:16:8"
@@ -2710,8 +2710,8 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPOnlyIndexBits) {
     }
   )");
   KnownBits Known = computeKnownBits(A, M->getDataLayout());
-  EXPECT_EQ(0x7eff, Known.Zero);
-  EXPECT_EQ(0x100, Known.One);
+  EXPECT_EQ(0x7fff, Known.Zero);
+  EXPECT_EQ(0, Known.One);
 }
 
TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {