mirror of
https://github.com/llvm/llvm-project.git
synced 2025-04-29 06:06:06 +00:00
[NFC] Only expose getXXXSize functions in TypeSize
Currently 'TypeSize' exposes two pairs of functions that serve the same purpose:
- getFixedSize / getFixedValue
- getKnownMinSize / getKnownMinValue
source : bf82070ea4/llvm/include/llvm/Support/TypeSize.h (L337-L338)
This patch removes one function from each pair, so the code base sticks to a single spelling for each query.
Differential Revision: https://reviews.llvm.org/D141134
This commit is contained in:
parent
b8576086c7
commit
dd56e1c92b
@ -311,9 +311,16 @@ public:
|
||||
// the exact size. If the type is a scalable vector, it will represent the known
|
||||
// minimum size.
|
||||
class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
|
||||
using UP = details::FixedOrScalableQuantity<TypeSize, uint64_t>;
|
||||
|
||||
TypeSize(const FixedOrScalableQuantity<TypeSize, uint64_t> &V)
|
||||
: FixedOrScalableQuantity(V) {}
|
||||
|
||||
// Make 'getFixedValue' private, it is exposed as 'getFixedSize' below.
|
||||
using UP::getFixedValue;
|
||||
// Make 'getKnownMinValue' private, it is exposed as 'getKnownMinSize' below.
|
||||
using UP::getKnownMinValue;
|
||||
|
||||
public:
|
||||
constexpr TypeSize(ScalarTy Quantity, bool Scalable)
|
||||
: FixedOrScalableQuantity(Quantity, Scalable) {}
|
||||
@ -399,7 +406,7 @@ public:
|
||||
/// Similar to the alignTo functions in MathExtras.h
|
||||
inline constexpr TypeSize alignTo(TypeSize Size, uint64_t Align) {
|
||||
assert(Align != 0u && "Align must be non-zero");
|
||||
return {(Size.getKnownMinValue() + Align - 1) / Align * Align,
|
||||
return {(Size.getKnownMinSize() + Align - 1) / Align * Align,
|
||||
Size.isScalable()};
|
||||
}
|
||||
|
||||
|
@ -599,7 +599,7 @@ Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
|
||||
return nullptr;
|
||||
|
||||
// If we're not accessing anything in this constant, the result is undefined.
|
||||
if (Offset >= (int64_t)InitializerSize.getFixedValue())
|
||||
if (Offset >= (int64_t)InitializerSize.getFixedSize())
|
||||
return PoisonValue::get(IntType);
|
||||
|
||||
unsigned char RawBytes[32] = {0};
|
||||
|
@ -408,7 +408,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
|
||||
TypeSize TySize = DL.getTypeStoreSize(Ty);
|
||||
if (TySize.isScalable())
|
||||
return false;
|
||||
APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
|
||||
APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedSize());
|
||||
return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
|
||||
TLI);
|
||||
}
|
||||
|
@ -101,7 +101,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
|
||||
// Given an array type, recursively traverse the elements.
|
||||
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
|
||||
Type *EltTy = ATy->getElementType();
|
||||
uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
|
||||
uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
|
||||
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
|
||||
ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
|
||||
StartingOffset + i * EltSize);
|
||||
@ -146,7 +146,7 @@ void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
|
||||
// Given an array type, recursively traverse the elements.
|
||||
if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
|
||||
Type *EltTy = ATy->getElementType();
|
||||
uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
|
||||
uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
|
||||
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
|
||||
computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
|
||||
StartingOffset + i * EltSize);
|
||||
|
@ -4062,11 +4062,11 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
|
||||
AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
|
||||
DAG.getVScale(dl, IntPtr,
|
||||
APInt(IntPtr.getScalarSizeInBits(),
|
||||
TySize.getKnownMinValue())));
|
||||
TySize.getKnownMinSize())));
|
||||
else
|
||||
AllocSize =
|
||||
DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
|
||||
DAG.getConstant(TySize.getFixedValue(), dl, IntPtr));
|
||||
DAG.getConstant(TySize.getFixedSize(), dl, IntPtr));
|
||||
|
||||
// Handle alignment. If the requested alignment is less than or equal to
|
||||
// the stack alignment, ignore it. If the size is greater than or equal to
|
||||
|
@ -218,7 +218,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI,
|
||||
// We can't subtract a fixed size from a scalable one, so in that case
|
||||
// assume the scalable value is of minimum size.
|
||||
TypeSize NewAllocSize =
|
||||
TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
|
||||
TypeSize::Fixed(AllocSize.getKnownMinSize()) - OffsetSize;
|
||||
if (HasAddressTaken(I, NewAllocSize))
|
||||
return true;
|
||||
break;
|
||||
|
@ -67,7 +67,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
|
||||
|
||||
getMemberOffsets()[i] = StructSize;
|
||||
// Consume space for this data item
|
||||
StructSize += DL.getTypeAllocSize(Ty).getFixedValue();
|
||||
StructSize += DL.getTypeAllocSize(Ty).getFixedSize();
|
||||
}
|
||||
|
||||
// Add padding to the end of the struct so that it could be put in an array
|
||||
|
@ -10468,7 +10468,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
|
||||
unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
|
||||
EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
|
||||
|
||||
if (SrcVTSize.getFixedValue() < VTSize) {
|
||||
if (SrcVTSize.getFixedSize() < VTSize) {
|
||||
assert(2 * SrcVTSize == VTSize);
|
||||
// We can pad out the smaller vector for free, so if it's part of a
|
||||
// shuffle...
|
||||
@ -10478,7 +10478,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
|
||||
continue;
|
||||
}
|
||||
|
||||
if (SrcVTSize.getFixedValue() != 2 * VTSize) {
|
||||
if (SrcVTSize.getFixedSize() != 2 * VTSize) {
|
||||
LLVM_DEBUG(
|
||||
dbgs() << "Reshuffle failed: result vector too small to extract\n");
|
||||
return SDValue();
|
||||
|
@ -791,7 +791,7 @@ public:
|
||||
|
||||
TypeSize TS = VT.getSizeInBits();
|
||||
// TODO: We should be able to use bic/bif too for SVE.
|
||||
return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
|
||||
return !TS.isScalable() && TS.getFixedSize() >= 64; // vector 'bic'
|
||||
}
|
||||
|
||||
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
|
||||
|
@ -650,7 +650,7 @@ public:
|
||||
continue;
|
||||
}
|
||||
CandidateTy Candidate(GV, K.second.size(),
|
||||
DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
|
||||
DL.getTypeAllocSize(GV->getValueType()).getFixedSize());
|
||||
if (MostUsed < Candidate)
|
||||
MostUsed = Candidate;
|
||||
}
|
||||
|
@ -1816,9 +1816,9 @@ auto HexagonVectorCombine::getSizeOf(const Type *Ty, SizeKind Kind) const
|
||||
auto *NcTy = const_cast<Type *>(Ty);
|
||||
switch (Kind) {
|
||||
case Store:
|
||||
return DL.getTypeStoreSize(NcTy).getFixedValue();
|
||||
return DL.getTypeStoreSize(NcTy).getFixedSize();
|
||||
case Alloc:
|
||||
return DL.getTypeAllocSize(NcTy).getFixedValue();
|
||||
return DL.getTypeAllocSize(NcTy).getFixedSize();
|
||||
}
|
||||
llvm_unreachable("Unhandled SizeKind enum");
|
||||
}
|
||||
|
@ -138,7 +138,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
|
||||
if (VT.getVectorMinNumElements() < MinElts)
|
||||
return;
|
||||
|
||||
unsigned Size = VT.getSizeInBits().getKnownMinValue();
|
||||
unsigned Size = VT.getSizeInBits().getKnownMinSize();
|
||||
const TargetRegisterClass *RC;
|
||||
if (Size <= RISCV::RVVBitsPerBlock)
|
||||
RC = &RISCV::VRRegClass;
|
||||
@ -1589,7 +1589,7 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
|
||||
|
||||
RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
|
||||
assert(VT.isScalableVector() && "Expecting a scalable vector type");
|
||||
unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
|
||||
unsigned KnownSize = VT.getSizeInBits().getKnownMinSize();
|
||||
if (VT.getVectorElementType() == MVT::i1)
|
||||
KnownSize *= 8;
|
||||
|
||||
@ -5443,7 +5443,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
|
||||
// Optimize for constant AVL
|
||||
if (isa<ConstantSDNode>(AVL)) {
|
||||
unsigned EltSize = VT.getScalarSizeInBits();
|
||||
unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
|
||||
unsigned MinSize = VT.getSizeInBits().getKnownMinSize();
|
||||
|
||||
unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
|
||||
unsigned MaxVLMAX =
|
||||
@ -6419,7 +6419,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
|
||||
return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
|
||||
}
|
||||
unsigned EltSize = VecVT.getScalarSizeInBits();
|
||||
unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
|
||||
unsigned MinSize = VecVT.getSizeInBits().getKnownMinSize();
|
||||
unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
|
||||
unsigned MaxVLMAX =
|
||||
RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
|
||||
|
@ -1008,7 +1008,7 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
|
||||
unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
|
||||
if (isa<ScalableVectorType>(Ty)) {
|
||||
const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
|
||||
const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
|
||||
const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinSize();
|
||||
const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock;
|
||||
return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize);
|
||||
}
|
||||
@ -1472,7 +1472,7 @@ unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) {
|
||||
TypeSize Size = DL.getTypeSizeInBits(Ty);
|
||||
if (Ty->isVectorTy()) {
|
||||
if (Size.isScalable() && ST->hasVInstructions())
|
||||
return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
|
||||
return divideCeil(Size.getKnownMinSize(), RISCV::RVVBitsPerBlock);
|
||||
|
||||
if (ST->useRVVForFixedLengthVectors())
|
||||
return divideCeil(Size, ST->getRealMinVLen());
|
||||
|
@ -543,7 +543,7 @@ static bool findArgParts(Argument *Arg, const DataLayout &DL, AAResults &AAR,
|
||||
if (!isAligned(I->getAlign(), Off))
|
||||
return false;
|
||||
|
||||
NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedValue());
|
||||
NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedSize());
|
||||
NeededAlign = std::max(NeededAlign, I->getAlign());
|
||||
}
|
||||
|
||||
|
@ -764,8 +764,8 @@ bool TypeInfer::EnforceSameNumElts(TypeSetByHwMode &V, TypeSetByHwMode &W) {
|
||||
namespace {
|
||||
struct TypeSizeComparator {
|
||||
bool operator()(const TypeSize &LHS, const TypeSize &RHS) const {
|
||||
return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
|
||||
std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
|
||||
return std::make_tuple(LHS.isScalable(), LHS.getKnownMinSize()) <
|
||||
std::make_tuple(RHS.isScalable(), RHS.getKnownMinSize());
|
||||
}
|
||||
};
|
||||
} // end anonymous namespace
|
||||
|
Loading…
x
Reference in New Issue
Block a user