Mirror of https://github.com/llvm/llvm-project.git
[SVE] Remove calls to VectorType::getNumElements from CodeGen
Reviewers: efriedma, fpetrogalli, sdesmalen, RKSimon, arsenm

Reviewed By: RKSimon

Subscribers: wdng, tschuett, hiraditya, rkruppe, psnobl, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D82210
parent 469da663f2
commit ff5b9a7b3b
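
The whole patch follows one recurring pattern: code that needs the element count of a vector as a plain integer now casts to FixedVectorType instead of VectorType, so asking for the width of a scalable vector fails loudly in the cast rather than silently returning only the known-minimum count. A minimal sketch of the shape of that change, kept outside any LLVM pass (the helper name getFixedNumElts is illustrative, not part of the patch):

    // Sketch only; compiles against the LLVM headers of this revision.
    #include "llvm/IR/DerivedTypes.h" // VectorType, FixedVectorType, ScalableVectorType
    #include "llvm/Support/Casting.h" // cast<>, isa<>

    using namespace llvm;

    // Before: cast<VectorType>(Ty)->getNumElements() also compiles for scalable
    // vectors, but only reports the minimum number of elements.
    // After: the cast below asserts on a scalable vector, which is the intent
    // wherever CodeGen truly requires a compile-time-known width.
    static unsigned getFixedNumElts(Type *Ty) {
      return cast<FixedVectorType>(Ty)->getNumElements();
    }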
@@ -5329,7 +5329,7 @@ bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
   if (!RewriteGEP && Ops.size() == 2)
     return false;
 
-  unsigned NumElts = cast<VectorType>(Ptr->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements();
 
   IRBuilder<> Builder(MemoryInst);
 
@@ -6628,7 +6628,7 @@ bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
   if (!NewType)
     return false;
 
-  VectorType *SVIVecType = cast<VectorType>(SVI->getType());
+  auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
   assert(!NewType->isVectorTy() && "Expected a scalar type!");
   assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
          "Expected a type of the same size!");
@@ -125,7 +125,8 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
       if (!FMF.allowReassoc())
         Rdx = getOrderedReduction(Builder, Acc, Vec, getOpcode(ID), MRK);
       else {
-        if (!isPowerOf2_32(cast<VectorType>(Vec->getType())->getNumElements()))
+        if (!isPowerOf2_32(
+                cast<FixedVectorType>(Vec->getType())->getNumElements()))
           continue;
 
         Rdx = getShuffleReduction(Builder, Vec, getOpcode(ID), MRK);
@@ -146,7 +147,8 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
     case Intrinsic::experimental_vector_reduce_fmax:
     case Intrinsic::experimental_vector_reduce_fmin: {
       Value *Vec = II->getArgOperand(0);
-      if (!isPowerOf2_32(cast<VectorType>(Vec->getType())->getNumElements()))
+      if (!isPowerOf2_32(
+              cast<FixedVectorType>(Vec->getType())->getNumElements()))
         continue;
 
       Rdx = getShuffleReduction(Builder, Vec, getOpcode(ID), MRK);
@@ -1059,7 +1059,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
   // splat vector.
   unsigned VectorWidth = 0;
   if (auto *VT = dyn_cast<VectorType>(U.getType()))
-    VectorWidth = VT->getNumElements();
+    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
 
   // We might need to splat the base pointer into a vector if the offsets
   // are vectors.
@@ -1946,7 +1946,7 @@ bool IRTranslator::translateInsertElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
   // If it is a <1 x Ty> vector, use the scalar as it is
   // not a legal vector type in LLT.
-  if (cast<VectorType>(U.getType())->getNumElements() == 1)
+  if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
     return translateCopy(U, *U.getOperand(1), MIRBuilder);
 
   Register Res = getOrCreateVReg(U);
@@ -1961,7 +1961,7 @@ bool IRTranslator::translateExtractElement(const User &U,
                                            MachineIRBuilder &MIRBuilder) {
   // If it is a <1 x Ty> vector, use the scalar as it is
   // not a legal vector type in LLT.
-  if (cast<VectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
+  if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
     return translateCopy(U, *U.getOperand(0), MIRBuilder);
 
   Register Res = getOrCreateVReg(U);
@@ -280,7 +280,7 @@ static bool isReInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
 
 bool InterleavedAccess::lowerInterleavedLoad(
     LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts) {
-  if (!LI->isSimple())
+  if (!LI->isSimple() || isa<ScalableVectorType>(LI->getType()))
     return false;
 
   SmallVector<ShuffleVectorInst *, 4> Shuffles;
@@ -308,7 +308,8 @@ bool InterleavedAccess::lowerInterleavedLoad(
 
   unsigned Factor, Index;
 
-  unsigned NumLoadElements = cast<VectorType>(LI->getType())->getNumElements();
+  unsigned NumLoadElements =
+      cast<FixedVectorType>(LI->getType())->getNumElements();
   // Check if the first shufflevector is DE-interleave shuffle.
   if (!isDeInterleaveMask(Shuffles[0]->getShuffleMask(), Factor, Index,
                           MaxFactor, NumLoadElements))
@@ -421,13 +422,13 @@ bool InterleavedAccess::lowerInterleavedStore(
     return false;
 
   ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand());
-  if (!SVI || !SVI->hasOneUse())
+  if (!SVI || !SVI->hasOneUse() || isa<ScalableVectorType>(SVI->getType()))
     return false;
 
   // Check if the shufflevector is RE-interleave shuffle.
   unsigned Factor;
   unsigned OpNumElts =
-      cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
+      cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
   if (!isReInterleaveMask(SVI->getShuffleMask(), Factor, MaxFactor, OpNumElts))
     return false;
 
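
The two InterleavedAccess hunks take a different approach: instead of casting, they bail out of the transform for scalable vectors, because shuffle-mask based (de)interleaving only makes sense when the element count is known at compile time. A small illustrative guard with the same shape (the helper name is not from the pass):

    #include "llvm/IR/DerivedTypes.h"  // ScalableVectorType
    #include "llvm/IR/Instructions.h"  // LoadInst
    #include "llvm/Support/Casting.h"  // isa<>

    using namespace llvm;

    // Mirrors the checks added above: simple, fixed-width vector loads remain
    // candidates; volatile/atomic loads and scalable vectors are rejected.
    static bool isInterleavingCandidate(const LoadInst *LI) {
      return LI->isSimple() && !isa<ScalableVectorType>(LI->getType());
    }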
@@ -1200,7 +1200,8 @@ bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
   IRBuilder<> Builder(InsertionPoint);
   Type *ETy = InterleavedLoad.front().SVI->getType()->getElementType();
   unsigned ElementsPerSVI =
-      InterleavedLoad.front().SVI->getType()->getNumElements();
+      cast<FixedVectorType>(InterleavedLoad.front().SVI->getType())
+          ->getNumElements();
   FixedVectorType *ILTy = FixedVectorType::get(ETy, Factor * ElementsPerSVI);
 
   SmallVector<unsigned, 4> Indices;
@@ -19,7 +19,7 @@ using namespace llvm;
 
 LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
   if (auto VTy = dyn_cast<VectorType>(&Ty)) {
-    auto NumElements = VTy->getNumElements();
+    auto NumElements = cast<FixedVectorType>(VTy)->getNumElements();
     LLT ScalarTy = getLLTForType(*VTy->getElementType(), DL);
     if (NumElements == 1)
       return ScalarTy;
@@ -83,7 +83,7 @@ static bool isConstantIntVector(Value *Mask) {
   if (!C)
     return false;
 
-  unsigned NumElts = cast<VectorType>(Mask->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Mask->getType())->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     Constant *CElt = C->getAggregateElement(i);
     if (!CElt || !isa<ConstantInt>(CElt))
@@ -132,7 +132,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
   Value *Src0 = CI->getArgOperand(3);
 
   const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
-  VectorType *VecType = cast<VectorType>(CI->getType());
+  VectorType *VecType = cast<FixedVectorType>(CI->getType());
 
   Type *EltTy = VecType->getElementType();
 
@@ -158,7 +158,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
   Type *NewPtrType =
       EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
   Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
-  unsigned VectorWidth = VecType->getNumElements();
+  unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();
 
   // The result vector
   Value *VResult = Src0;
@@ -271,7 +271,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
   Value *Mask = CI->getArgOperand(3);
 
   const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
-  VectorType *VecType = cast<VectorType>(Src->getType());
+  auto *VecType = cast<VectorType>(Src->getType());
 
   Type *EltTy = VecType->getElementType();
 
@@ -295,7 +295,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
   Type *NewPtrType =
       EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
   Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
-  unsigned VectorWidth = VecType->getNumElements();
+  unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();
 
   if (isConstantIntVector(Mask)) {
     for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
@@ -396,7 +396,7 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
   Value *Mask = CI->getArgOperand(2);
   Value *Src0 = CI->getArgOperand(3);
 
-  VectorType *VecType = cast<VectorType>(CI->getType());
+  auto *VecType = cast<FixedVectorType>(CI->getType());
   Type *EltTy = VecType->getElementType();
 
   IRBuilder<> Builder(CI->getContext());
@@ -520,8 +520,8 @@ static void scalarizeMaskedScatter(CallInst *CI, bool &ModifiedDT) {
   Value *Alignment = CI->getArgOperand(2);
   Value *Mask = CI->getArgOperand(3);
 
-  assert(isa<VectorType>(Src->getType()) &&
-         "Unexpected data type in masked scatter intrinsic");
+  auto *SrcFVTy = cast<FixedVectorType>(Src->getType());
+
   assert(
       isa<VectorType>(Ptrs->getType()) &&
       isa<PointerType>(cast<VectorType>(Ptrs->getType())->getElementType()) &&
@@ -534,7 +534,7 @@ static void scalarizeMaskedScatter(CallInst *CI, bool &ModifiedDT) {
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 
   MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
-  unsigned VectorWidth = cast<VectorType>(Src->getType())->getNumElements();
+  unsigned VectorWidth = SrcFVTy->getNumElements();
 
   // Shorten the way if the mask is a vector of constants.
   if (isConstantIntVector(Mask)) {
@@ -605,7 +605,7 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
   Value *Mask = CI->getArgOperand(1);
   Value *PassThru = CI->getArgOperand(2);
 
-  VectorType *VecType = cast<VectorType>(CI->getType());
+  auto *VecType = cast<FixedVectorType>(CI->getType());
 
   Type *EltTy = VecType->getElementType();
 
@@ -718,7 +718,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) {
   Value *Ptr = CI->getArgOperand(1);
   Value *Mask = CI->getArgOperand(2);
 
-  VectorType *VecType = cast<VectorType>(Src->getType());
+  auto *VecType = cast<FixedVectorType>(Src->getType());
 
   IRBuilder<> Builder(CI->getContext());
   Instruction *InsertPt = CI;
@@ -4295,7 +4295,7 @@ static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
 
   Base = SDB->getValue(C);
 
-  unsigned NumElts = cast<VectorType>(Ptr->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements();
   EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
   Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
   IndexType = ISD::SIGNED_SCALED;
@@ -1765,7 +1765,7 @@ static std::string scalarConstantToHexString(const Constant *C) {
   } else {
     unsigned NumElements;
     if (auto *VTy = dyn_cast<VectorType>(Ty))
-      NumElements = VTy->getNumElements();
+      NumElements = cast<FixedVectorType>(VTy)->getNumElements();
     else
       NumElements = Ty->getArrayNumElements();
     std::string HexString;
@@ -122,7 +122,14 @@ EVT EVT::getExtendedVectorElementType() const {
 
 unsigned EVT::getExtendedVectorNumElements() const {
   assert(isExtended() && "Type is not extended!");
-  return cast<VectorType>(LLVMTy)->getNumElements();
+  ElementCount EC = cast<VectorType>(LLVMTy)->getElementCount();
+  if (EC.Scalable) {
+    WithColor::warning()
+        << "The code that requested the fixed number of elements has made the "
+           "assumption that this vector is not scalable. This assumption was "
+           "not correct, and this may lead to broken code\n";
+  }
+  return EC.Min;
 }
 
 ElementCount EVT::getExtendedVectorElementCount() const {
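
The ValueTypes change is deliberately transitional: getExtendedVectorNumElements() now queries the ElementCount and, rather than asserting, emits a warning when a scalable vector reaches an interface that can only report a fixed count, then returns the known-minimum value. Callers that may see scalable vectors should switch to the ElementCount-based query instead; a sketch of that caller-side fix, with an illustrative helper name (not part of the patch):

    #include "llvm/IR/DerivedTypes.h"  // VectorType, ElementCount

    using namespace llvm;

    // Handle the scalable case explicitly instead of relying on the warning path.
    static unsigned fixedWidthOrZero(VectorType *VTy) {
      ElementCount EC = VTy->getElementCount(); // {Min, Scalable} at this revision
      if (EC.Scalable)
        return 0;     // caller must take a scalable-aware path
      return EC.Min;  // exact width of a fixed-length vector
    }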