Fix typo "indicies" (#92232)
This commit is contained in: parent ccbf908b08, commit 1650f1b3d7
@@ -92,7 +92,7 @@ class VTTBuilder {
using AddressPointsMapTy = llvm::DenseMap<BaseSubobject, uint64_t>;

/// The sub-VTT indices for the bases of the most derived class.
- llvm::DenseMap<BaseSubobject, uint64_t> SubVTTIndicies;
+ llvm::DenseMap<BaseSubobject, uint64_t> SubVTTIndices;

/// The secondary virtual pointer indices of all subobjects of
/// the most derived class.
@@ -148,8 +148,8 @@ public:
}

/// Returns a reference to the sub-VTT indices.
- const llvm::DenseMap<BaseSubobject, uint64_t> &getSubVTTIndicies() const {
- return SubVTTIndicies;
+ const llvm::DenseMap<BaseSubobject, uint64_t> &getSubVTTIndices() const {
+ return SubVTTIndices;
}

/// Returns a reference to the secondary virtual pointer indices.
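The renamed accessor keeps the DenseMap-based interface shown above, so callers change only in spelling. As a rough sketch of typical use (not code from this commit; the helper name and the assumed include path are illustrative only, mirroring how CGVTables.cpp consumes the map further down in this diff):

```cpp
#include "clang/AST/VTTBuilder.h" // assumed location of VTTBuilder/BaseSubobject
#include <cstdint>

// Hypothetical helper: look up the sub-VTT index recorded for a base subobject.
static bool findSubVTTIndex(const clang::VTTBuilder &Builder,
                            const clang::BaseSubobject &Base,
                            uint64_t &IndexOut) {
  // getSubVTTIndices() is the post-rename spelling of getSubVTTIndicies().
  const llvm::DenseMap<clang::BaseSubobject, uint64_t> &Indices =
      Builder.getSubVTTIndices();
  auto It = Indices.find(Base);
  if (It == Indices.end())
    return false;
  IndexOut = It->second; // position of Base's sub-VTT within the VTT
  return true;
}
```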
@@ -189,7 +189,7 @@ void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {

if (!IsPrimaryVTT) {
// Remember the sub-VTT index.
- SubVTTIndicies[Base] = VTTComponents.size();
+ SubVTTIndices[Base] = VTTComponents.size();
}

uint64_t VTableIndex = VTTVTables.size();
@@ -138,23 +138,24 @@ uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
BaseSubobject Base) {
BaseSubobjectPairTy ClassSubobjectPair(RD, Base);

- SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
- if (I != SubVTTIndicies.end())
+ SubVTTIndicesMapTy::iterator I = SubVTTIndices.find(ClassSubobjectPair);
+ if (I != SubVTTIndices.end())
return I->second;

VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);

- for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
- Builder.getSubVTTIndicies().begin(),
- E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator
+ I = Builder.getSubVTTIndices().begin(),
+ E = Builder.getSubVTTIndices().end();
+ I != E; ++I) {
// Insert all indices.
BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);

- SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
+ SubVTTIndices.insert(std::make_pair(ClassSubobjectPair, I->second));
}

- I = SubVTTIndicies.find(ClassSubobjectPair);
- assert(I != SubVTTIndicies.end() && "Did not find index!");
+ I = SubVTTIndices.find(ClassSubobjectPair);
+ assert(I != SubVTTIndices.end() && "Did not find index!");

return I->second;
}
@@ -38,10 +38,10 @@ class CodeGenVTables {
typedef VTableLayout::AddressPointsMapTy VTableAddressPointsMapTy;

typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
- typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndicesMapTy;

- /// SubVTTIndicies - Contains indices into the various sub-VTTs.
- SubVTTIndiciesMapTy SubVTTIndicies;
+ /// SubVTTIndices - Contains indices into the various sub-VTTs.
+ SubVTTIndicesMapTy SubVTTIndices;

typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t>
SecondaryVirtualPointerIndicesMapTy;
@@ -12,7 +12,7 @@ from dex.dextIR import ValueIR

class DexExpectStepOrder(CommandBase):
"""Expect the line every `DexExpectStepOrder` is found on to be stepped on
- in `order`. Each instance must have a set of unique ascending indicies.
+ in `order`. Each instance must have a set of unique ascending indices.

DexExpectStepOrder(*order)

@@ -590,7 +590,7 @@ Syntax:

Note that %indices are not operands, they are the elemental region block
arguments, representing the array iteration space in a one based fashion.
- The choice of using one based indicies is to match Fortran default for
+ The choice of using one based indices is to match Fortran default for
array variables, so that there is no need to generate bound adjustments
when working with one based array variables in an expression.

@@ -144,7 +144,7 @@ subroutine test_nested_foralls()
! ifoo and ibar could depend on x since it is a module
! variable use associated. The calls in the control value
! computation cannot be hoisted from the outer forall
- ! even when they do not depend on outer forall indicies.
+ ! even when they do not depend on outer forall indices.
forall (integer(8)::j=jfoo():jbar())
x(i, j) = x(j, i)
end forall
@@ -496,7 +496,7 @@ private:
// the type of index, and returns a TypeDesc describing that type. It does not
// modify cur_pos.
LIBC_INLINE TypeDesc get_type_desc(size_t index) {
- // index mode is assumed, and the indicies start at 1, so an index
+ // index mode is assumed, and the indices start at 1, so an index
// of 0 is invalid.
size_t local_pos = 0;

@@ -29,7 +29,7 @@
#include <utility>

// Layout that wraps indices to test some idiosyncratic behavior
- // - basically it is a layout_left where indicies are first wrapped i.e. i%Wrap
+ // - basically it is a layout_left where indices are first wrapped i.e. i%Wrap
// - only accepts integers as indices
// - is_always_strided and is_always_unique are false
// - is_strided and is_unique are true if all extents are smaller than Wrap
@@ -644,7 +644,7 @@ source vector should be inserted into.
The index must be a constant multiple of the second source vector's minimum
vector length. If the vectors are scalable, then the index is first scaled by
the runtime scaling factor. The indices inserted in the source vector must be
- valid indicies of that vector. If this condition cannot be determined statically
+ valid indices of that vector. If this condition cannot be determined statically
but is false at runtime, then the result vector is undefined.

.. code-block:: none
@@ -661,7 +661,7 @@ the source vector.
The index must be a constant multiple of the source vector's minimum vector
length. If the source vector is a scalable vector, then the index is first
scaled by the runtime scaling factor. The indices extracted from the source
- vector must be valid indicies of that vector. If this condition cannot be
+ vector must be valid indices of that vector. If this condition cannot be
determined statically but is false at runtime, then the result vector is
undefined.

@@ -765,8 +765,8 @@ class Instruction : InstructionEncoding {

/// Should generate helper functions that help you to map a logical operand's
/// index to the underlying MIOperand's index.
- /// In most architectures logical operand indicies are equal to
- /// MIOperand indicies, but for some CISC architectures, a logical operand
+ /// In most architectures logical operand indices are equal to
+ /// MIOperand indices, but for some CISC architectures, a logical operand
/// might be consist of multiple MIOperand (e.g. a logical operand that
/// uses complex address mode).
bit UseLogicalOperandMappings = false;
@@ -3444,9 +3444,9 @@ bool DependenceInfo::tryDelinearizeFixedSize(
// iff the subscripts are positive and are less than the range of the
// dimension.
if (!DisableDelinearizationChecks) {
- auto AllIndiciesInRange = [&](SmallVector<int, 4> &DimensionSizes,
- SmallVectorImpl<const SCEV *> &Subscripts,
- Value *Ptr) {
+ auto AllIndicesInRange = [&](SmallVector<int, 4> &DimensionSizes,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ Value *Ptr) {
size_t SSize = Subscripts.size();
for (size_t I = 1; I < SSize; ++I) {
const SCEV *S = Subscripts[I];
@@ -3462,8 +3462,8 @@ bool DependenceInfo::tryDelinearizeFixedSize(
return true;
};

- if (!AllIndiciesInRange(SrcSizes, SrcSubscripts, SrcPtr) ||
- !AllIndiciesInRange(DstSizes, DstSubscripts, DstPtr)) {
+ if (!AllIndicesInRange(SrcSizes, SrcSubscripts, SrcPtr) ||
+ !AllIndicesInRange(DstSizes, DstSubscripts, DstPtr)) {
SrcSubscripts.clear();
DstSubscripts.clear();
return false;
@@ -986,7 +986,7 @@ void ModuleBitcodeWriter::writeTypeTable() {
Stream.EnterSubblock(bitc::TYPE_BLOCK_ID_NEW, 4 /*count from # abbrevs */);
SmallVector<uint64_t, 64> TypeVals;

- uint64_t NumBits = VE.computeBitsRequiredForTypeIndicies();
+ uint64_t NumBits = VE.computeBitsRequiredForTypeIndices();

// Abbrev for TYPE_CODE_OPAQUE_POINTER.
auto Abbv = std::make_shared<BitCodeAbbrev>();
@@ -3721,7 +3721,7 @@ void ModuleBitcodeWriter::writeBlockInfo() {
auto Abbv = std::make_shared<BitCodeAbbrev>();
Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_SETTYPE));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
CONSTANTS_SETTYPE_ABBREV)
llvm_unreachable("Unexpected abbrev ordering!");
@@ -3741,7 +3741,7 @@ void ModuleBitcodeWriter::writeBlockInfo() {
Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CE_CAST));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // cast opc
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // typeid
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id

if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
@@ -3763,7 +3763,7 @@ void ModuleBitcodeWriter::writeBlockInfo() {
Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_LOAD));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Ptr
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // Align
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // volatile
if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
@@ -3815,7 +3815,7 @@ void ModuleBitcodeWriter::writeBlockInfo() {
Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpVal
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
FUNCTION_INST_CAST_ABBREV)
@@ -3826,7 +3826,7 @@ void ModuleBitcodeWriter::writeBlockInfo() {
Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpVal
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); // flags
if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
@@ -1191,6 +1191,6 @@ unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const {
return getGlobalBasicBlockID(BB);
}

- uint64_t ValueEnumerator::computeBitsRequiredForTypeIndicies() const {
+ uint64_t ValueEnumerator::computeBitsRequiredForTypeIndices() const {
return Log2_32_Ceil(getTypes().size() + 1);
}
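The body visible in this hunk, `return Log2_32_Ceil(getTypes().size() + 1);`, is just a ceiling log2: it yields the number of fixed-width bits needed to encode any value in the range [0, getTypes().size()]. A standalone sketch of the same arithmetic (plain C++20, not LLVM code; the function name here is made up for illustration):

```cpp
#include <bit>
#include <cstdint>
#include <iostream>

// ceil(log2(N + 1)) == std::bit_width(N): the bit count needed for values 0..N.
static uint64_t bitsRequiredForTypeIndices(uint64_t NumTypes) {
  return std::bit_width(NumTypes);
}

int main() {
  std::cout << bitsRequiredForTypeIndices(10) << '\n'; // 4, same as Log2_32_Ceil(11)
  std::cout << bitsRequiredForTypeIndices(16) << '\n'; // 5, same as Log2_32_Ceil(17)
}
```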
@@ -234,7 +234,7 @@ public:
void incorporateFunction(const Function &F);

void purgeFunction();
- uint64_t computeBitsRequiredForTypeIndicies() const;
+ uint64_t computeBitsRequiredForTypeIndices() const;

private:
void OptimizeConstants(unsigned CstStart, unsigned CstEnd);
@@ -86,7 +86,7 @@
/// lookup the VarLoc in the VarLocMap. Rather than operate directly on machine
/// locations, the dataflow analysis in this pass identifies locations by their
/// indices in the VarLocMap, meaning all the variable locations in a block can
- /// be described by a sparse vector of VarLocMap indicies.
+ /// be described by a sparse vector of VarLocMap indices.
///
/// All the storage for the dataflow analysis is local to the ExtendRanges
/// method and passed down to helper methods. "OutLocs" and "InLocs" record the
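The doc comment above describes the representation only in prose. A minimal sketch of the idea, assuming nothing about the pass's real data structures (plain C++; the string-keyed interner is an invented stand-in for VarLocMap):

```cpp
#include <algorithm>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Interns each distinct location description and hands out a small integer ID,
// playing the role the comment assigns to VarLocMap.
struct LocationInterner {
  std::unordered_map<std::string, uint32_t> IdOf;
  uint32_t intern(const std::string &Loc) {
    auto It = IdOf.try_emplace(Loc, static_cast<uint32_t>(IdOf.size())).first;
    return It->second;
  }
};

// A block's live set ("OutLocs"/"InLocs" in the comment) is then just a sparse,
// sorted vector of those IDs rather than a container of full location objects.
using LocIndexSet = std::vector<uint32_t>;

static void insertLoc(LocIndexSet &Set, uint32_t Id) {
  auto It = std::lower_bound(Set.begin(), Set.end(), Id);
  if (It == Set.end() || *It != Id)
    Set.insert(It, Id); // keep the vector sorted and duplicate-free
}
```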
@@ -212,7 +212,7 @@ static const std::vector<int64_t> PerLiveRangeShape{1, NumberOfInterferences};
M(float, mbb_frequencies, MBBFrequencyShape, \
"A vector of machine basic block frequencies") \
M(int64_t, mbb_mapping, InstructionsShape, \
- "A vector of indicies mapping instructions to MBBs")
+ "A vector of indices mapping instructions to MBBs")
#else
#define RA_EVICT_FIRST_DEVELOPMENT_FEATURE(M)
#define RA_EVICT_REST_DEVELOPMENT_FEATURES(M)
@@ -1444,7 +1444,7 @@ bool PEI::replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
// pointer as the base register.
if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
assert((!MI.isDebugValue() || OpIdx == 0) &&
- "Frame indicies can only appear as the first operand of a "
+ "Frame indices can only appear as the first operand of a "
"DBG_VALUE machine instruction");
Register Reg;
MachineOperand &Offset = MI.getOperand(OpIdx + 1);
@@ -154,7 +154,7 @@ Error ELFAttributeParser::parseSubsection(uint32_t length) {
Twine::utohexstr(cursor.tell() - 5));

StringRef scopeName, indexName;
- SmallVector<uint8_t, 8> indicies;
+ SmallVector<uint8_t, 8> indices;
switch (tag) {
case ELFAttrs::File:
scopeName = "FileAttributes";
@@ -162,12 +162,12 @@ Error ELFAttributeParser::parseSubsection(uint32_t length) {
case ELFAttrs::Section:
scopeName = "SectionAttributes";
indexName = "Sections";
- parseIndexList(indicies);
+ parseIndexList(indices);
break;
case ELFAttrs::Symbol:
scopeName = "SymbolAttributes";
indexName = "Symbols";
- parseIndexList(indicies);
+ parseIndexList(indices);
break;
default:
return createStringError(errc::invalid_argument,
@@ -178,8 +178,8 @@ Error ELFAttributeParser::parseSubsection(uint32_t length) {

if (sw) {
DictScope scope(*sw, scopeName);
- if (!indicies.empty())
- sw->printList(indexName, indicies);
+ if (!indices.empty())
+ sw->printList(indexName, indices);
if (Error e = parseAttributeList(size - 5))
return e;
} else if (Error e = parseAttributeList(size - 5))
@@ -23881,7 +23881,7 @@ static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG,

// For "scalar + vector of indices", just scale the indices. This only
// applies to non-temporal scatters because there's no instruction that takes
- // indicies.
+ // indices.
if (Opcode == AArch64ISD::SSTNT1_INDEX_PRED) {
Offset =
getScaledOffsetForBitWidth(DAG, Offset, DL, SrcElVT.getSizeInBits());
@@ -7053,7 +7053,7 @@ SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
DAG.getSplatBuildVector(VecVT, SL, InsVal));

- // 2. Mask off all other indicies except the required index within (1).
+ // 2. Mask off all other indices except the required index within (1).
SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);

// 3. Mask off the required index within the target vector.
@@ -958,7 +958,7 @@ void DXILBitcodeWriter::writeTypeTable() {
Stream.EnterSubblock(bitc::TYPE_BLOCK_ID_NEW, 4 /*count from # abbrevs */);
SmallVector<uint64_t, 64> TypeVals;

- uint64_t NumBits = VE.computeBitsRequiredForTypeIndicies();
+ uint64_t NumBits = VE.computeBitsRequiredForTypeIndices();

// Abbrev for TYPE_CODE_POINTER.
auto Abbv = std::make_shared<BitCodeAbbrev>();
@@ -2747,7 +2747,7 @@ void DXILBitcodeWriter::writeBlockInfo() {
auto Abbv = std::make_shared<BitCodeAbbrev>();
Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_SETTYPE));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, std::move(Abbv)) !=
CONSTANTS_SETTYPE_ABBREV)
assert(false && "Unexpected abbrev ordering!");
@@ -2767,7 +2767,7 @@ void DXILBitcodeWriter::writeBlockInfo() {
Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CE_CAST));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // cast opc
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // typeid
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id

if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, std::move(Abbv)) !=
@@ -2789,7 +2789,7 @@ void DXILBitcodeWriter::writeBlockInfo() {
Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_LOAD));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Ptr
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // Align
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // volatile
if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) !=
@@ -2822,7 +2822,7 @@ void DXILBitcodeWriter::writeBlockInfo() {
Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpVal
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
- VE.computeBitsRequiredForTypeIndicies()));
+ VE.computeBitsRequiredForTypeIndices()));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) !=
(unsigned)FUNCTION_INST_CAST_ABBREV)
@@ -1140,6 +1140,6 @@ unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const {
return getGlobalBasicBlockID(BB);
}

- uint64_t ValueEnumerator::computeBitsRequiredForTypeIndicies() const {
+ uint64_t ValueEnumerator::computeBitsRequiredForTypeIndices() const {
return Log2_32_Ceil(getTypes().size() + 1);
}
@@ -236,7 +236,7 @@ public:
void incorporateFunction(const Function &F);

void purgeFunction();
- uint64_t computeBitsRequiredForTypeIndicies() const;
+ uint64_t computeBitsRequiredForTypeIndices() const;

void EnumerateType(Type *T);

@@ -14945,7 +14945,7 @@ static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
}
}

- // If the vector extract indicies are not correct, add the appropriate
+ // If the vector extract indices are not correct, add the appropriate
// vector_shuffle.
int TgtElemArrayIdx;
int InputSize = Input.getValueType().getScalarSizeInBits();
@@ -331,7 +331,7 @@ Instruction *
InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN) {
auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));

- // Scan to see if all operands are `insertvalue`'s with the same indicies,
+ // Scan to see if all operands are `insertvalue`'s with the same indices,
// and all have a single use.
for (Value *V : drop_begin(PN.incoming_values())) {
auto *I = dyn_cast<InsertValueInst>(V);
@@ -371,7 +371,7 @@ Instruction *
InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN) {
auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));

- // Scan to see if all operands are `extractvalue`'s with the same indicies,
+ // Scan to see if all operands are `extractvalue`'s with the same indices,
// and all have a single use.
for (Value *V : drop_begin(PN.incoming_values())) {
auto *I = dyn_cast<ExtractValueInst>(V);
@@ -1008,7 +1008,7 @@ bool SeparateConstOffsetFromGEP::reorderGEP(GetElementPtrInst *GEP,
}

IRBuilder<> Builder(GEP);
- // For trivial GEP chains, we can swap the indicies.
+ // For trivial GEP chains, we can swap the indices.
Value *NewSrc = Builder.CreateGEP(
GEP->getSourceElementType(), PtrGEP->getPointerOperand(),
SmallVector<Value *, 4>(GEP->indices()), "", IsChainInBounds);
@@ -1061,7 +1061,7 @@ void initializeNetwork(const ProfiParams &Params, MinCostMaxFlow &Network,
assert(NumJumps > 0 && "Too few jumps in a function");

// Introducing dummy source/sink pairs to allow flow circulation.
- // The nodes corresponding to blocks of the function have indicies in
+ // The nodes corresponding to blocks of the function have indices in
// the range [0 .. 2 * NumBlocks); the dummy sources/sinks are indexed by the
// next four values.
uint64_t S = 2 * NumBlocks;
@@ -6485,7 +6485,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
const EdgeInfo &UserTreeIdx) {
assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

- SmallVector<int> ReuseShuffleIndicies;
+ SmallVector<int> ReuseShuffleIndices;
SmallVector<Value *> UniqueValues;
SmallVector<Value *> NonUniqueValueVL;
auto TryToFindDuplicates = [&](const InstructionsState &S,
@@ -6494,19 +6494,19 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
DenseMap<Value *, unsigned> UniquePositions(VL.size());
for (Value *V : VL) {
if (isConstant(V)) {
- ReuseShuffleIndicies.emplace_back(
+ ReuseShuffleIndices.emplace_back(
isa<UndefValue>(V) ? PoisonMaskElem : UniqueValues.size());
UniqueValues.emplace_back(V);
continue;
}
auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
- ReuseShuffleIndicies.emplace_back(Res.first->second);
+ ReuseShuffleIndices.emplace_back(Res.first->second);
if (Res.second)
UniqueValues.emplace_back(V);
}
size_t NumUniqueScalarValues = UniqueValues.size();
if (NumUniqueScalarValues == VL.size()) {
- ReuseShuffleIndicies.clear();
+ ReuseShuffleIndices.clear();
} else {
// FIXME: Reshuffing scalars is not supported yet for non-power-of-2 ops.
if (UserTreeIdx.UserTE && UserTreeIdx.UserTE->isNonPowOf2Vec()) {
@@ -6532,7 +6532,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
})) {
unsigned PWSz = PowerOf2Ceil(UniqueValues.size());
if (PWSz == VL.size()) {
- ReuseShuffleIndicies.clear();
+ ReuseShuffleIndices.clear();
} else {
NonUniqueValueVL.assign(UniqueValues.begin(), UniqueValues.end());
NonUniqueValueVL.append(PWSz - UniqueValues.size(),
@@ -6579,7 +6579,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
return;
}

@@ -6590,7 +6590,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
return;
}

@@ -6694,7 +6694,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
return;
}

@@ -6722,7 +6722,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
return;
}
} else {
@@ -6745,7 +6745,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
<< ") is already in tree.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
return;
}
}
@@ -6757,7 +6757,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
if (TryToFindDuplicates(S))
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
return;
}
}
@@ -6810,7 +6810,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps);
if (State == TreeEntry::NeedToGather) {
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
return;
}

@@ -6832,7 +6832,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
!BS.getScheduleData(VL0)->isPartOfBundle()) &&
"tryScheduleBundle should cancelScheduling on failure");
newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
NonScheduledFirst.insert(VL.front());
return;
}
@@ -6845,7 +6845,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
auto *PH = cast<PHINode>(VL0);

TreeEntry *TE =
- newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
+ newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

// Keeps the reordered operands to avoid code duplication.
|
||||
if (CurrentOrder.empty()) {
|
||||
LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
|
||||
newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
|
||||
ReuseShuffleIndicies);
|
||||
ReuseShuffleIndices);
|
||||
// This is a special case, as it does not gather, but at the same time
|
||||
// we are not extending buildTree_rec() towards the operands.
|
||||
ValueList Op0;
|
||||
@ -6881,7 +6881,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||
// Insert new order with initial value 0, if it does not exist,
|
||||
// otherwise return the iterator to the existing one.
|
||||
newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
|
||||
ReuseShuffleIndicies, CurrentOrder);
|
||||
ReuseShuffleIndices, CurrentOrder);
|
||||
// This is a special case, as it does not gather, but at the same time
|
||||
// we are not extending buildTree_rec() towards the operands.
|
||||
ValueList Op0;
|
||||
@ -6890,7 +6890,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||
return;
|
||||
}
|
||||
case Instruction::InsertElement: {
|
||||
assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");
|
||||
assert(ReuseShuffleIndices.empty() && "All inserts should be unique");
|
||||
|
||||
auto OrdCompare = [](const std::pair<int, int> &P1,
|
||||
const std::pair<int, int> &P2) {
|
||||
@ -6941,12 +6941,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||
if (CurrentOrder.empty()) {
|
||||
// Original loads are consecutive and does not require reordering.
|
||||
TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
|
||||
ReuseShuffleIndicies);
|
||||
ReuseShuffleIndices);
|
||||
LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
|
||||
} else {
|
||||
// Need to reorder.
|
||||
TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
|
||||
ReuseShuffleIndicies, CurrentOrder);
|
||||
ReuseShuffleIndices, CurrentOrder);
|
||||
LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
|
||||
}
|
||||
TE->setOperandsInOrder();
|
||||
@ -6955,10 +6955,10 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||
// Vectorizing non-consecutive loads with `llvm.masked.gather`.
|
||||
if (CurrentOrder.empty()) {
|
||||
TE = newTreeEntry(VL, TreeEntry::StridedVectorize, Bundle, S,
|
||||
UserTreeIdx, ReuseShuffleIndicies);
|
||||
UserTreeIdx, ReuseShuffleIndices);
|
||||
} else {
|
||||
TE = newTreeEntry(VL, TreeEntry::StridedVectorize, Bundle, S,
|
||||
UserTreeIdx, ReuseShuffleIndicies, CurrentOrder);
|
||||
UserTreeIdx, ReuseShuffleIndices, CurrentOrder);
|
||||
}
|
||||
TE->setOperandsInOrder();
|
||||
LLVM_DEBUG(dbgs() << "SLP: added a vector of strided loads.\n");
|
||||
@ -6966,7 +6966,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
|
||||
case TreeEntry::ScatterVectorize:
|
||||
// Vectorizing non-consecutive loads with `llvm.masked.gather`.
|
||||
TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
|
||||
UserTreeIdx, ReuseShuffleIndicies);
|
||||
UserTreeIdx, ReuseShuffleIndices);
|
||||
TE->setOperandsInOrder();
|
||||
buildTree_rec(PointerOps, Depth + 1, {TE, 0});
|
||||
LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
|
||||
@@ -7020,7 +7020,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
ExtraBitWidthNodes.insert(VectorizableTree.size() + 1);
}
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");

TE->setOperandsInOrder();
@@ -7039,7 +7039,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// Check that all of the compares have the same predicate.
CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");

ValueList Left, Right;
@@ -7100,7 +7100,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
case Instruction::Or:
case Instruction::Xor: {
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");

// Sort operands of the instructions so that each side is more likely to
@@ -7128,7 +7128,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
}
case Instruction::GetElementPtr: {
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
SmallVector<ValueList, 2> Operands(2);
// Prepare the operand vector for pointer operands.
@@ -7194,14 +7194,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (CurrentOrder.empty()) {
// Original stores are consecutive and does not require reordering.
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
TE->setOperandsInOrder();
buildTree_rec(Operands, Depth + 1, {TE, 0});
LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
} else {
fixupOrderingIndices(CurrentOrder);
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies, CurrentOrder);
+ ReuseShuffleIndices, CurrentOrder);
TE->setOperandsInOrder();
buildTree_rec(Operands, Depth + 1, {TE, 0});
LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
@@ -7215,7 +7215,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
// Sort operands of the instructions so that each side is more likely to
// have the same opcode.
if (isCommutative(VL0)) {
@@ -7261,7 +7261,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
}
case Instruction::ShuffleVector: {
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndicies);
+ ReuseShuffleIndices);
LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

// Reorder operands if reordering would enable vectorization.
@@ -12107,8 +12107,8 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
unsigned VF = E->getVectorFactor();

bool NeedFreeze = false;
- SmallVector<int> ReuseShuffleIndicies(E->ReuseShuffleIndices.begin(),
- E->ReuseShuffleIndices.end());
+ SmallVector<int> ReuseShuffleIndices(E->ReuseShuffleIndices.begin(),
+ E->ReuseShuffleIndices.end());
SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end());
// Build a mask out of the reorder indices and reorder scalars per this
// mask.
@@ -234,7 +234,7 @@ entry:
ret <16 x i16> %shuffle
}

- ;;;; Cases with undef indicies mixed in the mask
+ ;;;; Cases with undef indices mixed in the mask

define <8 x float> @shuffle_v8f32_uu67u9ub(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67u9ub:
@@ -19,11 +19,11 @@ TpiStream:
# (int, char **) [Index: 0x1003]
- Kind: LF_ARGLIST
ArgList:
- ArgIndicies: [ 116, 0x1002 ]
+ ArgIndices: [ 116, 0x1002 ]
# (int, double) [Index: 0x1004]
- Kind: LF_ARGLIST
ArgList:
- ArgIndicies: [ 116, 65 ] # (int, double)
+ ArgIndices: [ 116, 65 ] # (int, double)
# int main(int argc, char **argv) [Index: 0x1005]
- Kind: LF_PROCEDURE
Procedure:
@@ -65,10 +65,10 @@ multiple_personality:
@ CHECK: .personalityindex 0
@ CHECK: ^

- .global multiple_personality_indicies
- .type multiple_personality_indicies,%function
+ .global multiple_personality_indices
+ .type multiple_personality_indices,%function
.thumb_func
- multiple_personality_indicies:
+ multiple_personality_indices:
.fnstart
.personalityindex 0
.personalityindex 1
@@ -131,7 +131,7 @@ end:
ret i32 %r
}

- ; But the indicies must match
+ ; But the indices must match
define i32 @test4({ i32, i32 } %agg_left, { i32, i32 } %agg_right, i1 %c) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
@@ -162,7 +162,7 @@ end:
ret i32 %r
}

- ; More complex aggregates are fine, too, as long as indicies match.
+ ; More complex aggregates are fine, too, as long as indices match.
define i32 @test5({{ i32, i32 }, { i32, i32 }} %agg_left, {{ i32, i32 }, { i32, i32 }} %agg_right, i1 %c) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
@@ -192,7 +192,7 @@ end:
ret i32 %r
}

- ; The indicies must fully match, on all levels.
+ ; The indices must fully match, on all levels.
define i32 @test6({{ i32, i32 }, { i32, i32 }} %agg_left, {{ i32, i32 }, { i32, i32 }} %agg_right, i1 %c) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
@@ -282,7 +282,7 @@ end:
}

; Also, unlike PHI-of-insertvalues, here the base aggregates of extractvalue
- ; can have different types, and just checking the indicies is not enough.
+ ; can have different types, and just checking the indices is not enough.
define i32 @test9({ i32, i32 } %agg_left, { i32, { i32, i32 } } %agg_right, i1 %c) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: entry:
@@ -192,7 +192,7 @@ end:
ret { i32, i32 } %r
}

- ; But the indicies must match
+ ; But the indices must match
define { i32, i32 } @test6({ i32, i32 } %agg, i32 %val_left, i32 %val_right, i1 %c) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
@@ -223,7 +223,7 @@ end:
ret { i32, i32 } %r
}

- ; More complex aggregates are fine, too, as long as indicies match.
+ ; More complex aggregates are fine, too, as long as indices match.
define {{ i32, i32 }, { i32, i32 }} @test7({{ i32, i32 }, { i32, i32 }} %agg, i32 %val_left, i32 %val_right, i1 %c) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: entry:
@@ -253,7 +253,7 @@ end:
ret {{ i32, i32 }, { i32, i32 }} %r
}

- ; The indicies must fully match, on all levels.
+ ; The indices must fully match, on all levels.
define {{ i32, i32 }, { i32, i32 }} @test8({{ i32, i32 }, { i32, i32 }} %agg, i32 %val_left, i32 %val_right, i1 %c) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: entry:
@@ -85,8 +85,8 @@ define void @both_operands_need_extraction.4elts(<4 x ptr> %baseptrs, <4 x i64>

;-------------------------------------------------------------------------------

- define void @indicies_need_extraction.2elts(ptr %baseptr, <2 x i64> %indices) {
- ; CHECK-LABEL: @indicies_need_extraction.2elts(
+ define void @indices_need_extraction.2elts(ptr %baseptr, <2 x i64> %indices) {
+ ; CHECK-LABEL: @indices_need_extraction.2elts(
; CHECK-NEXT: [[PTRS:%.*]] = getelementptr inbounds i64, ptr [[BASEPTR:%.*]], <2 x i64> [[INDICES:%.*]]
; CHECK-NEXT: [[PTR_0:%.*]] = extractelement <2 x ptr> [[PTRS]], i64 0
; CHECK-NEXT: call void @use(ptr [[PTR_0]])
@@ -105,8 +105,8 @@ define void @indicies_need_extraction.2elts(ptr %baseptr, <2 x i64> %indices) {
ret void
}

- define void @indicies_need_extraction.3elts(ptr %baseptr, <3 x i64> %indices) {
- ; CHECK-LABEL: @indicies_need_extraction.3elts(
+ define void @indices_need_extraction.3elts(ptr %baseptr, <3 x i64> %indices) {
+ ; CHECK-LABEL: @indices_need_extraction.3elts(
; CHECK-NEXT: [[PTRS:%.*]] = getelementptr inbounds i64, ptr [[BASEPTR:%.*]], <3 x i64> [[INDICES:%.*]]
; CHECK-NEXT: [[PTR_0:%.*]] = extractelement <3 x ptr> [[PTRS]], i64 0
; CHECK-NEXT: call void @use(ptr [[PTR_0]])
@@ -130,8 +130,8 @@ define void @indicies_need_extraction.3elts(ptr %baseptr, <3 x i64> %indices) {
ret void
}

- define void @indicies_need_extraction.4elts(ptr %baseptr, <4 x i64> %indices) {
- ; CHECK-LABEL: @indicies_need_extraction.4elts(
+ define void @indices_need_extraction.4elts(ptr %baseptr, <4 x i64> %indices) {
+ ; CHECK-LABEL: @indices_need_extraction.4elts(
; CHECK-NEXT: [[PTRS:%.*]] = getelementptr inbounds i64, ptr [[BASEPTR:%.*]], <4 x i64> [[INDICES:%.*]]
; CHECK-NEXT: [[PTR_0:%.*]] = extractelement <4 x ptr> [[PTRS]], i64 0
; CHECK-NEXT: call void @use(ptr [[PTR_0]])
@@ -3385,13 +3385,13 @@ struct Conv1DGenerator
auto rhsSize = cast<VectorType>(rhs.getType()).getShape()[0];
auto resSize = cast<VectorType>(res.getType()).getShape()[1];

- SmallVector<int64_t, 16> indicies;
+ SmallVector<int64_t, 16> indices;
for (int i = 0; i < resSize / rhsSize; ++i) {
for (int j = 0; j < rhsSize; ++j)
- indicies.push_back(j);
+ indices.push_back(j);
}

- rhs = rewriter.create<vector::ShuffleOp>(loc, rhs, rhs, indicies);
+ rhs = rewriter.create<vector::ShuffleOp>(loc, rhs, rhs, indices);
}
// Broadcast the filter to match the output vector
rhs = rewriter.create<vector::BroadcastOp>(