mlir/tblgen: use std::optional in generation

This is part of an effort to migrate from llvm::Optional to
std::optional. This patch changes the way mlir-tblgen generates .inc
files, and modifies tests and documentation appropriately. It is a "no
compromises" patch, and doesn't leave the user with an unpleasant mix of
llvm::Optional and std::optional.
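
The renames involved are the ones the Discourse thread below catalogues: hasValue/getValue/getValueOr on llvm::Optional become has_value/value/value_or on std::optional, and llvm::None becomes std::nullopt. A minimal sketch (not part of the patch; the function and values are hypothetical):

```
#include <cstdio>
#include <optional>

// std::optional replaces llvm::Optional; the old camel-case accessors map
// onto the std:: spellings, and llvm::None becomes std::nullopt.
std::optional<int> parseWidth(bool present) {
  if (!present)
    return std::nullopt; // was: return llvm::None;
  return 32;
}

int main() {
  std::optional<int> w = parseWidth(true);
  if (w.has_value())                        // was: w.hasValue()
    std::printf("width = %d\n", w.value()); // was: w.getValue()
  // was: parseWidth(false).getValueOr(64)
  std::printf("fallback = %d\n", parseWidth(false).value_or(64));
  return 0;
}
```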

A non-trivial change has been made to ControlFlowInterfaces to split one
constructor into two, working around a build failure on Windows.
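
The hunk in question appears in the ControlFlowInterfaces section further down. In outline, with stand-in types rather than the real MLIR classes:

```
#include <optional>
#include <vector>

using ValueRange = std::vector<int>; // stand-in for mlir::ValueRange

struct RegionSuccessor {
  // Before: one constructor with a defaulted std::optional parameter.
  //   RegionSuccessor(std::optional<Operation::result_range> results = {})
  //       : inputs(results ? ValueRange(*results) : ValueRange()) {}
  //
  // After: an explicit-range constructor plus a no-argument constructor,
  // sidestepping the defaulted std::optional that broke the Windows build.
  RegionSuccessor(ValueRange results) : inputs(results) {}
  RegionSuccessor() : inputs(ValueRange()) {}

  ValueRange inputs;
};

int main() {
  RegionSuccessor fromResults(ValueRange{1, 2, 3});
  RegionSuccessor parent; // branches back to/out of the parent operation
  return static_cast<int>(fromResults.inputs.size() + parent.inputs.size()) - 3;
}
```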

See also: https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716

Signed-off-by: Ramkumar Ramachandra <r@artagnon.com>

Differential Revision: https://reviews.llvm.org/D138934

commit 22426110c5 (parent a0f168fa89)
Author: Ramkumar Ramachandra <r@artagnon.com>
Date:   2022-12-14 11:39:19 +01:00

128 changed files with 689 additions and 660 deletions


@ -483,15 +483,15 @@ class fir_SwitchTerminatorOp<string mnemonic, list<Trait> traits = []> :
// The number of blocks that may be branched to
unsigned getNumDest() { return (*this)->getNumSuccessors(); }
llvm::Optional<mlir::OperandRange> getCompareOperands(unsigned cond);
llvm::Optional<llvm::ArrayRef<mlir::Value>> getCompareOperands(
std::optional<mlir::OperandRange> getCompareOperands(unsigned cond);
std::optional<llvm::ArrayRef<mlir::Value>> getCompareOperands(
llvm::ArrayRef<mlir::Value> operands, unsigned cond);
llvm::Optional<mlir::ValueRange> getCompareOperands(
std::optional<mlir::ValueRange> getCompareOperands(
mlir::ValueRange operands, unsigned cond);
llvm::Optional<llvm::ArrayRef<mlir::Value>> getSuccessorOperands(
std::optional<llvm::ArrayRef<mlir::Value>> getSuccessorOperands(
llvm::ArrayRef<mlir::Value> operands, unsigned cond);
llvm::Optional<mlir::ValueRange> getSuccessorOperands(
std::optional<mlir::ValueRange> getSuccessorOperands(
mlir::ValueRange operands, unsigned cond);
// Helper function to deal with Optional operand forms
@ -2426,16 +2426,16 @@ def fir_StringLitOp : fir_Op<"string_lit", [NoMemoryEffect]> {
let builders = [
OpBuilder<(ins "fir::CharacterType":$inType,
"llvm::StringRef":$value,
CArg<"llvm::Optional<int64_t>", "{}">:$len)>,
CArg<"std::optional<int64_t>", "{}">:$len)>,
OpBuilder<(ins "fir::CharacterType":$inType,
"llvm::ArrayRef<char>":$xlist,
CArg<"llvm::Optional<int64_t>", "{}">:$len)>,
CArg<"std::optional<int64_t>", "{}">:$len)>,
OpBuilder<(ins "fir::CharacterType":$inType,
"llvm::ArrayRef<char16_t>":$xlist,
CArg<"llvm::Optional<int64_t>", "{}">:$len)>,
CArg<"std::optional<int64_t>", "{}">:$len)>,
OpBuilder<(ins "fir::CharacterType":$inType,
"llvm::ArrayRef<char32_t>":$xlist,
CArg<"llvm::Optional<int64_t>", "{}">:$len)>];
CArg<"std::optional<int64_t>", "{}">:$len)>];
let extraClassDeclaration = [{
static constexpr const char *size() { return "size"; }


@ -36,7 +36,7 @@ def fir_FortranVariableOpInterface : OpInterface<"FortranVariableOpInterface"> {
>,
InterfaceMethod<
/*desc=*/"Get Fortran attributes",
/*retTy=*/"llvm::Optional<fir::FortranVariableFlagsEnum>",
/*retTy=*/"std::optional<fir::FortranVariableFlagsEnum>",
/*methodName=*/"getFortranAttrs",
/*args=*/(ins),
/*methodBody=*/[{}],
@ -91,7 +91,7 @@ def fir_FortranVariableOpInterface : OpInterface<"FortranVariableOpInterface"> {
}
/// Return the rank of the entity if it is known at compile time.
llvm::Optional<unsigned> getRank() {
std::optional<unsigned> getRank() {
if (auto sequenceType =
getElementOrSequenceType().dyn_cast<fir::SequenceType>()) {
if (sequenceType.hasUnknownShape())


@ -202,7 +202,7 @@ def hlfir_DesignateOp : hlfir_Op<"designate", [AttrSizedOperandSegments,
"llvm::StringRef":$component, "mlir::Value":$component_shape,
"llvm::ArrayRef<std::variant<mlir::Value, std::tuple<mlir::Value, mlir::Value, mlir::Value>>>":$subscripts,
CArg<"mlir::ValueRange", "{}">:$substring,
CArg<"llvm::Optional<bool>", "{}">:$complex_part,
CArg<"std::optional<bool>", "{}">:$complex_part,
CArg<"mlir::Value", "{}">:$shape, CArg<"mlir::ValueRange", "{}">:$typeparams,
CArg<"fir::FortranVariableFlagsAttr", "{}">:$fortran_attrs)>,


@ -552,7 +552,7 @@ void CodeGenAction::generateLLVMIR() {
}
// Translate to LLVM IR
llvm::Optional<llvm::StringRef> moduleName = mlirModule->getName();
std::optional<llvm::StringRef> moduleName = mlirModule->getName();
llvmModule = mlir::translateModuleToLLVMIR(
*mlirModule, *llvmCtx, moduleName ? *moduleName : "FIRModule");


@ -99,7 +99,7 @@ private:
else
resultType = fir::ReferenceType::get(resultValueType);
llvm::Optional<bool> complexPart;
std::optional<bool> complexPart;
llvm::SmallVector<mlir::Value> substring;
auto designate = getBuilder().create<hlfir::DesignateOp>(
getLoc(), resultType, partInfo.base.getBase(), "",


@ -1618,7 +1618,7 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
mlir::Value base, mlir::Value outerOffset,
mlir::ValueRange cstInteriorIndices,
mlir::ValueRange componentIndices,
llvm::Optional<mlir::Value> substringOffset) const {
std::optional<mlir::Value> substringOffset) const {
llvm::SmallVector<mlir::LLVM::GEPArg> gepArgs{outerOffset};
mlir::Type resultTy =
base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
@ -1907,7 +1907,7 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
if (hasSlice || hasSubcomp || hasSubstr) {
// Shift the base address.
llvm::SmallVector<mlir::Value> fieldIndices;
llvm::Optional<mlir::Value> substringOffset;
std::optional<mlir::Value> substringOffset;
if (hasSubcomp)
getSubcomponentIndices(xbox, xbox.getMemref(), operands, fieldIndices);
if (hasSubstr)
@ -2047,7 +2047,7 @@ private:
base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base);
llvm::SmallVector<mlir::Value> fieldIndices;
llvm::Optional<mlir::Value> substringOffset;
std::optional<mlir::Value> substringOffset;
if (!rebox.getSubcomponent().empty())
getSubcomponentIndices(rebox, rebox.getBox(), operands, fieldIndices);
if (!rebox.getSubstr().empty())
@ -2725,7 +2725,7 @@ private:
if (hasKnownShape && hasSubdimension) {
offs.push_back(0);
}
llvm::Optional<int> dims;
std::optional<int> dims;
llvm::SmallVector<mlir::Value> arrIdx;
for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) {
mlir::Value nxtOpnd = operands[i];
@ -2930,7 +2930,7 @@ struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
// TODO: String comparison should be avoided. Replace linkName with an
// enumeration.
mlir::LLVM::Linkage
convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const {
convertLinkage(std::optional<llvm::StringRef> optLinkage) const {
if (optLinkage) {
auto name = *optLinkage;
if (name == "internal")
@ -3002,7 +3002,7 @@ struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
};
static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
llvm::Optional<mlir::ValueRange> destOps,
std::optional<mlir::ValueRange> destOps,
mlir::ConversionPatternRewriter &rewriter,
mlir::Block *newBlock) {
if (destOps)
@ -3013,7 +3013,7 @@ static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
}
template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
static void genBrOp(A caseOp, mlir::Block *dest, std::optional<B> destOps,
mlir::ConversionPatternRewriter &rewriter) {
if (destOps)
rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, *destOps, dest);
@ -3023,7 +3023,7 @@ static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
mlir::Block *dest,
llvm::Optional<mlir::ValueRange> destOps,
std::optional<mlir::ValueRange> destOps,
mlir::ConversionPatternRewriter &rewriter) {
auto *thisBlock = rewriter.getInsertionBlock();
auto *newBlock = createBlock(rewriter, dest);
@ -3069,9 +3069,9 @@ struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
auto loc = caseOp.getLoc();
for (unsigned t = 0; t != conds; ++t) {
mlir::Block *dest = caseOp.getSuccessor(t);
llvm::Optional<mlir::ValueRange> destOps =
std::optional<mlir::ValueRange> destOps =
caseOp.getSuccessorOperands(adaptor.getOperands(), t);
llvm::Optional<mlir::ValueRange> cmpOps =
std::optional<mlir::ValueRange> cmpOps =
*caseOp.getCompareOperands(adaptor.getOperands(), t);
mlir::Value caseArg = *(cmpOps.value().begin());
mlir::Attribute attr = cases[t];


@ -2529,11 +2529,11 @@ getMutableSuccessorOperands(unsigned pos, mlir::MutableOperandRange operands,
mlir::MutableOperandRange::OperandSegment(pos, targetOffsetAttr));
}
llvm::Optional<mlir::OperandRange> fir::SelectOp::getCompareOperands(unsigned) {
std::optional<mlir::OperandRange> fir::SelectOp::getCompareOperands(unsigned) {
return {};
}
llvm::Optional<llvm::ArrayRef<mlir::Value>>
std::optional<llvm::ArrayRef<mlir::Value>>
fir::SelectOp::getCompareOperands(llvm::ArrayRef<mlir::Value>, unsigned) {
return {};
}
@ -2543,7 +2543,7 @@ mlir::SuccessorOperands fir::SelectOp::getSuccessorOperands(unsigned oper) {
oper, getTargetArgsMutable(), getTargetOffsetAttr()));
}
llvm::Optional<llvm::ArrayRef<mlir::Value>>
std::optional<llvm::ArrayRef<mlir::Value>>
fir::SelectOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
unsigned oper) {
auto a =
@ -2553,7 +2553,7 @@ fir::SelectOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
return {getSubOperands(oper, getSubOperands(2, operands, segments), a)};
}
llvm::Optional<mlir::ValueRange>
std::optional<mlir::ValueRange>
fir::SelectOp::getSuccessorOperands(mlir::ValueRange operands, unsigned oper) {
auto a =
(*this)->getAttrOfType<mlir::DenseI32ArrayAttr>(getTargetOffsetAttr());
@ -2572,14 +2572,14 @@ unsigned fir::SelectOp::targetOffsetSize() {
// SelectCaseOp
//===----------------------------------------------------------------------===//
llvm::Optional<mlir::OperandRange>
std::optional<mlir::OperandRange>
fir::SelectCaseOp::getCompareOperands(unsigned cond) {
auto a =
(*this)->getAttrOfType<mlir::DenseI32ArrayAttr>(getCompareOffsetAttr());
return {getSubOperands(cond, getCompareArgs(), a)};
}
llvm::Optional<llvm::ArrayRef<mlir::Value>>
std::optional<llvm::ArrayRef<mlir::Value>>
fir::SelectCaseOp::getCompareOperands(llvm::ArrayRef<mlir::Value> operands,
unsigned cond) {
auto a =
@ -2589,7 +2589,7 @@ fir::SelectCaseOp::getCompareOperands(llvm::ArrayRef<mlir::Value> operands,
return {getSubOperands(cond, getSubOperands(1, operands, segments), a)};
}
llvm::Optional<mlir::ValueRange>
std::optional<mlir::ValueRange>
fir::SelectCaseOp::getCompareOperands(mlir::ValueRange operands,
unsigned cond) {
auto a =
@ -2604,7 +2604,7 @@ mlir::SuccessorOperands fir::SelectCaseOp::getSuccessorOperands(unsigned oper) {
oper, getTargetArgsMutable(), getTargetOffsetAttr()));
}
llvm::Optional<llvm::ArrayRef<mlir::Value>>
std::optional<llvm::ArrayRef<mlir::Value>>
fir::SelectCaseOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
unsigned oper) {
auto a =
@ -2614,7 +2614,7 @@ fir::SelectCaseOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
return {getSubOperands(oper, getSubOperands(2, operands, segments), a)};
}
llvm::Optional<mlir::ValueRange>
std::optional<mlir::ValueRange>
fir::SelectCaseOp::getSuccessorOperands(mlir::ValueRange operands,
unsigned oper) {
auto a =
@ -2864,12 +2864,12 @@ void fir::SelectRankOp::print(mlir::OpAsmPrinter &p) {
printIntegralSwitchTerminator(*this, p);
}
llvm::Optional<mlir::OperandRange>
std::optional<mlir::OperandRange>
fir::SelectRankOp::getCompareOperands(unsigned) {
return {};
}
llvm::Optional<llvm::ArrayRef<mlir::Value>>
std::optional<llvm::ArrayRef<mlir::Value>>
fir::SelectRankOp::getCompareOperands(llvm::ArrayRef<mlir::Value>, unsigned) {
return {};
}
@ -2879,7 +2879,7 @@ mlir::SuccessorOperands fir::SelectRankOp::getSuccessorOperands(unsigned oper) {
oper, getTargetArgsMutable(), getTargetOffsetAttr()));
}
llvm::Optional<llvm::ArrayRef<mlir::Value>>
std::optional<llvm::ArrayRef<mlir::Value>>
fir::SelectRankOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
unsigned oper) {
auto a =
@ -2889,7 +2889,7 @@ fir::SelectRankOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
return {getSubOperands(oper, getSubOperands(2, operands, segments), a)};
}
llvm::Optional<mlir::ValueRange>
std::optional<mlir::ValueRange>
fir::SelectRankOp::getSuccessorOperands(mlir::ValueRange operands,
unsigned oper) {
auto a =
@ -2909,12 +2909,12 @@ unsigned fir::SelectRankOp::targetOffsetSize() {
// SelectTypeOp
//===----------------------------------------------------------------------===//
llvm::Optional<mlir::OperandRange>
std::optional<mlir::OperandRange>
fir::SelectTypeOp::getCompareOperands(unsigned) {
return {};
}
llvm::Optional<llvm::ArrayRef<mlir::Value>>
std::optional<llvm::ArrayRef<mlir::Value>>
fir::SelectTypeOp::getCompareOperands(llvm::ArrayRef<mlir::Value>, unsigned) {
return {};
}
@ -2924,7 +2924,7 @@ mlir::SuccessorOperands fir::SelectTypeOp::getSuccessorOperands(unsigned oper) {
oper, getTargetArgsMutable(), getTargetOffsetAttr()));
}
llvm::Optional<llvm::ArrayRef<mlir::Value>>
std::optional<llvm::ArrayRef<mlir::Value>>
fir::SelectTypeOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
unsigned oper) {
auto a =
@ -2934,7 +2934,7 @@ fir::SelectTypeOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
return {getSubOperands(oper, getSubOperands(2, operands, segments), a)};
}
llvm::Optional<mlir::ValueRange>
std::optional<mlir::ValueRange>
fir::SelectTypeOp::getSuccessorOperands(mlir::ValueRange operands,
unsigned oper) {
auto a =
@ -3225,7 +3225,7 @@ mkNamedIntegerAttr(mlir::OpBuilder &builder, llvm::StringRef name, int64_t v) {
void fir::StringLitOp::build(mlir::OpBuilder &builder,
mlir::OperationState &result,
fir::CharacterType inType, llvm::StringRef val,
llvm::Optional<int64_t> len) {
std::optional<int64_t> len) {
auto valAttr = builder.getNamedAttr(value(), builder.getStringAttr(val));
int64_t length = len ? *len : inType.getLen();
auto lenAttr = mkNamedIntegerAttr(builder, size(), length);
@ -3247,7 +3247,7 @@ void fir::StringLitOp::build(mlir::OpBuilder &builder,
mlir::OperationState &result,
fir::CharacterType inType,
llvm::ArrayRef<char> vlist,
llvm::Optional<std::int64_t> len) {
std::optional<std::int64_t> len) {
auto valAttr =
builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist));
std::int64_t length = len ? *len : inType.getLen();
@ -3260,7 +3260,7 @@ void fir::StringLitOp::build(mlir::OpBuilder &builder,
mlir::OperationState &result,
fir::CharacterType inType,
llvm::ArrayRef<char16_t> vlist,
llvm::Optional<std::int64_t> len) {
std::optional<std::int64_t> len) {
auto valAttr =
builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist));
std::int64_t length = len ? *len : inType.getLen();
@ -3273,7 +3273,7 @@ void fir::StringLitOp::build(mlir::OpBuilder &builder,
mlir::OperationState &result,
fir::CharacterType inType,
llvm::ArrayRef<char32_t> vlist,
llvm::Optional<std::int64_t> len) {
std::optional<std::int64_t> len) {
auto valAttr =
builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist));
std::int64_t length = len ? *len : inType.getLen();


@ -57,7 +57,7 @@ fir::FortranVariableOpInterface::verifyDeclareLikeOpImpl(mlir::Value memref) {
shapeRank = shape.getType().cast<fir::ShiftType>().getRank();
}
llvm::Optional<unsigned> rank = getRank();
std::optional<unsigned> rank = getRank();
if (!rank || *rank != shapeRank)
return emitOpError("has conflicting shape and base operand ranks");
} else if (!sourceIsBox) {


@ -87,7 +87,7 @@ void hlfir::DesignateOp::build(
mlir::OpBuilder &builder, mlir::OperationState &result,
mlir::Type result_type, mlir::Value memref, llvm::StringRef component,
mlir::Value component_shape, llvm::ArrayRef<Subscript> subscripts,
mlir::ValueRange substring, llvm::Optional<bool> complex_part,
mlir::ValueRange substring, std::optional<bool> complex_part,
mlir::Value shape, mlir::ValueRange typeparams,
fir::FortranVariableFlagsAttr fortran_attrs) {
auto componentAttr =


@ -397,7 +397,7 @@ public:
for (unsigned idx : orderedTypeGuards) {
auto *dest = selectType.getSuccessor(idx);
llvm::Optional<mlir::ValueRange> destOps =
std::optional<mlir::ValueRange> destOps =
selectType.getSuccessorOperands(operands, idx);
if (typeGuards[idx].dyn_cast<mlir::UnitAttr>())
rewriter.replaceOpWithNewOp<mlir::cf::BranchOp>(selectType, dest);
@ -470,12 +470,13 @@ public:
return 0;
}
mlir::LogicalResult
genTypeLadderStep(mlir::Location loc, mlir::Value selector,
mlir::Attribute attr, mlir::Block *dest,
llvm::Optional<mlir::ValueRange> destOps,
mlir::ModuleOp mod, mlir::PatternRewriter &rewriter,
fir::KindMapping &kindMap) const {
mlir::LogicalResult genTypeLadderStep(mlir::Location loc,
mlir::Value selector,
mlir::Attribute attr, mlir::Block *dest,
std::optional<mlir::ValueRange> destOps,
mlir::ModuleOp mod,
mlir::PatternRewriter &rewriter,
fir::KindMapping &kindMap) const {
mlir::Value cmp;
// TYPE IS type guard comparisons are all done inline.
if (auto a = attr.dyn_cast<fir::ExactTypeAttr>()) {


@ -116,7 +116,7 @@ public:
auto loc = alloca.getLoc();
mlir::Type varTy = alloca.getInType();
auto unpackName =
[](llvm::Optional<llvm::StringRef> opt) -> llvm::StringRef {
[](std::optional<llvm::StringRef> opt) -> llvm::StringRef {
if (opt)
return *opt;
return {};


@ -1341,9 +1341,9 @@ enum class MyIntEnum : uint32_t {
Case20 = 20,
};
llvm::Optional<MyIntEnum> symbolizeMyIntEnum(uint32_t);
std::optional<MyIntEnum> symbolizeMyIntEnum(uint32_t);
llvm::StringRef ConvertToString(MyIntEnum);
llvm::Optional<MyIntEnum> ConvertToEnum(llvm::StringRef);
std::optional<MyIntEnum> ConvertToEnum(llvm::StringRef);
inline constexpr unsigned getMaxEnumValForMyIntEnum() {
return 20;
}
@ -1387,13 +1387,13 @@ llvm::StringRef ConvertToString(MyIntEnum val) {
return "";
}
llvm::Optional<MyIntEnum> ConvertToEnum(llvm::StringRef str) {
return llvm::StringSwitch<llvm::Optional<MyIntEnum>>(str)
std::optional<MyIntEnum> ConvertToEnum(llvm::StringRef str) {
return llvm::StringSwitch<std::optional<MyIntEnum>>(str)
.Case("Case15", MyIntEnum::Case15)
.Case("Case20", MyIntEnum::Case20)
.Default(std::nullopt);
}
llvm::Optional<MyIntEnum> symbolizeMyIntEnum(uint32_t value) {
std::optional<MyIntEnum> symbolizeMyIntEnum(uint32_t value) {
switch (value) {
case 15: return MyIntEnum::Case15;
case 20: return MyIntEnum::Case20;
@ -1430,9 +1430,9 @@ enum class MyBitEnum : uint32_t {
Bit3 = 8,
};
llvm::Optional<MyBitEnum> symbolizeMyBitEnum(uint32_t);
std::optional<MyBitEnum> symbolizeMyBitEnum(uint32_t);
std::string stringifyMyBitEnum(MyBitEnum);
llvm::Optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef);
std::optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef);
inline constexpr MyBitEnum operator|(MyBitEnum a, MyBitEnum b) {
return static_cast<MyBitEnum>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
@ -1462,10 +1462,10 @@ inline std::string stringifyEnum(MyBitEnum enumValue) {
}
template <typename EnumType>
::llvm::Optional<EnumType> symbolizeEnum(::llvm::StringRef);
::std::optional<EnumType> symbolizeEnum(::llvm::StringRef);
template <>
inline ::llvm::Optional<MyBitEnum> symbolizeEnum<MyBitEnum>(::llvm::StringRef str) {
inline ::std::optional<MyBitEnum> symbolizeEnum<MyBitEnum>(::llvm::StringRef str) {
return symbolizeMyBitEnum(str);
}
@ -1506,7 +1506,7 @@ std::string stringifyMyBitEnum(MyBitEnum symbol) {
return llvm::join(strs, "|");
}
llvm::Optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef str) {
std::optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef str) {
// Special case for all bits unset.
if (str == "None") return MyBitEnum::None;
@ -1515,7 +1515,7 @@ llvm::Optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef str) {
uint32_t val = 0;
for (auto symbol : symbols) {
auto bit = llvm::StringSwitch<llvm::Optional<uint32_t>>(symbol)
auto bit = llvm::StringSwitch<std::optional<uint32_t>>(symbol)
.Case("tagged", 1)
.Case("Bit1", 2)
.Case("Bit2", 4)
@ -1526,7 +1526,7 @@ llvm::Optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef str) {
return static_cast<MyBitEnum>(val);
}
llvm::Optional<MyBitEnum> symbolizeMyBitEnum(uint32_t value) {
std::optional<MyBitEnum> symbolizeMyBitEnum(uint32_t value) {
// Special case for all bits unset.
if (value == 0) return MyBitEnum::None;
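
Call sites of these generated helpers move from the camel-case accessors to the std::optional API. A self-contained usage sketch, re-implementing the generated symbolizeMyIntEnum from the hunk above so it compiles on its own:

```
#include <cstdint>
#include <cstdio>
#include <optional>

enum class MyIntEnum : uint32_t { Case15 = 15, Case20 = 20 };

// Copy of the generated helper shown above, so this sketch is standalone.
std::optional<MyIntEnum> symbolizeMyIntEnum(uint32_t value) {
  switch (value) {
  case 15: return MyIntEnum::Case15;
  case 20: return MyIntEnum::Case20;
  default: return std::nullopt;
  }
}

int main() {
  // The optional converts contextually to bool; *e unwraps the value.
  if (std::optional<MyIntEnum> e = symbolizeMyIntEnum(15))
    std::printf("symbolized value: %u\n", static_cast<unsigned>(*e));
  return 0;
}
```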


@ -631,14 +631,14 @@ def batchmatmulOp : LinalgNamedStructured_Op<"batchmatmul", [
When `mlir-linalg-ods-gen -gen-impl=1` is called, the following C++ is produced:
```
llvm::Optional<SmallVector<StringRef, 8>> batchmatmul::referenceIterators() {
std::optional<SmallVector<StringRef, 8>> batchmatmul::referenceIterators() {
return SmallVector<StringRef, 8>{
getParallelIteratorTypeName(),
getParallelIteratorTypeName(),
getParallelIteratorTypeName(),
getReductionIteratorTypeName() };
}
llvm::Optional<SmallVector<AffineMap, 8>> batchmatmul::referenceIndexingMaps() {
std::optional<SmallVector<AffineMap, 8>> batchmatmul::referenceIndexingMaps() {
MLIRContext *context = getContext();
AffineExpr d0, d1, d2, d3;
bindDims(context, d0, d1, d2, d3);


@ -243,7 +243,7 @@ private:
/// regions or the parent operation itself, and set either the argument or
/// parent result lattices.
void visitRegionSuccessors(ProgramPoint point, RegionBranchOpInterface branch,
Optional<unsigned> successorIndex,
std::optional<unsigned> successorIndex,
ArrayRef<AbstractSparseLattice *> lattices);
};


@ -61,7 +61,7 @@ protected:
int64_t alignedAllocationGetAlignment(ConversionPatternRewriter &rewriter,
Location loc, OpType op,
const DataLayout *defaultLayout) const {
if (Optional<uint64_t> alignment = op.getAlignment())
if (std::optional<uint64_t> alignment = op.getAlignment())
return *alignment;
// Whenever we don't have alignment set, we will use an alignment


@ -708,7 +708,7 @@ def AffineParallelOp : Affine_Op<"parallel",
unsigned getNumDims();
/// Get ranges as constants, may fail in dynamic case.
Optional<SmallVector<int64_t, 8>> getConstantRanges();
std::optional<SmallVector<int64_t, 8>> getConstantRanges();
Block *getBody();
OpBuilder getBodyBuilder();


@ -268,7 +268,7 @@ def Arith_AddUIExtendedOp : Arith_Op<"addui_extended", [Pure, Commutative,
let hasCanonicalizer = 1;
let extraClassDeclaration = [{
Optional<SmallVector<int64_t, 4>> getShapeForUnroll();
std::optional<SmallVector<int64_t, 4>> getShapeForUnroll();
}];
}
@ -330,7 +330,7 @@ def Arith_MulSIExtendedOp : Arith_Op<"mulsi_extended", [Pure, Commutative,
let hasCanonicalizer = 1;
let extraClassDeclaration = [{
Optional<SmallVector<int64_t, 4>> getShapeForUnroll();
std::optional<SmallVector<int64_t, 4>> getShapeForUnroll();
}];
}
@ -373,7 +373,7 @@ def Arith_MulUIExtendedOp : Arith_Op<"mului_extended", [Pure, Commutative,
let hasCanonicalizer = 1;
let extraClassDeclaration = [{
Optional<SmallVector<int64_t, 4>> getShapeForUnroll();
std::optional<SmallVector<int64_t, 4>> getShapeForUnroll();
}];
}


@ -43,7 +43,7 @@ namespace gpu {
DiagnosedSilenceableFailure mapNestedForeachToThreadsImpl(
RewriterBase &rewriter, Operation *target,
const SmallVectorImpl<int64_t> &blockDim, bool syncAfterDistribute,
llvm::Optional<TransformOpInterface> transformOp,
std::optional<TransformOpInterface> transformOp,
const ArrayRef<DeviceMappingAttrInterface> &threadMappingAttributes);
/// Maps the top level `scf.foreach_thread` op to GPU Thread Blocks. Mapping is


@ -990,8 +990,8 @@ def NVVM_MmaOp : NVVM_Op<"mma.sync", [AttrSizedOperandSegments]> {
let extraClassDeclaration = !strconcat([{
static llvm::Intrinsic::ID getIntrinsicID(
int64_t m, int64_t n, uint64_t k,
llvm::Optional<MMAB1Op> b1Op,
llvm::Optional<MMAIntOverflow> sat,
std::optional<MMAB1Op> b1Op,
std::optional<MMAIntOverflow> sat,
mlir::NVVM::MMALayout layoutAEnum, mlir::NVVM::MMALayout layoutBEnum,
mlir::NVVM::MMATypes eltypeAEnum, mlir::NVVM::MMATypes eltypeBEnum,
mlir::NVVM::MMATypes eltypeCEnum, mlir::NVVM::MMATypes eltypeDEnum) {
@ -1006,7 +1006,7 @@ def NVVM_MmaOp : NVVM_Op<"mma.sync", [AttrSizedOperandSegments]> {
return 0;
}
static Optional<mlir::NVVM::MMATypes> inferOperandMMAType(Type operandElType,
static std::optional<mlir::NVVM::MMATypes> inferOperandMMAType(Type operandElType,
bool isAccumulator);
MMATypes accumPtxType();
@ -1016,10 +1016,10 @@ def NVVM_MmaOp : NVVM_Op<"mma.sync", [AttrSizedOperandSegments]> {
let builders = [
OpBuilder<(ins "Type":$resultType, "ValueRange":$operandA,
"ValueRange":$operandB, "ValueRange":$operandC,
"ArrayRef<int64_t>":$shape, "Optional<MMAB1Op>":$b1Op,
"Optional<MMAIntOverflow>":$intOverflow,
"Optional<std::array<MMATypes, 2>>":$multiplicandPtxTypes,
"Optional<std::array<MMALayout, 2>>":$multiplicandLayouts)>
"ArrayRef<int64_t>":$shape, "std::optional<MMAB1Op>":$b1Op,
"std::optional<MMAIntOverflow>":$intOverflow,
"std::optional<std::array<MMATypes, 2>>":$multiplicandPtxTypes,
"std::optional<std::array<MMALayout, 2>>":$multiplicandLayouts)>
];
string llvmBuilder = [{


@ -47,7 +47,7 @@ class LinalgStructuredBase_Op<string mnemonic, list<Trait> props>
}
void getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// Op has a region, but conceptually the control flow does not enter the
// region.


@ -48,7 +48,7 @@ DiagnosedSilenceableFailure tileToForeachThreadOpImpl(
RewriterBase &rewriter, transform::TransformState &state,
TransformOpInterface transformOp, ArrayRef<Operation *> targets,
ArrayRef<OpFoldResult> mixedNumThreads,
ArrayRef<OpFoldResult> mixedTileSizes, Optional<ArrayAttr> mapping,
ArrayRef<OpFoldResult> mixedTileSizes, std::optional<ArrayAttr> mapping,
SmallVector<Operation *> &tileOps, SmallVector<Operation *> &tiledOps);
} // namespace transform


@ -244,7 +244,7 @@ FailureOr<GenericOp> generalizeNamedOp(RewriterBase &rewriter,
/// smallest constant value for the size of the buffer needed for each
/// dimension. If that is not possible, contains the dynamic size of the
/// subview. The call back should return the buffer to use.
using AllocBufferCallbackFn = std::function<Optional<Value>(
using AllocBufferCallbackFn = std::function<std::optional<Value>(
OpBuilder &b, memref::SubViewOp subView,
ArrayRef<Value> boundingSubViewSize, DataLayout &layout)>;
@ -262,7 +262,7 @@ using CopyCallbackFn =
struct LinalgPromotionOptions {
/// Indices of subViews to promote. If `None`, try to promote all operands.
Optional<DenseSet<unsigned>> operandsToPromote = std::nullopt;
std::optional<DenseSet<unsigned>> operandsToPromote = std::nullopt;
LinalgPromotionOptions &setOperandsToPromote(ArrayRef<int64_t> operands) {
operandsToPromote = DenseSet<unsigned>();
operandsToPromote->insert(operands.begin(), operands.end());
@ -273,7 +273,7 @@ struct LinalgPromotionOptions {
/// Otherwise the partial view will be used. The decision is defaulted to
/// `useFullTileBuffersDefault` when `useFullTileBuffers` is None and for
/// operands missing from `useFullTileBuffers`.
Optional<llvm::SmallBitVector> useFullTileBuffers = std::nullopt;
std::optional<llvm::SmallBitVector> useFullTileBuffers = std::nullopt;
LinalgPromotionOptions &setUseFullTileBuffers(ArrayRef<bool> useFullTiles) {
unsigned size = useFullTiles.size();
llvm::SmallBitVector tmp(size, false);
@ -290,7 +290,7 @@ struct LinalgPromotionOptions {
return *this;
}
/// Alignment of promoted buffer. If `None` do not specify alignment.
Optional<unsigned> alignment = std::nullopt;
std::optional<unsigned> alignment = std::nullopt;
LinalgPromotionOptions &setAlignment(unsigned align) {
alignment = align;
return *this;
@ -304,8 +304,8 @@ struct LinalgPromotionOptions {
/// Callback function to do the allocation of the promoted buffer. If None,
/// then the default allocation scheme of allocating a memref<?xi8> buffer
/// followed by a view operation is used.
Optional<AllocBufferCallbackFn> allocationFn = std::nullopt;
Optional<DeallocBufferCallbackFn> deallocationFn = std::nullopt;
std::optional<AllocBufferCallbackFn> allocationFn = std::nullopt;
std::optional<DeallocBufferCallbackFn> deallocationFn = std::nullopt;
LinalgPromotionOptions &
setAllocationDeallocationFns(AllocBufferCallbackFn const &allocFn,
DeallocBufferCallbackFn const &deallocFn) {
@ -315,8 +315,8 @@ struct LinalgPromotionOptions {
}
/// Callback function to do the copy of data to and from the promoted
/// subview. If None then a memref.copy is used.
Optional<CopyCallbackFn> copyInFn = std::nullopt;
Optional<CopyCallbackFn> copyOutFn = std::nullopt;
std::optional<CopyCallbackFn> copyInFn = std::nullopt;
std::optional<CopyCallbackFn> copyOutFn = std::nullopt;
LinalgPromotionOptions &setCopyInOutFns(CopyCallbackFn const &copyIn,
CopyCallbackFn const &copyOut) {
copyInFn = copyIn;
@ -469,14 +469,14 @@ struct ForeachThreadTilingResult {
FailureOr<ForeachThreadTilingResult>
tileToForeachThreadOp(RewriterBase &builder, TilingInterface op,
ArrayRef<OpFoldResult> numThreads,
Optional<ArrayAttr> mapping);
std::optional<ArrayAttr> mapping);
/// Same as `tileToForeachThreadOp`, but calculate the number of threads
/// required using the given tileSizes.
FailureOr<ForeachThreadTilingResult>
tileToForeachThreadOpUsingTileSizes(RewriterBase &builder, TilingInterface op,
ArrayRef<OpFoldResult> tileSizes,
Optional<ArrayAttr> mapping);
std::optional<ArrayAttr> mapping);
/// Transformation information returned after reduction tiling.
struct ForeachThreadReductionTilingResult {
@ -514,11 +514,10 @@ struct ForeachThreadReductionTilingResult {
/// %6 = linalg.generic %1 ["parallel", "reduction"]
/// : tensor<7x4xf32> -> tensor<7xf32>
/// ```
FailureOr<ForeachThreadReductionTilingResult>
tileReductionUsingForeachThread(RewriterBase &b, PartialReductionOpInterface op,
ArrayRef<OpFoldResult> numThreads,
ArrayRef<OpFoldResult> tileSizes = {},
Optional<ArrayAttr> mapping = std::nullopt);
FailureOr<ForeachThreadReductionTilingResult> tileReductionUsingForeachThread(
RewriterBase &b, PartialReductionOpInterface op,
ArrayRef<OpFoldResult> numThreads, ArrayRef<OpFoldResult> tileSizes = {},
std::optional<ArrayAttr> mapping = std::nullopt);
/// All indices returned by IndexOp should be invariant with respect to
/// tiling. Therefore, if an operation is tiled, we have to transform the
@ -623,7 +622,7 @@ struct LinalgTilingAndFusionOptions {
SmallVector<int64_t> tileInterchange;
/// When specified, specifies distribution of generated tile loops to
/// processors.
Optional<LinalgLoopDistributionOptions> tileDistribution = std::nullopt;
std::optional<LinalgLoopDistributionOptions> tileDistribution = std::nullopt;
LinalgTilingAndFusionOptions &
setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) {
tileDistribution = std::move(distributionOptions);
@ -676,7 +675,7 @@ struct LinalgTilingOptions {
/// When specified, specifies distribution of generated tile loops to
/// processors.
Optional<LinalgLoopDistributionOptions> distribution = std::nullopt;
std::optional<LinalgLoopDistributionOptions> distribution = std::nullopt;
LinalgTilingOptions &
setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) {
@ -806,7 +805,7 @@ struct CopyVectorizationPattern : public OpRewritePattern<memref::CopyOp> {
};
/// Return vector::CombiningKind for the given op.
llvm::Optional<vector::CombiningKind> getCombinerOpKind(Operation *combinerOp);
std::optional<vector::CombiningKind> getCombinerOpKind(Operation *combinerOp);
//===----------------------------------------------------------------------===//
// Transformations exposed as rewrite patterns.
@ -966,7 +965,7 @@ struct ExtractSliceOfPadTensorSwapPattern
///
/// See the documentation for tensor::bubbleUpPadSlice regarding zero slice
/// guard.
using ControlFn = std::function<llvm::Optional<bool>(tensor::ExtractSliceOp)>;
using ControlFn = std::function<std::optional<bool>(tensor::ExtractSliceOp)>;
ExtractSliceOfPadTensorSwapPattern(MLIRContext *context,
ControlFn controlFn = nullptr,


@ -586,7 +586,7 @@ def MemRef_DimOp : MemRef_Op<"dim", [
let extraClassDeclaration = [{
/// Helper function to get the index as a simple integer if it is constant.
Optional<int64_t> getConstantIndex();
std::optional<int64_t> getConstantIndex();
/// Interface method of ShapedDimOpInterface: Return the source memref.
Value getShapedValue() { return getSource(); }


@ -816,7 +816,7 @@ def PDLInterp_GetOperandsOp : PDLInterp_Op<"get_operands", [Pure]> {
let assemblyFormat = "($index^)? `of` $inputOp `:` type($value) attr-dict";
let builders = [
OpBuilder<(ins "Type":$resultType, "Value":$inputOp,
"Optional<unsigned>":$index), [{
"std::optional<unsigned>":$index), [{
build($_builder, $_state, resultType, inputOp,
index ? $_builder.getI32IntegerAttr(*index) : IntegerAttr());
}]>,
@ -883,7 +883,7 @@ def PDLInterp_GetResultsOp : PDLInterp_Op<"get_results", [Pure]> {
let assemblyFormat = "($index^)? `of` $inputOp `:` type($value) attr-dict";
let builders = [
OpBuilder<(ins "Type":$resultType, "Value":$inputOp,
"Optional<unsigned>":$index), [{
"std::optional<unsigned>":$index), [{
build($_builder, $_state, resultType, inputOp,
index ? $_builder.getI32IntegerAttr(*index) : IntegerAttr());
}]>,


@ -274,7 +274,7 @@ def ForOp : SCF_Op<"for",
}
/// Get the iter arg number for an operand. If it isn't an iter arg
/// operand, return std::nullopt.
Optional<unsigned> getIterArgNumberForOpOperand(OpOperand &opOperand) {
std::optional<unsigned> getIterArgNumberForOpOperand(OpOperand &opOperand) {
if (opOperand.getOwner() != getOperation())
return std::nullopt;
unsigned operandNumber = opOperand.getOperandNumber();
@ -331,10 +331,10 @@ def ForOp : SCF_Op<"for",
/// correspond to the loop iterator operands, i.e., those excluding the
/// induction variable. LoopOp only has one region, so 0 is the only valid
/// value for `index`.
OperandRange getSuccessorEntryOperands(Optional<unsigned> index);
OperandRange getSuccessorEntryOperands(std::optional<unsigned> index);
/// Returns the step as an `APInt` if it is constant.
Optional<APInt> getConstantStep();
std::optional<APInt> getConstantStep();
/// Interface method for ConditionallySpeculatable.
Speculation::Speculatability getSpeculatability();
@ -496,7 +496,7 @@ def ForeachThreadOp : SCF_Op<"foreach_thread", [
let builders = [
// Bodyless builder, outputs must be specified.
OpBuilder<(ins "ValueRange":$outputs, "ValueRange":$num_threads,
"Optional<ArrayAttr>":$mapping)>,
"std::optional<ArrayAttr>":$mapping)>,
// Builder that takes a bodyBuilder lambda.
OpBuilder<(ins "ValueRange":$outputs, "ValueRange":$num_threads,
"ArrayRef<Attribute>":$mapping,
@ -1003,7 +1003,7 @@ def WhileOp : SCF_Op<"while",
using BodyBuilderFn =
function_ref<void(OpBuilder &, Location, ValueRange)>;
OperandRange getSuccessorEntryOperands(Optional<unsigned> index);
OperandRange getSuccessorEntryOperands(std::optional<unsigned> index);
ConditionOp getConditionOp();
YieldOp getYieldOp();
Block::BlockArgListType getBeforeArguments();


@ -31,7 +31,8 @@ parseEnumKeywordAttr(EnumClass &value, ParserType &parser,
auto loc = parser.getCurrentLocation();
if (parser.parseKeyword(&keyword))
return failure();
if (Optional<EnumClass> attr = spirv::symbolizeEnum<EnumClass>(keyword)) {
if (std::optional<EnumClass> attr =
spirv::symbolizeEnum<EnumClass>(keyword)) {
value = *attr;
return success();
}


@ -492,11 +492,11 @@ def SPIRV_ModuleOp : SPIRV_Op<"module",
let regions = (region AnyRegion);
let builders = [
OpBuilder<(ins CArg<"Optional<StringRef>", "std::nullopt">:$name)>,
OpBuilder<(ins CArg<"std::optional<StringRef>", "std::nullopt">:$name)>,
OpBuilder<(ins "spirv::AddressingModel":$addressing_model,
"spirv::MemoryModel":$memory_model,
CArg<"Optional<spirv::VerCapExtAttr>", "std::nullopt">:$vce_triple,
CArg<"Optional<StringRef>", "std::nullopt">:$name)>
CArg<"std::optional<spirv::VerCapExtAttr>", "std::nullopt">:$vce_triple,
CArg<"std::optional<StringRef>", "std::nullopt">:$name)>
];
// We need to ensure the block inside the region is properly terminated;
@ -511,7 +511,7 @@ def SPIRV_ModuleOp : SPIRV_Op<"module",
bool isOptionalSymbol() { return true; }
Optional<StringRef> getName() { return getSymName(); }
std::optional<StringRef> getName() { return getSymName(); }
static StringRef getVCETripleAttrName() { return "vce_triple"; }
}];


@ -352,7 +352,7 @@ def Shape_DimOp : Shape_Op<"dim",
let extraClassDeclaration = [{
/// Get the `index` value as integer if it is constant.
Optional<int64_t> getConstantIndex();
std::optional<int64_t> getConstantIndex();
/// Returns when two result types are compatible for this op; method used by
/// InferTypeOpInterface
@ -383,7 +383,7 @@ def Shape_GetExtentOp : Shape_Op<"get_extent",
let extraClassDeclaration = [{
/// Get the `dim` value as integer if it is constant.
Optional<int64_t> getConstantDim();
std::optional<int64_t> getConstantDim();
/// Returns when two result types are compatible for this op; method used by
/// InferTypeOpInterface
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);


@ -67,8 +67,8 @@ def SparseTensor_StorageSpecifier : SparseTensor_Type<"StorageSpecifier"> {
let extraClassDeclaration = [{
// Get the integer type used to store memory and dimension sizes.
IntegerType getSizesType() const;
Type getFieldType(StorageSpecifierKind kind, Optional<unsigned> dim) const;
Type getFieldType(StorageSpecifierKind kind, Optional<APInt> dim) const;
Type getFieldType(StorageSpecifierKind kind, std::optional<unsigned> dim) const;
Type getFieldType(StorageSpecifierKind kind, std::optional<APInt> dim) const;
}];
let assemblyFormat="`<` qualified($encoding) `>`";


@ -129,7 +129,7 @@ def Tensor_DimOp : Tensor_Op<"dim", [
let extraClassDeclaration = [{
/// Helper function to get the index as a simple integer if it is constant.
Optional<int64_t> getConstantIndex();
std::optional<int64_t> getConstantIndex();
/// Interface method of ShapedDimOpInterface: Return the source tensor.
Value getShapedValue() { return getSource(); }
@ -380,7 +380,7 @@ def Tensor_ExtractSliceOp : Tensor_OpWithOffsetSizesAndStrides<"extract_slice",
/// Compute the rank-reduction mask that can be applied to map the source
/// tensor type to the result tensor type by dropping unit dims.
llvm::Optional<llvm::SmallDenseSet<unsigned>>
std::optional<llvm::SmallDenseSet<unsigned>>
computeRankReductionMask() {
return ::mlir::computeRankReductionMask(getSourceType().getShape(),
getType().getShape());


@ -67,7 +67,7 @@ SmallVector<OpFoldResult> getAsOpFoldResult(ValueRange values);
SmallVector<OpFoldResult> getAsOpFoldResult(ArrayAttr arrayAttr);
/// If ofr is a constant integer or an IntegerAttr, return the integer.
Optional<int64_t> getConstantIntValue(OpFoldResult ofr);
std::optional<int64_t> getConstantIntValue(OpFoldResult ofr);
/// Return true if `ofr` is constant integer equal to `value`.
bool isConstantIntValue(OpFoldResult ofr, int64_t value);


@ -118,7 +118,7 @@ void populateBubbleVectorBitCastOpPatterns(RewritePatternSet &patterns,
/// VectorToSCF, which reduces the rank of vector transfer ops.
void populateVectorTransferLoweringPatterns(
RewritePatternSet &patterns,
llvm::Optional<unsigned> maxTransferRank = std::nullopt,
std::optional<unsigned> maxTransferRank = std::nullopt,
PatternBenefit benefit = 1);
/// These patterns materialize masks for various vector ops such as transfers.


@ -1370,19 +1370,19 @@ def Vector_TransferReadOp :
"Value":$source,
"ValueRange":$indices,
"AffineMap":$permutationMap,
CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
/// 3. Builder that sets permutation map to 'getMinorIdentityMap'.
OpBuilder<(ins "VectorType":$vectorType,
"Value":$source,
"ValueRange":$indices,
"Value":$padding,
CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
/// 4. Builder that sets padding to zero and permutation map to
/// 'getMinorIdentityMap'.
OpBuilder<(ins "VectorType":$vectorType,
"Value":$source,
"ValueRange":$indices,
CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
];
let extraClassDeclaration = [{
@ -1521,13 +1521,13 @@ def Vector_TransferWriteOp :
"Value":$dest,
"ValueRange":$indices,
"AffineMap":$permutationMap,
CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
/// 4. Builder with type inference that sets an empty mask and sets permutation
/// map to 'getMinorIdentityMap'.
OpBuilder<(ins "Value":$vector,
"Value":$dest,
"ValueRange":$indices,
CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
];
let extraClassDeclaration = [{


@ -58,13 +58,13 @@ def ModuleOp : Builtin_Op<"module", [
let regions = (region SizedRegion<1>:$bodyRegion);
let assemblyFormat = "($sym_name^)? attr-dict-with-keyword $bodyRegion";
let builders = [OpBuilder<(ins CArg<"Optional<StringRef>", "{}">:$name)>];
let builders = [OpBuilder<(ins CArg<"std::optional<StringRef>", "{}">:$name)>];
let extraClassDeclaration = [{
/// Construct a module from the given location with an optional name.
static ModuleOp create(Location loc, Optional<StringRef> name = std::nullopt);
static ModuleOp create(Location loc, std::optional<StringRef> name = std::nullopt);
/// Return the name of this module if present.
Optional<StringRef> getName() { return getSymName(); }
std::optional<StringRef> getName() { return getSymName(); }
//===------------------------------------------------------------------===//
// SymbolOpInterface Methods


@ -63,7 +63,7 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
type. If a shape is not provided, the current shape of the type is used.
}],
"::mlir::ShapedType", "cloneWith", (ins
"::llvm::Optional<::llvm::ArrayRef<int64_t>>":$shape,
"::std::optional<::llvm::ArrayRef<int64_t>>":$shape,
"::mlir::Type":$elementType
)>,


@ -90,7 +90,7 @@ public:
/// Clone this type with the given shape and element type. If the
/// provided shape is `None`, the current shape of the type is used.
TensorType cloneWith(Optional<ArrayRef<int64_t>> shape,
TensorType cloneWith(std::optional<ArrayRef<int64_t>> shape,
Type elementType) const;
/// Return true if the specified element type is ok in a tensor.
@ -126,7 +126,7 @@ public:
/// Clone this type with the given shape and element type. If the
/// provided shape is `None`, the current shape of the type is used.
BaseMemRefType cloneWith(Optional<ArrayRef<int64_t>> shape,
BaseMemRefType cloneWith(std::optional<ArrayRef<int64_t>> shape,
Type elementType) const;
/// Return true if the specified element type is ok in a memref.
@ -337,7 +337,7 @@ private:
/// which dimensions must be kept when e.g. compute MemRef strides under
/// rank-reducing operations. Return std::nullopt if reducedShape cannot be
/// obtained by dropping only `1` entries in `originalShape`.
llvm::Optional<llvm::SmallDenseSet<unsigned>>
std::optional<llvm::SmallDenseSet<unsigned>>
computeRankReductionMask(ArrayRef<int64_t> originalShape,
ArrayRef<int64_t> reducedShape);


@ -1031,7 +1031,7 @@ def Builtin_Vector : Builtin_Type<"Vector", [
/// Clone this vector type with the given shape and element type. If the
/// provided shape is `None`, the current shape of the type is used.
VectorType cloneWith(Optional<ArrayRef<int64_t>> shape,
VectorType cloneWith(std::optional<ArrayRef<int64_t>> shape,
Type elementType) const;
}];
let skipDefaultBuilders = 1;


@ -487,19 +487,19 @@ InFlightDiagnostic emitRemark(Location loc, const Twine &message);
/// the diagnostic arguments directly instead of relying on the returned
/// InFlightDiagnostic.
template <typename... Args>
LogicalResult emitOptionalError(Optional<Location> loc, Args &&...args) {
LogicalResult emitOptionalError(std::optional<Location> loc, Args &&...args) {
if (loc)
return emitError(*loc).append(std::forward<Args>(args)...);
return failure();
}
template <typename... Args>
LogicalResult emitOptionalWarning(Optional<Location> loc, Args &&...args) {
LogicalResult emitOptionalWarning(std::optional<Location> loc, Args &&...args) {
if (loc)
return emitWarning(*loc).append(std::forward<Args>(args)...);
return failure();
}
template <typename... Args>
LogicalResult emitOptionalRemark(Optional<Location> loc, Args &&...args) {
LogicalResult emitOptionalRemark(std::optional<Location> loc, Args &&...args) {
if (loc)
return emitRemark(*loc).append(std::forward<Args>(args)...);
return failure();


@ -115,7 +115,8 @@ public:
/// By default this will lookup for registered operations and return the
/// `parse()` method registered on the RegisteredOperationName. Dialects can
/// override this behavior and handle unregistered operations as well.
virtual Optional<ParseOpHook> getParseOperationHook(StringRef opName) const;
virtual std::optional<ParseOpHook>
getParseOperationHook(StringRef opName) const;
/// Print an operation registered to this dialect.
/// This hook is invoked for registered operation which don't override the


@ -130,7 +130,7 @@ class EnumAttrInfo<
// type to the corresponding symbol. It will have the following signature:
//
// ```c++
// llvm::Optional<<qualified-enum-class-name>> <fn-name>(<underlying-type>);
// std::optional<<qualified-enum-class-name>> <fn-name>(<underlying-type>);
// ```
string underlyingToSymbolFnName = "symbolize" # name;
@ -138,7 +138,7 @@ class EnumAttrInfo<
// corresponding symbol. It will have the following signature:
//
// ```c++
// llvm::Optional<<qualified-enum-class-name>> <fn-name>(llvm::StringRef);
// std::optional<<qualified-enum-class-name>> <fn-name>(llvm::StringRef);
// ```
string stringToSymbolFnName = "symbolize" # name;


@ -1033,7 +1033,7 @@ class OptionalAttr<Attr attr> : Attr<attr.predicate, attr.summary> {
// Rewrite the attribute to be optional.
// Note: this has to be kept up to date with Attr above.
let storageType = attr.storageType;
let returnType = "::llvm::Optional<" # attr.returnType #">";
let returnType = "::std::optional<" # attr.returnType #">";
let convertFromStorage = "$_self ? " # returnType # "(" #
attr.convertFromStorage # ") : (::std::nullopt)";
let valueType = attr.valueType;


@ -128,7 +128,7 @@ namespace detail {
/// Return the `BlockArgument` corresponding to operand `operandIndex` in some
/// successor if `operandIndex` is within the range of `operands`, or
/// std::nullopt if `operandIndex` isn't a successor operand index.
Optional<BlockArgument>
std::optional<BlockArgument>
getBranchSuccessorArgument(const SuccessorOperands &operands,
unsigned operandIndex, Block *successor);
@ -164,8 +164,10 @@ public:
RegionSuccessor(Region *region, Block::BlockArgListType regionInputs = {})
: region(region), inputs(regionInputs) {}
/// Initialize a successor that branches back to/out of the parent operation.
RegionSuccessor(Optional<Operation::result_range> results = {})
: inputs(results ? ValueRange(*results) : ValueRange()) {}
RegionSuccessor(Operation::result_range results)
: inputs(ValueRange(results)) {}
/// Constructor with no arguments.
RegionSuccessor() : inputs(ValueRange()) {}
/// Return the given region successor. Returns nullptr if the successor is the
/// parent operation.
@ -190,7 +192,8 @@ class InvocationBounds {
public:
/// Create invocation bounds. The lower bound must be at least 0 and only the
/// upper bound can be unknown.
InvocationBounds(unsigned lb, Optional<unsigned> ub) : lower(lb), upper(ub) {
InvocationBounds(unsigned lb, std::optional<unsigned> ub)
: lower(lb), upper(ub) {
assert((!ub || ub >= lb) && "upper bound cannot be less than lower bound");
}
@ -198,7 +201,7 @@ public:
unsigned getLowerBound() const { return lower; }
/// Return the upper bound.
Optional<unsigned> getUpperBound() const { return upper; }
std::optional<unsigned> getUpperBound() const { return upper; }
/// Returns the unknown invocation bounds, i.e., there is no information on
/// how many times a region may be invoked.
@ -209,7 +212,7 @@ private:
unsigned lower;
/// The maximum number of times the successor region will be invoked or
/// `std::nullopt` if an upper bound is not known.
Optional<unsigned> upper;
std::optional<unsigned> upper;
};
/// Return `true` if `a` and `b` are in mutually exclusive regions as per
@ -241,16 +244,16 @@ bool isRegionReturnLike(Operation *operation);
/// `OperandRange` represents all operands that are passed to the specified
/// successor region. If `regionIndex` is `std::nullopt`, all operands that are
/// passed to the parent operation will be returned.
Optional<MutableOperandRange>
std::optional<MutableOperandRange>
getMutableRegionBranchSuccessorOperands(Operation *operation,
Optional<unsigned> regionIndex);
std::optional<unsigned> regionIndex);
/// Returns the read only operands that are passed to the region with the given
/// `regionIndex`. See `getMutableRegionBranchSuccessorOperands` for more
/// information.
Optional<OperandRange>
std::optional<OperandRange>
getRegionBranchSuccessorOperands(Operation *operation,
Optional<unsigned> regionIndex);
std::optional<unsigned> regionIndex);
//===----------------------------------------------------------------------===//
// ControlFlow Traits


@ -70,11 +70,11 @@ def BranchOpInterface : OpInterface<"BranchOpInterface"> {
some successor, or None if `operandIndex` isn't a successor operand
index.
}],
"::llvm::Optional<::mlir::BlockArgument>", "getSuccessorBlockArgument",
"::std::optional<::mlir::BlockArgument>", "getSuccessorBlockArgument",
(ins "unsigned":$operandIndex), [{
::mlir::Operation *opaqueOp = $_op;
for (unsigned i = 0, e = opaqueOp->getNumSuccessors(); i != e; ++i) {
if (::llvm::Optional<::mlir::BlockArgument> arg = ::mlir::detail::getBranchSuccessorArgument(
if (::std::optional<::mlir::BlockArgument> arg = ::mlir::detail::getBranchSuccessorArgument(
$_op.getSuccessorOperands(i), operandIndex,
opaqueOp->getSuccessor(i)))
return arg;
@ -140,7 +140,7 @@ def RegionBranchOpInterface : OpInterface<"RegionBranchOpInterface"> {
`getSuccessorRegions`.
}],
"::mlir::OperandRange", "getSuccessorEntryOperands",
(ins "::llvm::Optional<unsigned>":$index), [{}],
(ins "::std::optional<unsigned>":$index), [{}],
/*defaultImplementation=*/[{
auto operandEnd = this->getOperation()->operand_end();
return ::mlir::OperandRange(operandEnd, operandEnd);
@ -161,7 +161,7 @@ def RegionBranchOpInterface : OpInterface<"RegionBranchOpInterface"> {
successor region must be non-empty.
}],
"void", "getSuccessorRegions",
(ins "::llvm::Optional<unsigned>":$index,
(ins "::std::optional<unsigned>":$index,
"::llvm::ArrayRef<::mlir::Attribute>":$operands,
"::llvm::SmallVectorImpl<::mlir::RegionSuccessor> &":$regions)
>,
@ -208,7 +208,7 @@ def RegionBranchOpInterface : OpInterface<"RegionBranchOpInterface"> {
let extraClassDeclaration = [{
/// Convenience helper in case none of the operands is known.
void getSuccessorRegions(Optional<unsigned> index,
void getSuccessorRegions(std::optional<unsigned> index,
SmallVectorImpl<RegionSuccessor> &regions);
/// Return `true` if control flow originating from the given region may
@ -239,7 +239,7 @@ def RegionBranchTerminatorOpInterface :
the parent operation.
}],
"::mlir::MutableOperandRange", "getMutableSuccessorOperands",
(ins "::llvm::Optional<unsigned>":$index)
(ins "::std::optional<unsigned>":$index)
>,
InterfaceMethod<[{
Returns a range of operands that are semantically "returned" by passing
@ -248,7 +248,7 @@ def RegionBranchTerminatorOpInterface :
operation.
}],
"::mlir::OperandRange", "getSuccessorOperands",
(ins "::llvm::Optional<unsigned>":$index), [{}],
(ins "::std::optional<unsigned>":$index), [{}],
/*defaultImplementation=*/[{
return $_op.getMutableSuccessorOperands(index);
}]


@ -235,12 +235,13 @@ namespace detail {
// TODO: Consider generating typedefs for trait member functions if this usage
// becomes more common.
LogicalResult inferReturnTensorTypes(
function_ref<LogicalResult(
MLIRContext *, Optional<Location> location, ValueShapeRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &retComponents)>
function_ref<
LogicalResult(MLIRContext *, std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes,
RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &retComponents)>
componentTypeFn,
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes);
@ -272,7 +273,7 @@ template <typename ConcreteType>
class InferTensorType : public TraitBase<ConcreteType, InferTensorType> {
public:
static LogicalResult
inferReturnTypes(MLIRContext *context, Optional<Location> location,
inferReturnTypes(MLIRContext *context, std::optional<Location> location,
ValueRange operands, DictionaryAttr attributes,
RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {


@ -41,7 +41,7 @@ def InferTypeOpInterface : OpInterface<"InferTypeOpInterface"> {
/*retTy=*/"::mlir::LogicalResult",
/*methodName=*/"inferReturnTypes",
/*args=*/(ins "::mlir::MLIRContext *":$context,
"::llvm::Optional<::mlir::Location>":$location,
"::std::optional<::mlir::Location>":$location,
"::mlir::ValueRange":$operands,
"::mlir::DictionaryAttr":$attributes,
"::mlir::RegionRange":$regions,
@ -72,7 +72,7 @@ def InferTypeOpInterface : OpInterface<"InferTypeOpInterface"> {
/*retTy=*/"::mlir::LogicalResult",
/*methodName=*/"refineReturnTypes",
/*args=*/(ins "::mlir::MLIRContext *":$context,
"::llvm::Optional<::mlir::Location>":$location,
"::std::optional<::mlir::Location>":$location,
"::mlir::ValueRange":$operands,
"::mlir::DictionaryAttr":$attributes,
"::mlir::RegionRange":$regions,
@ -144,7 +144,7 @@ def InferShapedTypeOpInterface : OpInterface<"InferShapedTypeOpInterface"> {
/*retTy=*/"::mlir::LogicalResult",
/*methodName=*/"inferReturnTypeComponents",
/*args=*/(ins "::mlir::MLIRContext*":$context,
"::llvm::Optional<::mlir::Location>":$location,
"::std::optional<::mlir::Location>":$location,
"::mlir::ValueShapeRange":$operands,
"::mlir::DictionaryAttr":$attributes,
"::mlir::RegionRange":$regions,


@ -52,7 +52,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
If there is a single induction variable return it, otherwise return
std::nullopt.
}],
/*retTy=*/"::llvm::Optional<::mlir::Value>",
/*retTy=*/"::std::optional<::mlir::Value>",
/*methodName=*/"getSingleInductionVar",
/*args=*/(ins),
/*methodBody=*/"",
@ -64,7 +64,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
Return the single lower bound value or attribute if it exists, otherwise
return std::nullopt.
}],
/*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>",
/*retTy=*/"::std::optional<::mlir::OpFoldResult>",
/*methodName=*/"getSingleLowerBound",
/*args=*/(ins),
/*methodBody=*/"",
@ -76,7 +76,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
Return the single step value or attribute if it exists, otherwise
return std::nullopt.
}],
/*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>",
/*retTy=*/"::std::optional<::mlir::OpFoldResult>",
/*methodName=*/"getSingleStep",
/*args=*/(ins),
/*methodBody=*/"",
@ -88,7 +88,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
Return the single upper bound value or attribute if it exists, otherwise
return std::nullopt.
}],
/*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>",
/*retTy=*/"::std::optional<::mlir::OpFoldResult>",
/*methodName=*/"getSingleUpperBound",
/*args=*/(ins),
/*methodBody=*/"",


@ -106,7 +106,7 @@ class EffectOpInterfaceBase<string name, string baseEffect>
/// Return the effect of the given type `Effect` that is applied to the
/// given value, or std::nullopt if no effect exists.
template <typename Effect>
::llvm::Optional<::mlir::SideEffects::EffectInstance<}] # baseEffect # [{>>
::std::optional<::mlir::SideEffects::EffectInstance<}] # baseEffect # [{>>
getEffectOnValue(::mlir::Value value) {
llvm::SmallVector<::mlir::SideEffects::EffectInstance<
}] # baseEffect # [{>, 4> effects;


@ -28,7 +28,7 @@ def VectorUnrollOpInterface : OpInterface<"VectorUnrollOpInterface"> {
`targetShape`. Return `None` if the op cannot be unrolled to the target
vector shape.
}],
/*retTy=*/"::llvm::Optional<::llvm::SmallVector<int64_t, 4>>",
/*retTy=*/"::std::optional<::llvm::SmallVector<int64_t, 4>>",
/*methodName=*/"getShapeForUnroll",
/*args=*/(ins),
/*methodBody=*/"",
@ -143,7 +143,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
>,
InterfaceMethod<
/*desc=*/"Return the `in_bounds` boolean ArrayAttr.",
/*retTy=*/"::llvm::Optional<::mlir::ArrayAttr>",
/*retTy=*/"::std::optional<::mlir::ArrayAttr>",
/*methodName=*/"in_bounds",
/*args=*/(ins),
/*methodBody=*/"return $_op.getInBounds();"

View File

@ -45,7 +45,7 @@ static void collectUnderlyingAddressValues(RegionBranchOpInterface branch,
// this region predecessor that correspond to the input values of `region`. If
// an index could not be found, std::nullopt is returned instead.
auto getOperandIndexIfPred =
[&](Optional<unsigned> predIndex) -> Optional<unsigned> {
[&](std::optional<unsigned> predIndex) -> std::optional<unsigned> {
SmallVector<RegionSuccessor, 2> successors;
branch.getSuccessorRegions(predIndex, successors);
for (RegionSuccessor &successor : successors) {
@ -75,12 +75,12 @@ static void collectUnderlyingAddressValues(RegionBranchOpInterface branch,
};
// Check branches from the parent operation.
Optional<unsigned> regionIndex;
std::optional<unsigned> regionIndex;
if (region) {
// Determine the actual region number from the passed region.
regionIndex = region->getRegionNumber();
}
if (Optional<unsigned> operandIndex =
if (std::optional<unsigned> operandIndex =
getOperandIndexIfPred(/*predIndex=*/std::nullopt)) {
collectUnderlyingAddressValues(
branch.getSuccessorEntryOperands(regionIndex)[*operandIndex], maxDepth,
@ -89,7 +89,7 @@ static void collectUnderlyingAddressValues(RegionBranchOpInterface branch,
// Check branches from each child region.
Operation *op = branch.getOperation();
for (int i = 0, e = op->getNumRegions(); i != e; ++i) {
if (Optional<unsigned> operandIndex = getOperandIndexIfPred(i)) {
if (std::optional<unsigned> operandIndex = getOperandIndexIfPred(i)) {
for (Block &block : op->getRegion(i)) {
Operation *term = block.getTerminator();
// Try to determine possible region-branch successor operands for the
@ -211,7 +211,8 @@ static void collectUnderlyingAddressValues(Value value,
/// non-null it specifies the parent operation that the allocation does not
/// escape. If no scope is found, `allocScopeOp` is set to nullptr.
static LogicalResult
getAllocEffectFor(Value value, Optional<MemoryEffects::EffectInstance> &effect,
getAllocEffectFor(Value value,
std::optional<MemoryEffects::EffectInstance> &effect,
Operation *&allocScopeOp) {
// Try to get a memory effect interface for the parent operation.
Operation *op;
@ -249,7 +250,7 @@ static AliasResult aliasImpl(Value lhs, Value rhs) {
if (lhs == rhs)
return AliasResult::MustAlias;
Operation *lhsAllocScope = nullptr, *rhsAllocScope = nullptr;
Optional<MemoryEffects::EffectInstance> lhsAlloc, rhsAlloc;
std::optional<MemoryEffects::EffectInstance> lhsAlloc, rhsAlloc;
// Handle the case where lhs is a constant.
Attribute lhsAttr, rhsAttr;
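In the hunk above, std::optional<unsigned> flows through the lambda in both directions, with std::nullopt encoding "the predecessor is the parent op" rather than a numbered region. A self-contained sketch of that convention (plain containers and hypothetical indices, not the alias-analysis API):

#include <iostream>
#include <optional>
#include <vector>

int main() {
  std::vector<unsigned> operandIndexPerRegion = {2, 0, 1};
  auto getOperandIndexIfPred =
      [&](std::optional<unsigned> predIndex) -> std::optional<unsigned> {
    if (!predIndex)           // std::nullopt: predecessor is the parent op.
      return 0;
    if (*predIndex < operandIndexPerRegion.size())
      return operandIndexPerRegion[*predIndex];
    return std::nullopt;      // No corresponding operand was found.
  };

  if (std::optional<unsigned> idx = getOperandIndexIfPred(std::nullopt))
    std::cout << "parent-op operand index: " << *idx << '\n';
  if (std::optional<unsigned> idx = getOperandIndexIfPred(7))
    std::cout << "operand index: " << *idx << '\n';
  else
    std::cout << "region 7 has no matching operand\n";
}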

View File

@ -165,7 +165,7 @@ void IntegerRangeAnalysis::visitNonControlFlowArguments(
/// Given the results of getConstant{Lower,Upper}Bound() or getConstantStep()
/// on a LoopLikeInterface, return the lower/upper bound for that result if
/// possible.
auto getLoopBoundFromFold = [&](Optional<OpFoldResult> loopBound,
auto getLoopBoundFromFold = [&](std::optional<OpFoldResult> loopBound,
Type boundType, bool getUpper) {
unsigned int width = ConstantIntRanges::getStorageBitwidth(boundType);
if (loopBound.has_value()) {
@ -190,14 +190,14 @@ void IntegerRangeAnalysis::visitNonControlFlowArguments(
// Infer bounds for loop arguments that have static bounds
if (auto loop = dyn_cast<LoopLikeOpInterface>(op)) {
Optional<Value> iv = loop.getSingleInductionVar();
std::optional<Value> iv = loop.getSingleInductionVar();
if (!iv) {
return SparseDataFlowAnalysis::visitNonControlFlowArguments(
op, successor, argLattices, firstIndex);
}
Optional<OpFoldResult> lowerBound = loop.getSingleLowerBound();
Optional<OpFoldResult> upperBound = loop.getSingleUpperBound();
Optional<OpFoldResult> step = loop.getSingleStep();
std::optional<OpFoldResult> lowerBound = loop.getSingleLowerBound();
std::optional<OpFoldResult> upperBound = loop.getSingleUpperBound();
std::optional<OpFoldResult> step = loop.getSingleStep();
APInt min = getLoopBoundFromFold(lowerBound, iv->getType(),
/*getUpper=*/false);
APInt max = getLoopBoundFromFold(upperBound, iv->getType(),
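getLoopBoundFromFold above takes a std::optional and falls back to a bound derived from the type's bitwidth when the fold result is absent. std::optional's value_or() expresses exactly that fallback; a trimmed standalone analogue (the numeric_limits fallback is an assumed simplification of the APInt logic):

#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>

static int64_t getLoopBound(std::optional<int64_t> loopBound, bool getUpper) {
  // value_or() collapses the "known" and "unknown" cases into one line.
  return loopBound.value_or(getUpper ? std::numeric_limits<int64_t>::max()
                                     : std::numeric_limits<int64_t>::min());
}

int main() {
  std::cout << getLoopBound(std::optional<int64_t>(42), /*getUpper=*/true)
            << '\n';                                      // prints 42
  std::cout << getLoopBound(std::nullopt, /*getUpper=*/false) << '\n';
}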

View File

@ -208,7 +208,7 @@ void AbstractSparseDataFlowAnalysis::visitBlock(Block *block) {
void AbstractSparseDataFlowAnalysis::visitRegionSuccessors(
ProgramPoint point, RegionBranchOpInterface branch,
Optional<unsigned> successorIndex,
std::optional<unsigned> successorIndex,
ArrayRef<AbstractSparseLattice *> lattices) {
const auto *predecessors = getOrCreateFor<PredecessorState>(point, point);
assert(predecessors->allPredecessorsKnown() &&
@ -216,7 +216,7 @@ void AbstractSparseDataFlowAnalysis::visitRegionSuccessors(
for (Operation *op : predecessors->getKnownPredecessors()) {
// Get the incoming successor operands.
Optional<OperandRange> operands;
std::optional<OperandRange> operands;
// Check if the predecessor is the parent op.
if (op == branch) {
@ -390,7 +390,7 @@ void AbstractSparseBackwardDataFlowAnalysis::visitOperation(Operation *op) {
forwarded.getBeginOperandIndex(), forwarded.size());
for (OpOperand &operand : operands) {
unaccounted.reset(operand.getOperandNumber());
if (Optional<BlockArgument> blockArg =
if (std::optional<BlockArgument> blockArg =
detail::getBranchSuccessorArgument(
successorOperands, operand.getOperandNumber(), block)) {
meet(getLatticeElement(operand.get()),

View File

@ -1871,7 +1871,7 @@ OperationParser::parseCustomOperation(ArrayRef<ResultRecord> resultIDs) {
if (iface && !iface->getDefaultDialect().empty())
defaultDialect = iface->getDefaultDialect();
} else {
Optional<Dialect::ParseOpHook> dialectHook;
std::optional<Dialect::ParseOpHook> dialectHook;
Dialect *dialect = opNameInfo->getDialect();
if (!dialect) {
InFlightDiagnostic diag =

View File

@ -46,7 +46,7 @@ MlirLogicalResult mlirInferTypeOpInterfaceInferReturnTypes(
if (!info)
return mlirLogicalResultFailure();
llvm::Optional<Location> maybeLocation;
std::optional<Location> maybeLocation;
if (!mlirLocationIsNull(location))
maybeLocation = unwrap(location);
SmallVector<Value> unwrappedOperands;

View File

@ -223,7 +223,7 @@ public:
// initialization of the result values.
Attribute reduction = std::get<0>(pair);
Type resultType = std::get<1>(pair);
Optional<arith::AtomicRMWKind> reductionOp =
std::optional<arith::AtomicRMWKind> reductionOp =
arith::symbolizeAtomicRMWKind(
static_cast<uint64_t>(reduction.cast<IntegerAttr>().getInt()));
assert(reductionOp && "Reduction operation cannot be of None Type");
@ -243,7 +243,7 @@ public:
"Unequal number of reductions and operands.");
for (unsigned i = 0, end = reductions.size(); i < end; i++) {
// For each of the reduction operations get the respective mlir::Value.
Optional<arith::AtomicRMWKind> reductionOp =
std::optional<arith::AtomicRMWKind> reductionOp =
arith::symbolizeAtomicRMWKind(
reductions[i].cast<IntegerAttr>().getInt());
assert(reductionOp && "Reduction Operation cannot be of None Type");

View File

@ -442,8 +442,8 @@ private:
return rewriter.create<LLVM::LoadOp>(loc, sizePtr);
}
Optional<int64_t> getConstantDimIndex(memref::DimOp dimOp) const {
if (Optional<int64_t> idx = dimOp.getConstantIndex())
std::optional<int64_t> getConstantDimIndex(memref::DimOp dimOp) const {
if (auto idx = dimOp.getConstantIndex())
return idx;
if (auto constantOp = dimOp.getIndex().getDefiningOp<LLVM::ConstantOp>())
@ -462,7 +462,7 @@ private:
// Take advantage if index is constant.
MemRefType memRefType = operandType.cast<MemRefType>();
if (Optional<int64_t> index = getConstantDimIndex(dimOp)) {
if (std::optional<int64_t> index = getConstantDimIndex(dimOp)) {
int64_t i = *index;
if (memRefType.isDynamicDim(i)) {
// extract dynamic size from the memref descriptor.
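The first hunk in this file shortens `if (Optional<int64_t> idx = ...)` to `if (auto idx = ...)`; std::optional's explicit operator bool makes the two spellings equivalent guards. A standalone illustration with a toy lookup function:

#include <cstdint>
#include <iostream>
#include <optional>

static std::optional<int64_t> getConstantIndex(bool known) {
  return known ? std::optional<int64_t>(3) : std::nullopt;
}

int main() {
  if (std::optional<int64_t> idx = getConstantIndex(true))
    std::cout << "constant index: " << *idx << '\n';
  if (auto idx = getConstantIndex(false))  // same test, shorter spelling
    std::cout << "constant index: " << *idx << '\n';
  else
    std::cout << "index is not a constant\n";
}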

View File

@ -298,14 +298,15 @@ struct MmaSyncOptoNVVM : public ConvertOpToLLVMPattern<nvgpu::MmaSyncOp> {
FailureOr<NVVM::MMATypes> ptxTypeB = getNvvmMmaType(bType);
if (failed(ptxTypeB))
return op->emitOpError("failed to deduce operand PTX types");
Optional<NVVM::MMATypes> ptxTypeC = NVVM::MmaOp::inferOperandMMAType(
cType.getElementType(), /*isAccumulator=*/true);
std::optional<NVVM::MMATypes> ptxTypeC =
NVVM::MmaOp::inferOperandMMAType(cType.getElementType(),
/*isAccumulator=*/true);
if (!ptxTypeC)
return op->emitError(
"could not infer the PTX type for the accumulator/result");
// TODO: add an attribute to the op to customize this behavior.
Optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
std::optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
if (aType.getElementType().isa<IntegerType>())
overflow = NVVM::MMAIntOverflow::satfinite;
@ -413,7 +414,7 @@ buildMmaSparseAsmString(const std::array<int64_t, 3> &shape, unsigned matASize,
unsigned matBSize, unsigned matCSize,
NVVM::MMATypes ptxTypeA, NVVM::MMATypes ptxTypeB,
NVVM::MMATypes ptxTypeC, NVVM::MMATypes ptxTypeD,
Optional<NVVM::MMAIntOverflow> overflow) {
std::optional<NVVM::MMAIntOverflow> overflow) {
auto ptxTypeStr = [](NVVM::MMATypes ptxType) {
return NVVM::stringifyMMATypes(ptxType);
};
@ -449,7 +450,7 @@ buildMmaSparseAsmString(const std::array<int64_t, 3> &shape, unsigned matASize,
static FailureOr<LLVM::InlineAsmOp> emitMmaSparseSyncOpAsm(
Location loc, NVVM::MMATypes ptxTypeA, NVVM::MMATypes ptxTypeB,
NVVM::MMATypes ptxTypeC, NVVM::MMATypes ptxTypeD,
Optional<NVVM::MMAIntOverflow> overflow, ArrayRef<Value> unpackedAData,
std::optional<NVVM::MMAIntOverflow> overflow, ArrayRef<Value> unpackedAData,
ArrayRef<Value> unpackedB, ArrayRef<Value> unpackedC, Value indexData,
int64_t metadataSelector, const std::array<int64_t, 3> &shape,
Type intrinsicResultType, ConversionPatternRewriter &rewriter) {
@ -505,8 +506,9 @@ struct NVGPUMmaSparseSyncLowering
FailureOr<NVVM::MMATypes> ptxTypeB = getNvvmMmaType(bType);
if (failed(ptxTypeB))
return op->emitOpError("failed to deduce operand PTX types");
Optional<NVVM::MMATypes> ptxTypeC = NVVM::MmaOp::inferOperandMMAType(
cType.getElementType(), /*isAccumulator=*/true);
std::optional<NVVM::MMATypes> ptxTypeC =
NVVM::MmaOp::inferOperandMMAType(cType.getElementType(),
/*isAccumulator=*/true);
if (!ptxTypeC)
return op->emitError(
"could not infer the PTX type for the accumulator/result");
@ -517,7 +519,7 @@ struct NVGPUMmaSparseSyncLowering
return failure();
// TODO: add an attribute to the op to customize this behavior.
Optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
std::optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
if (aType.getElementType().isa<IntegerType>())
overflow = NVVM::MMAIntOverflow::satfinite;

View File

@ -595,7 +595,7 @@ void PatternLowering::generate(SuccessNode *successNode, Block *&currentBlock) {
// Grab the root kind if present.
StringAttr rootKindAttr;
if (pdl::OperationOp rootOp = root.getDefiningOp<pdl::OperationOp>())
if (Optional<StringRef> rootKind = rootOp.getOpName())
if (std::optional<StringRef> rootKind = rootOp.getOpName())
rootKindAttr = builder.getStringAttr(*rootKind);
builder.setInsertionPointToEnd(currentBlock);

View File

@ -222,7 +222,7 @@ struct OperandPosition
struct OperandGroupPosition
: public PredicateBase<
OperandGroupPosition, Position,
std::tuple<OperationPosition *, Optional<unsigned>, bool>,
std::tuple<OperationPosition *, std::optional<unsigned>, bool>,
Predicates::OperandGroupPos> {
explicit OperandGroupPosition(const KeyTy &key);
@ -233,7 +233,9 @@ struct OperandGroupPosition
/// Returns the group number of this position. If std::nullopt, this group
/// refers to all operands.
Optional<unsigned> getOperandGroupNumber() const { return std::get<1>(key); }
std::optional<unsigned> getOperandGroupNumber() const {
return std::get<1>(key);
}
/// Returns if the operand group has unknown size. If false, the operand group
/// has at most one element.
@ -298,7 +300,7 @@ struct ResultPosition
struct ResultGroupPosition
: public PredicateBase<
ResultGroupPosition, Position,
std::tuple<OperationPosition *, Optional<unsigned>, bool>,
std::tuple<OperationPosition *, std::optional<unsigned>, bool>,
Predicates::ResultGroupPos> {
explicit ResultGroupPosition(const KeyTy &key) : Base(key) {
parent = std::get<0>(key);
@ -311,7 +313,9 @@ struct ResultGroupPosition
/// Returns the group number of this position. If std::nullopt, this group
/// refers to all results.
Optional<unsigned> getResultGroupNumber() const { return std::get<1>(key); }
std::optional<unsigned> getResultGroupNumber() const {
return std::get<1>(key);
}
/// Returns if the result group has unknown size. If false, the result group
/// has at most one element.
@ -595,7 +599,7 @@ public:
}
/// Returns a position for a group of operands of the given operation.
Position *getOperandGroup(OperationPosition *p, Optional<unsigned> group,
Position *getOperandGroup(OperationPosition *p, std::optional<unsigned> group,
bool isVariadic) {
return OperandGroupPosition::get(uniquer, p, group, isVariadic);
}
@ -609,7 +613,7 @@ public:
}
/// Returns a position for a group of results of the given operation.
Position *getResultGroup(OperationPosition *p, Optional<unsigned> group,
Position *getResultGroup(OperationPosition *p, std::optional<unsigned> group,
bool isVariadic) {
return ResultGroupPosition::get(uniquer, p, group, isVariadic);
}
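The uniquing keys above embed std::optional<unsigned> inside a std::tuple. That keeps working after the migration because std::optional defines equality and ordering, with std::nullopt comparing less than any engaged value. A standalone check with plain containers (not PDL's uniquer):

#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <tuple>

int main() {
  using Key = std::tuple<int, std::optional<unsigned>, bool>;
  std::map<Key, const char *> positions;
  positions[{0, std::nullopt, true}] = "all operands";
  positions[{0, 1u, false}] = "operand group 1";

  for (const auto &[key, name] : positions) {
    const auto &group = std::get<1>(key);
    std::cout << (group ? "group " + std::to_string(*group)
                        : std::string("no group"))
              << " -> " << name << '\n';
  }
}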

View File

@ -81,7 +81,7 @@ static void getOperandTreePredicates(std::vector<PositionalPredicate> &predList,
builder.getType(pos));
})
.Case<pdl::ResultOp, pdl::ResultsOp>([&](auto op) {
Optional<unsigned> index = op.getIndex();
std::optional<unsigned> index = op.getIndex();
// Prevent traversal into a null value if the result has a proper index.
if (index)
@ -106,11 +106,11 @@ static void getOperandTreePredicates(std::vector<PositionalPredicate> &predList,
});
}
static void getTreePredicates(std::vector<PositionalPredicate> &predList,
Value val, PredicateBuilder &builder,
DenseMap<Value, Position *> &inputs,
OperationPosition *pos,
Optional<unsigned> ignoreOperand = std::nullopt) {
static void
getTreePredicates(std::vector<PositionalPredicate> &predList, Value val,
PredicateBuilder &builder,
DenseMap<Value, Position *> &inputs, OperationPosition *pos,
std::optional<unsigned> ignoreOperand = std::nullopt) {
assert(val.getType().isa<pdl::OperationType>() && "expected operation");
pdl::OperationOp op = cast<pdl::OperationOp>(val.getDefiningOp());
OperationPosition *opPos = cast<OperationPosition>(pos);
@ -120,7 +120,7 @@ static void getTreePredicates(std::vector<PositionalPredicate> &predList,
predList.emplace_back(pos, builder.getIsNotNull());
// Check that this is the correct root operation.
if (Optional<StringRef> opName = op.getOpName())
if (std::optional<StringRef> opName = op.getOpName())
predList.emplace_back(pos, builder.getOperationName(*opName));
// Check that the operation has the proper number of operands. If there are
@ -302,7 +302,7 @@ static void getResultPredicates(pdl::ResultsOp op,
// Ensure that the result isn't null if the result has an index.
auto *parentPos = cast<OperationPosition>(inputs.lookup(op.getParent()));
bool isVariadic = op.getType().isa<pdl::RangeType>();
Optional<unsigned> index = op.getIndex();
std::optional<unsigned> index = op.getIndex();
resultPos = builder.getResultGroup(parentPos, index, isVariadic);
if (index)
predList.emplace_back(resultPos, builder.getIsNotNull());
@ -356,7 +356,7 @@ namespace {
/// An op accepting a value at an optional index.
struct OpIndex {
Value parent;
Optional<unsigned> index;
std::optional<unsigned> index;
};
/// The parent and operand index of each operation for each root, stored
@ -408,12 +408,13 @@ static void buildCostGraph(ArrayRef<Value> roots, RootOrderingGraph &graph,
// * the operand index of the value in its parent;
// * the depth of the visited value.
struct Entry {
Entry(Value value, Value parent, Optional<unsigned> index, unsigned depth)
Entry(Value value, Value parent, std::optional<unsigned> index,
unsigned depth)
: value(value), parent(parent), index(index), depth(depth) {}
Value value;
Value parent;
Optional<unsigned> index;
std::optional<unsigned> index;
unsigned depth;
};

View File

@ -335,7 +335,7 @@ static bool isDimOpValidSymbol(ShapedDimOpInterface dimOp, Region *region) {
// The dim op is also okay if its operand memref is a view/subview whose
// corresponding size is a valid symbol.
Optional<int64_t> index = getConstantIntValue(dimOp.getDimension());
std::optional<int64_t> index = getConstantIntValue(dimOp.getDimension());
assert(index.has_value() &&
"expect only `dim` operations with a constant index");
int64_t i = index.value();
@ -923,7 +923,7 @@ template <typename OpTy, typename... Args>
static std::enable_if_t<OpTy::template hasTrait<OpTrait::OneResult>(),
OpFoldResult>
createOrFold(OpBuilder &b, Location loc, ValueRange operands,
Args &&... leadingArguments) {
Args &&...leadingArguments) {
// Identify the constant operands and extract their values as attributes.
// Note that we cannot use the original values directly because the list of
// operands may have changed due to canonicalization and composition.
@ -2009,7 +2009,7 @@ static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) {
namespace {
/// Returns constant trip count in trivial cases.
static Optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
static std::optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
int64_t step = forOp.getStep();
if (!forOp.hasConstantBounds() || step <= 0)
return std::nullopt;
@ -2030,7 +2030,7 @@ struct AffineForEmptyLoopFolder : public OpRewritePattern<AffineForOp> {
return failure();
if (forOp.getNumResults() == 0)
return success();
Optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
std::optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
if (tripCount && *tripCount == 0) {
// The initial values of the iteration arguments would be the op's
// results.
@ -2082,7 +2082,8 @@ void AffineForOp::getCanonicalizationPatterns(RewritePatternSet &results,
/// correspond to the loop iterator operands, i.e., those excluding the
/// induction variable. AffineForOp only has one region, so zero is the only
/// valid value for `index`.
OperandRange AffineForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange
AffineForOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
assert((!index || *index == 0) && "invalid region index");
// The initial operands map to the loop arguments after the induction
@ -2096,14 +2097,14 @@ OperandRange AffineForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
/// correspond to a constant value for each operand, or null if that operand is
/// not a constant.
void AffineForOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
assert((!index.has_value() || index.value() == 0) && "expected loop region");
// The loop may typically branch back to its body or to the parent operation.
// If the predecessor is the parent op and the trip count is known to be at
// least one, branch into the body using the iterator arguments. And in cases
// we know the trip count is zero, it can only branch back to its parent.
Optional<uint64_t> tripCount = getTrivialConstantTripCount(*this);
std::optional<uint64_t> tripCount = getTrivialConstantTripCount(*this);
if (!index.has_value() && tripCount.has_value()) {
if (tripCount.value() > 0) {
regions.push_back(RegionSuccessor(&getLoopBody(), getRegionIterArgs()));
@ -2130,7 +2131,7 @@ void AffineForOp::getSuccessorRegions(
/// Returns true if the affine.for has zero iterations in trivial cases.
static bool hasTrivialZeroTripCount(AffineForOp op) {
Optional<uint64_t> tripCount = getTrivialConstantTripCount(op);
std::optional<uint64_t> tripCount = getTrivialConstantTripCount(op);
return tripCount && *tripCount == 0;
}
@ -2262,23 +2263,23 @@ bool AffineForOp::matchingBoundOperandList() {
Region &AffineForOp::getLoopBody() { return getRegion(); }
Optional<Value> AffineForOp::getSingleInductionVar() {
std::optional<Value> AffineForOp::getSingleInductionVar() {
return getInductionVar();
}
Optional<OpFoldResult> AffineForOp::getSingleLowerBound() {
std::optional<OpFoldResult> AffineForOp::getSingleLowerBound() {
if (!hasConstantLowerBound())
return std::nullopt;
OpBuilder b(getContext());
return OpFoldResult(b.getI64IntegerAttr(getConstantLowerBound()));
}
Optional<OpFoldResult> AffineForOp::getSingleStep() {
std::optional<OpFoldResult> AffineForOp::getSingleStep() {
OpBuilder b(getContext());
return OpFoldResult(b.getI64IntegerAttr(getStep()));
}
Optional<OpFoldResult> AffineForOp::getSingleUpperBound() {
std::optional<OpFoldResult> AffineForOp::getSingleUpperBound() {
if (!hasConstantUpperBound())
return std::nullopt;
OpBuilder b(getContext());
@ -2541,7 +2542,7 @@ struct AlwaysTrueOrFalseIf : public OpRewritePattern<AffineIfOp> {
/// AffineIfOp has two regions -- `then` and `else`. The flow of data should be
/// as follows: AffineIfOp -> `then`/`else` -> AffineIfOp
void AffineIfOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// If the predecessor is an AffineIfOp, then branching into both `then` and
// `else` region is valid.
@ -3567,7 +3568,7 @@ AffineValueMap AffineParallelOp::getUpperBoundsValueMap() {
return AffineValueMap(getUpperBoundsMap(), getUpperBoundsOperands());
}
Optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {
std::optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {
if (hasMinMaxBounds())
return std::nullopt;
@ -3985,7 +3986,7 @@ ParseResult AffineParallelOp::parse(OpAsmParser &parser,
if (parser.parseAttribute(attrVal, builder.getNoneType(), "reduce",
attrStorage))
return failure();
llvm::Optional<arith::AtomicRMWKind> reduction =
std::optional<arith::AtomicRMWKind> reduction =
arith::symbolizeAtomicRMWKind(attrVal.getValue());
if (!reduction)
return parser.emitError(loc, "invalid reduction value: ") << attrVal;
@ -4231,7 +4232,7 @@ void AffineDelinearizeIndexOp::build(OpBuilder &builder, OperationState &result,
result.addOperands(linearIndex);
SmallVector<Value> basisValues =
llvm::to_vector(llvm::map_range(basis, [&](OpFoldResult ofr) -> Value {
Optional<int64_t> staticDim = getConstantIntValue(ofr);
std::optional<int64_t> staticDim = getConstantIntValue(ofr);
if (staticDim.has_value())
return builder.create<arith::ConstantIndexOp>(result.location,
*staticDim);
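getTrivialConstantTripCount now returns std::optional<uint64_t>, and the `tripCount && *tripCount == 0` tests above separate "known to be zero" from "unknown". A hedged, self-contained sketch with a toy bounds struct standing in for AffineForOp:

#include <cstdint>
#include <iostream>
#include <optional>

struct Bounds {
  bool constant;
  int64_t lb, ub, step;
};

static std::optional<uint64_t> getTrivialConstantTripCount(const Bounds &b) {
  if (!b.constant || b.step <= 0)
    return std::nullopt;  // Non-constant bounds: trip count unknown.
  return b.lb < b.ub ? static_cast<uint64_t>((b.ub - b.lb) / b.step) : 0;
}

int main() {
  // "Known zero" lets the loop be folded away; "unknown" must be left alone.
  std::optional<uint64_t> tc = getTrivialConstantTripCount({true, 0, 0, 1});
  if (tc && *tc == 0)
    std::cout << "loop provably never executes\n";
  std::optional<uint64_t> unknown =
      getTrivialConstantTripCount({false, 0, 10, 1});
  std::cout << (unknown ? "known" : "unknown") << '\n';
}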

View File

@ -246,7 +246,8 @@ void arith::AddIOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
// AddUIExtendedOp
//===----------------------------------------------------------------------===//
Optional<SmallVector<int64_t, 4>> arith::AddUIExtendedOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>>
arith::AddUIExtendedOp::getShapeForUnroll() {
if (auto vt = getType(0).dyn_cast<VectorType>())
return llvm::to_vector<4>(vt.getShape());
return std::nullopt;
@ -378,7 +379,8 @@ OpFoldResult arith::MulIOp::fold(ArrayRef<Attribute> operands) {
// MulSIExtendedOp
//===----------------------------------------------------------------------===//
Optional<SmallVector<int64_t, 4>> arith::MulSIExtendedOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>>
arith::MulSIExtendedOp::getShapeForUnroll() {
if (auto vt = getType(0).dyn_cast<VectorType>())
return llvm::to_vector<4>(vt.getShape());
return std::nullopt;
@ -424,7 +426,8 @@ void arith::MulSIExtendedOp::getCanonicalizationPatterns(
// MulUIExtendedOp
//===----------------------------------------------------------------------===//
Optional<SmallVector<int64_t, 4>> arith::MulUIExtendedOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>>
arith::MulUIExtendedOp::getShapeForUnroll() {
if (auto vt = getType(0).dyn_cast<VectorType>())
return llvm::to_vector<4>(vt.getShape());
return std::nullopt;
@ -1639,7 +1642,7 @@ static Attribute getBoolAttribute(Type type, MLIRContext *ctx, bool value) {
return DenseElementsAttr::get(shapedType, boolAttr);
}
static Optional<int64_t> getIntegerWidth(Type t) {
static std::optional<int64_t> getIntegerWidth(Type t) {
if (auto intType = t.dyn_cast<IntegerType>()) {
return intType.getWidth();
}
@ -1661,7 +1664,7 @@ OpFoldResult arith::CmpIOp::fold(ArrayRef<Attribute> operands) {
if (matchPattern(getRhs(), m_Zero())) {
if (auto extOp = getLhs().getDefiningOp<ExtSIOp>()) {
// extsi(%x : i1 -> iN) != 0 -> %x
Optional<int64_t> integerWidth =
std::optional<int64_t> integerWidth =
getIntegerWidth(extOp.getOperand().getType());
if (integerWidth && integerWidth.value() == 1 &&
getPredicate() == arith::CmpIPredicate::ne)
@ -1669,7 +1672,7 @@ OpFoldResult arith::CmpIOp::fold(ArrayRef<Attribute> operands) {
}
if (auto extOp = getLhs().getDefiningOp<ExtUIOp>()) {
// extui(%x : i1 -> iN) != 0 -> %x
Optional<int64_t> integerWidth =
std::optional<int64_t> integerWidth =
getIntegerWidth(extOp.getOperand().getType());
if (integerWidth && integerWidth.value() == 1 &&
getPredicate() == arith::CmpIPredicate::ne)

View File

@ -53,7 +53,7 @@ LogicalResult YieldOp::verify() {
}
MutableOperandRange
YieldOp::getMutableSuccessorOperands(Optional<unsigned> index) {
YieldOp::getMutableSuccessorOperands(std::optional<unsigned> index) {
return getOperandsMutable();
}
@ -63,7 +63,8 @@ YieldOp::getMutableSuccessorOperands(Optional<unsigned> index) {
constexpr char kOperandSegmentSizesAttr[] = "operand_segment_sizes";
OperandRange ExecuteOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange
ExecuteOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
assert(index && *index == 0 && "invalid region index");
return getBodyOperands();
}
@ -77,7 +78,7 @@ bool ExecuteOp::areTypesCompatible(Type lhs, Type rhs) {
return getValueOrTokenType(lhs) == getValueOrTokenType(rhs);
}
void ExecuteOp::getSuccessorRegions(Optional<unsigned> index,
void ExecuteOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute>,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `body` region branch back to the parent operation.

View File

@ -348,7 +348,7 @@ struct FoldDimOfAllocTensorOp : public OpRewritePattern<tensor::DimOp> {
LogicalResult matchAndRewrite(tensor::DimOp dimOp,
PatternRewriter &rewriter) const override {
Optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
std::optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
auto allocTensorOp = dimOp.getSource().getDefiningOp<AllocTensorOp>();
if (!allocTensorOp || !maybeConstantIndex)
return failure();

View File

@ -104,7 +104,7 @@ void BufferViewFlowAnalysis::build(Operation *op) {
successorRegions);
for (RegionSuccessor &successorRegion : successorRegions) {
// Determine the current region index (if any).
Optional<unsigned> regionIndex;
std::optional<unsigned> regionIndex;
Region *regionSuccessor = successorRegion.getSuccessor();
if (regionSuccessor)
regionIndex = regionSuccessor->getRegionNumber();

View File

@ -595,7 +595,7 @@ SuccessorOperands SwitchOp::getSuccessorOperands(unsigned index) {
}
Block *SwitchOp::getSuccessorForOperands(ArrayRef<Attribute> operands) {
Optional<DenseIntElementsAttr> caseValues = getCaseValues();
std::optional<DenseIntElementsAttr> caseValues = getCaseValues();
if (!caseValues)
return getDefaultDestination();
@ -805,7 +805,8 @@ simplifySwitchFromSwitchOnSameCondition(SwitchOp op,
SuccessorRange predDests = predSwitch.getCaseDestinations();
auto it = llvm::find(predDests, currentBlock);
if (it != predDests.end()) {
Optional<DenseIntElementsAttr> predCaseValues = predSwitch.getCaseValues();
std::optional<DenseIntElementsAttr> predCaseValues =
predSwitch.getCaseValues();
foldSwitch(op, rewriter,
predCaseValues->getValues<APInt>()[it - predDests.begin()]);
} else {

View File

@ -84,7 +84,7 @@ LogicalResult emitc::CallOp::verify() {
if (getCallee().empty())
return emitOpError("callee must not be empty");
if (Optional<ArrayAttr> argsAttr = getArgs()) {
if (std::optional<ArrayAttr> argsAttr = getArgs()) {
for (Attribute arg : *argsAttr) {
auto intAttr = arg.dyn_cast<IntegerAttr>();
if (intAttr && intAttr.getType().isa<IndexType>()) {
@ -102,7 +102,7 @@ LogicalResult emitc::CallOp::verify() {
}
}
if (Optional<ArrayAttr> templateArgsAttr = getTemplateArgs()) {
if (std::optional<ArrayAttr> templateArgsAttr = getTemplateArgs()) {
for (Attribute tArg : *templateArgsAttr) {
if (!tArg.isa<TypeAttr, IntegerAttr, FloatAttr, emitc::OpaqueAttr>())
return emitOpError("template argument has invalid type");

View File

@ -367,7 +367,8 @@ static ParseResult parseAllReduceOperation(AsmParser &parser,
AllReduceOperationAttr &attr) {
StringRef enumStr;
if (!parser.parseOptionalKeyword(&enumStr)) {
Optional<AllReduceOperation> op = gpu::symbolizeAllReduceOperation(enumStr);
std::optional<AllReduceOperation> op =
gpu::symbolizeAllReduceOperation(enumStr);
if (!op)
return parser.emitError(parser.getCurrentLocation(), "invalid op kind");
attr = AllReduceOperationAttr::get(parser.getContext(), *op);
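parseAllReduceOperation above is the typical consumer of the tblgen-generated symbolize* helpers, which now return std::optional<Enum>. A standalone sketch of that shape (the enum and names below are illustrative, not the generated API):

#include <iostream>
#include <optional>
#include <string_view>

enum class ReduceOp { add, mul, max };

// Illustrative stand-in for a generated symbolize* helper: unknown
// keywords map to std::nullopt, which the parser turns into an error.
static std::optional<ReduceOp> symbolizeReduceOp(std::string_view s) {
  if (s == "add") return ReduceOp::add;
  if (s == "mul") return ReduceOp::mul;
  if (s == "max") return ReduceOp::max;
  return std::nullopt;
}

int main() {
  for (std::string_view kw : {"mul", "bogus"}) {
    if (std::optional<ReduceOp> op = symbolizeReduceOp(kw))
      std::cout << kw << " -> kind " << static_cast<int>(*op) << '\n';
    else
      std::cout << "invalid op kind: " << kw << '\n';
  }
}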

View File

@ -26,8 +26,8 @@ using namespace mlir::transform;
/// Check if the given mapping attributes are among the desired attributes
static DiagnosedSilenceableFailure
checkAttributeType(ArrayRef<DeviceMappingAttrInterface> threadMappingAttributes,
const Optional<ArrayAttr> &foreachMapping,
Optional<TransformOpInterface> transformOp) {
const std::optional<ArrayAttr> &foreachMapping,
std::optional<TransformOpInterface> transformOp) {
if (!foreachMapping.has_value())
return transformOp->emitSilenceableError() << "mapping must be present";
@ -52,11 +52,11 @@ checkAttributeType(ArrayRef<DeviceMappingAttrInterface> threadMappingAttributes,
/// Determines if the size of the kernel configuration is supported by the GPU
/// architecture being used. It presently makes use of CUDA limitations;
/// however, that aspect may be enhanced for other GPUs.
static DiagnosedSilenceableFailure
checkGpuLimits(TransformOpInterface transformOp, Optional<int64_t> gridDimX,
Optional<int64_t> gridDimY, Optional<int64_t> gridDimZ,
Optional<int64_t> blockDimX, Optional<int64_t> blockDimY,
Optional<int64_t> blockDimZ) {
static DiagnosedSilenceableFailure checkGpuLimits(
TransformOpInterface transformOp, std::optional<int64_t> gridDimX,
std::optional<int64_t> gridDimY, std::optional<int64_t> gridDimZ,
std::optional<int64_t> blockDimX, std::optional<int64_t> blockDimY,
std::optional<int64_t> blockDimZ) {
static constexpr int maxTotalBlockdim = 1024;
static constexpr int maxBlockdimx = 1024;
@ -92,12 +92,12 @@ checkGpuLimits(TransformOpInterface transformOp, Optional<int64_t> gridDimX,
static DiagnosedSilenceableFailure
createGpuLaunch(RewriterBase &rewriter, Location loc,
TransformOpInterface transformOp, LaunchOp &launchOp,
Optional<int64_t> gridDimX = std::nullopt,
Optional<int64_t> gridDimY = std::nullopt,
Optional<int64_t> gridDimZ = std::nullopt,
Optional<int64_t> blockDimX = std::nullopt,
Optional<int64_t> blockDimY = std::nullopt,
Optional<int64_t> blockDimZ = std::nullopt) {
std::optional<int64_t> gridDimX = std::nullopt,
std::optional<int64_t> gridDimY = std::nullopt,
std::optional<int64_t> gridDimZ = std::nullopt,
std::optional<int64_t> blockDimX = std::nullopt,
std::optional<int64_t> blockDimY = std::nullopt,
std::optional<int64_t> blockDimZ = std::nullopt) {
DiagnosedSilenceableFailure diag =
checkGpuLimits(transformOp, gridDimX, gridDimY, gridDimZ, blockDimX,
blockDimY, blockDimZ);
@ -126,12 +126,12 @@ createGpuLaunch(RewriterBase &rewriter, Location loc,
static DiagnosedSilenceableFailure
alterGpuLaunch(TrivialPatternRewriter &rewriter, LaunchOp gpuLaunch,
TransformOpInterface transformOp,
Optional<int64_t> gridDimX = std::nullopt,
Optional<int64_t> gridDimY = std::nullopt,
Optional<int64_t> gridDimZ = std::nullopt,
Optional<int64_t> blockDimX = std::nullopt,
Optional<int64_t> blockDimY = std::nullopt,
Optional<int64_t> blockDimZ = std::nullopt) {
std::optional<int64_t> gridDimX = std::nullopt,
std::optional<int64_t> gridDimY = std::nullopt,
std::optional<int64_t> gridDimZ = std::nullopt,
std::optional<int64_t> blockDimX = std::nullopt,
std::optional<int64_t> blockDimY = std::nullopt,
std::optional<int64_t> blockDimZ = std::nullopt) {
DiagnosedSilenceableFailure diag =
checkGpuLimits(transformOp, gridDimX, gridDimY, gridDimZ, blockDimX,
blockDimY, blockDimZ);
@ -370,7 +370,7 @@ transform::MapForeachToBlocks::applyToOne(Operation *target,
static DiagnosedSilenceableFailure rewriteOneForeachThreadToGpuThreads(
RewriterBase &rewriter, scf::ForeachThreadOp foreachThreadOp,
const SmallVectorImpl<int64_t> &globalBlockDims, bool syncAfterDistribute,
llvm::Optional<TransformOpInterface> transformOp,
std::optional<TransformOpInterface> transformOp,
const ArrayRef<DeviceMappingAttrInterface> &threadMappingAttributes) {
// Step 0. Target-specific verifications. There is no good place to anchor
// those right now: the ForeachThreadOp is target-independent and the
@ -502,7 +502,7 @@ static DiagnosedSilenceableFailure rewriteOneForeachThreadToGpuThreads(
DiagnosedSilenceableFailure mlir::transform::gpu::mapNestedForeachToThreadsImpl(
RewriterBase &rewriter, Operation *target,
const SmallVectorImpl<int64_t> &blockDim, bool syncAfterDistribute,
llvm::Optional<TransformOpInterface> transformOp,
std::optional<TransformOpInterface> transformOp,
const ArrayRef<DeviceMappingAttrInterface> &threadMappingAttributes) {
DiagnosedSilenceableFailure diag = DiagnosedSilenceableFailure::success();
target->walk([&](scf::ForeachThreadOp foreachThreadOp) {
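checkGpuLimits, createGpuLaunch, and alterGpuLaunch above take six std::optional<int64_t> dimensions defaulted to std::nullopt. A minimal sketch of that parameter style, with an invented limits check (the constant mirrors the CUDA-derived maxTotalBlockdim above):

#include <cstdint>
#include <iostream>
#include <optional>

// Unset dimensions behave as 1, as in a 1-D or 2-D launch.
static bool checkLimits(std::optional<int64_t> blockDimX = std::nullopt,
                        std::optional<int64_t> blockDimY = std::nullopt,
                        std::optional<int64_t> blockDimZ = std::nullopt) {
  static constexpr int64_t maxTotalBlockdim = 1024;
  int64_t total = blockDimX.value_or(1) * blockDimY.value_or(1) *
                  blockDimZ.value_or(1);
  return total <= maxTotalBlockdim;
}

int main() {
  std::cout << checkLimits(256) << '\n';     // 1: 256x1x1 is within bounds
  std::cout << checkLimits(64, 32) << '\n';  // 0: 64x32x1 = 2048 exceeds it
}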

View File

@ -147,7 +147,7 @@ static ParseResult parseCmpOp(OpAsmParser &parser, OperationState &result) {
// Replace the string attribute `predicate` with an integer attribute.
int64_t predicateValue = 0;
if (std::is_same<CmpPredicateType, ICmpPredicate>()) {
Optional<ICmpPredicate> predicate =
std::optional<ICmpPredicate> predicate =
symbolizeICmpPredicate(predicateAttr.getValue());
if (!predicate)
return parser.emitError(predicateLoc)
@ -155,7 +155,7 @@ static ParseResult parseCmpOp(OpAsmParser &parser, OperationState &result) {
<< "' is an incorrect value of the 'predicate' attribute";
predicateValue = static_cast<int64_t>(*predicate);
} else {
Optional<FCmpPredicate> predicate =
std::optional<FCmpPredicate> predicate =
symbolizeFCmpPredicate(predicateAttr.getValue());
if (!predicate)
return parser.emitError(predicateLoc)
@ -253,7 +253,7 @@ ParseResult AllocaOp::parse(OpAsmParser &parser, OperationState &result) {
/// Checks that the elemental type is present in either the pointer type or
/// the attribute, but not both.
static LogicalResult verifyOpaquePtr(Operation *op, LLVMPointerType ptrType,
Optional<Type> ptrElementType) {
std::optional<Type> ptrElementType) {
if (ptrType.isOpaque() && !ptrElementType.has_value()) {
return op->emitOpError() << "expected '" << kElemTypeAttrName
<< "' attribute if opaque pointer type is used";
@ -665,7 +665,7 @@ LogicalResult LLVM::GEPOp::verify() {
}
Type LLVM::GEPOp::getSourceElementType() {
if (Optional<Type> elemType = getElemType())
if (std::optional<Type> elemType = getElemType())
return *elemType;
return extractVectorElementType(getBase().getType())
@ -1853,7 +1853,7 @@ LogicalResult GlobalOp::verify() {
}
}
Optional<uint64_t> alignAttr = getAlignment();
std::optional<uint64_t> alignAttr = getAlignment();
if (alignAttr.has_value()) {
uint64_t value = alignAttr.value();
if (!llvm::isPowerOf2_64(value))

View File

@ -77,8 +77,8 @@ LogicalResult CpAsyncOp::verify() {
// Given the element type of an operand and whether or not it is an accumulator,
// this function returns the PTX type (`NVVM::MMATypes`) that corresponds to the
// operand's element type.
Optional<mlir::NVVM::MMATypes> MmaOp::inferOperandMMAType(Type operandElType,
bool isAccumulator) {
std::optional<mlir::NVVM::MMATypes>
MmaOp::inferOperandMMAType(Type operandElType, bool isAccumulator) {
auto half2Type =
LLVM::getFixedVectorType(Float16Type::get(operandElType.getContext()), 2);
if (operandElType.isF64())
@ -118,14 +118,14 @@ static bool isIntegerPtxType(MMATypes type) {
}
MMATypes MmaOp::accumPtxType() {
Optional<mlir::NVVM::MMATypes> val = inferOperandMMAType(
std::optional<mlir::NVVM::MMATypes> val = inferOperandMMAType(
getODSOperands(2).getTypes().front(), /*isAccum=*/true);
assert(val.has_value() && "accumulator PTX type should always be inferrable");
return val.value();
}
MMATypes MmaOp::resultPtxType() {
Optional<mlir::NVVM::MMATypes> val =
std::optional<mlir::NVVM::MMATypes> val =
inferOperandMMAType(getResult().getType(), /*isAccum=*/true);
assert(val.has_value() && "result PTX type should always be inferrable");
return val.value();
@ -159,7 +159,7 @@ void MmaOp::print(OpAsmPrinter &p) {
regTypes.push_back(this->getOperand(operandIdx).getType());
}
}
Optional<MMATypes> inferredType =
std::optional<MMATypes> inferredType =
inferOperandMMAType(regTypes.back(), /*isAccum=*/fragIdx >= 2);
if (inferredType)
ignoreAttrNames.push_back(frag.ptxTypeAttr);
@ -191,10 +191,10 @@ void MmaOp::print(OpAsmPrinter &p) {
void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
ValueRange operandA, ValueRange operandB, ValueRange operandC,
ArrayRef<int64_t> shape, Optional<MMAB1Op> b1Op,
Optional<MMAIntOverflow> intOverflow,
Optional<std::array<MMATypes, 2>> multiplicandPtxTypes,
Optional<std::array<MMALayout, 2>> multiplicandLayouts) {
ArrayRef<int64_t> shape, std::optional<MMAB1Op> b1Op,
std::optional<MMAIntOverflow> intOverflow,
std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes,
std::optional<std::array<MMALayout, 2>> multiplicandLayouts) {
assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)");
MLIRContext *ctx = builder.getContext();
@ -247,7 +247,7 @@ void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
// `->` type($res)
ParseResult MmaOp::parse(OpAsmParser &parser, OperationState &result) {
struct OperandFragment {
Optional<MMATypes> elemtype;
std::optional<MMATypes> elemtype;
SmallVector<OpAsmParser::UnresolvedOperand, 4> regs;
SmallVector<Type> regTypes;
};

View File

@ -1569,7 +1569,7 @@ DiagnosedSilenceableFailure transform::tileToForeachThreadOpImpl(
RewriterBase &rewriter, transform::TransformState &state,
TransformOpInterface transformOp, ArrayRef<Operation *> targets,
ArrayRef<OpFoldResult> mixedNumThreads,
ArrayRef<OpFoldResult> mixedTileSizes, Optional<ArrayAttr> mapping,
ArrayRef<OpFoldResult> mixedTileSizes, std::optional<ArrayAttr> mapping,
SmallVector<Operation *> &tileOps, SmallVector<Operation *> &tiledOps) {
if (targets.empty())
return DiagnosedSilenceableFailure::success();

View File

@ -44,7 +44,7 @@ using llvm::MapVector;
static Value allocBuffer(ImplicitLocOpBuilder &b,
const LinalgPromotionOptions &options,
Type elementType, Value allocSize, DataLayout &layout,
Optional<unsigned> alignment = std::nullopt) {
std::optional<unsigned> alignment = std::nullopt) {
auto width = layout.getTypeSize(elementType);
IntegerAttr alignmentAttr;
@ -77,11 +77,10 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
/// no call back to do so is provided. The default is to allocate a
/// memref<..xi8> and return a view to get a memref type of shape
/// boundingSubViewSize.
static Optional<Value>
defaultAllocBufferCallBack(const LinalgPromotionOptions &options,
OpBuilder &builder, memref::SubViewOp subView,
ArrayRef<Value> boundingSubViewSize,
Optional<unsigned> alignment, DataLayout &layout) {
static std::optional<Value> defaultAllocBufferCallBack(
const LinalgPromotionOptions &options, OpBuilder &builder,
memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
std::optional<unsigned> alignment, DataLayout &layout) {
ShapedType viewType = subView.getType();
ImplicitLocOpBuilder b(subView.getLoc(), builder);
auto zero = b.createOrFold<arith::ConstantIndexOp>(0);
@ -136,7 +135,7 @@ struct LinalgOpInstancePromotionOptions {
CopyCallbackFn copyOutFn;
/// Alignment of promoted buffer.
Optional<unsigned> alignment;
std::optional<unsigned> alignment;
};
} // namespace
@ -166,7 +165,7 @@ LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
} else {
allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
ArrayRef<Value> boundingSubViewSize,
DataLayout &layout) -> Optional<Value> {
DataLayout &layout) -> std::optional<Value> {
return defaultAllocBufferCallBack(options, b, subViewOp,
boundingSubViewSize, alignment, layout);
};
@ -246,7 +245,8 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
// If a callback is not specified, then use the default implementation for
// allocating the promoted buffer.
Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
std::optional<Value> fullLocalView =
allocationFn(b, subView, fullSizes, layout);
if (!fullLocalView)
return failure();
SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
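The allocation callback now reports failure as std::nullopt rather than a sentinel value, and promoteSubviewAsNewBuffer simply propagates the disengaged optional. A minimal stand-in with invented toy types:

#include <functional>
#include <iostream>
#include <optional>

using AllocationFn = std::function<std::optional<int>(int size)>;

static std::optional<int> promoteBuffer(const AllocationFn &allocationFn,
                                        int size) {
  std::optional<int> fullLocalView = allocationFn(size);
  if (!fullLocalView)
    return std::nullopt;  // Propagate the failure to the caller.
  return *fullLocalView + 1;
}

int main() {
  AllocationFn alloc = [](int size) -> std::optional<int> {
    if (size > 4096)
      return std::nullopt;  // Refuse oversized promotions.
    return size;
  };
  std::cout << promoteBuffer(alloc, 128).value_or(-1) << '\n';     // 129
  std::cout << promoteBuffer(alloc, 1 << 20).value_or(-1) << '\n'; // -1
}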

View File

@ -191,9 +191,9 @@ mlir::linalg::computeMultiTileSizes(OpBuilder &builder, LinalgOp op,
static bool canOmitTileOffsetInBoundsCheck(OpFoldResult tileSize,
OpFoldResult numThreads,
OpFoldResult iterationSize) {
Optional<int64_t> tileSizeConst = getConstantIntValue(tileSize);
Optional<int64_t> numThreadsConst = getConstantIntValue(numThreads);
Optional<int64_t> iterSizeConst = getConstantIntValue(iterationSize);
std::optional<int64_t> tileSizeConst = getConstantIntValue(tileSize);
std::optional<int64_t> numThreadsConst = getConstantIntValue(numThreads);
std::optional<int64_t> iterSizeConst = getConstantIntValue(iterationSize);
if (!tileSizeConst || !numThreadsConst || !iterSizeConst)
return false;
return *tileSizeConst * (*numThreadsConst - 1) < *iterSizeConst;
@ -221,7 +221,7 @@ static void calculateTileOffsetsAndSizes(
RewriterBase &b, Location loc, scf::ForeachThreadOp foreachThreadOp,
ArrayRef<OpFoldResult> numThreads, SmallVector<Range> loopRanges,
bool omitTileOffsetBoundsCheck,
Optional<ArrayRef<OpFoldResult>> nominalTileSizes,
std::optional<ArrayRef<OpFoldResult>> nominalTileSizes,
SmallVector<OpFoldResult> &tiledOffsets,
SmallVector<OpFoldResult> &tiledSizes) {
OpBuilder::InsertionGuard g(b);
@ -302,8 +302,8 @@ static void calculateTileOffsetsAndSizes(
/// assume that `tileSize[i] * (numThread[i] -1) <= dimSize[i]` holds.
static FailureOr<ForeachThreadTilingResult> tileToForeachThreadOpImpl(
RewriterBase &b, TilingInterface op, ArrayRef<OpFoldResult> numThreads,
Optional<ArrayRef<OpFoldResult>> nominalTileSizes,
Optional<ArrayAttr> mapping, bool omitTileOffsetBoundsCheck) {
std::optional<ArrayRef<OpFoldResult>> nominalTileSizes,
std::optional<ArrayAttr> mapping, bool omitTileOffsetBoundsCheck) {
Location loc = op->getLoc();
OpBuilder::InsertionGuard g(b);
@ -399,7 +399,7 @@ static FailureOr<ForeachThreadTilingResult> tileToForeachThreadOpImpl(
FailureOr<ForeachThreadTilingResult>
linalg::tileToForeachThreadOp(RewriterBase &b, TilingInterface op,
ArrayRef<OpFoldResult> numThreads,
Optional<ArrayAttr> mapping) {
std::optional<ArrayAttr> mapping) {
return tileToForeachThreadOpImpl(b, op, numThreads,
/*nominalTileSizes=*/std::nullopt, mapping,
/*omitTileOffsetBoundsCheck=*/false);
@ -408,7 +408,7 @@ linalg::tileToForeachThreadOp(RewriterBase &b, TilingInterface op,
FailureOr<ForeachThreadTilingResult>
linalg::tileToForeachThreadOpUsingTileSizes(RewriterBase &b, TilingInterface op,
ArrayRef<OpFoldResult> tileSizes,
Optional<ArrayAttr> mapping) {
std::optional<ArrayAttr> mapping) {
SmallVector<Range> loopRanges = op.getIterationDomain(b);
unsigned nLoops = loopRanges.size();
SmallVector<OpFoldResult> numThreads;
@ -586,7 +586,7 @@ linalg::tileReductionUsingForeachThread(RewriterBase &b,
PartialReductionOpInterface op,
ArrayRef<OpFoldResult> numThreads,
ArrayRef<OpFoldResult> tileSizes,
Optional<ArrayAttr> mapping) {
std::optional<ArrayAttr> mapping) {
Location loc = op.getLoc();
OpBuilder::InsertionGuard g(b);

View File

@ -460,7 +460,7 @@ LogicalResult ExtractSliceOfPadTensorSwapPattern::matchAndRewrite(
bool zeroSliceGuard = true;
if (controlFn) {
if (Optional<bool> control = controlFn(sliceOp))
if (std::optional<bool> control = controlFn(sliceOp))
zeroSliceGuard = *control;
else
return failure();
@ -501,7 +501,7 @@ static Value getPackOpSourceOrPaddedSource(OpBuilder &builder,
}
// The size is less than or equal to tileSize because outer dims are all 1s.
Optional<int64_t> tileSize =
std::optional<int64_t> tileSize =
getConstantIntValue(tileAndPosMapping.lookup(dim));
assert(tileSize.has_value() && "dynamic inner tile size is not supported");
paddedShape.push_back(tileSize.value());

View File

@ -373,14 +373,13 @@ struct VectorizationResult {
Operation *newOp;
};
llvm::Optional<vector::CombiningKind>
std::optional<vector::CombiningKind>
mlir::linalg::getCombinerOpKind(Operation *combinerOp) {
using ::mlir::vector::CombiningKind;
if (!combinerOp)
return std::nullopt;
return llvm::TypeSwitch<Operation *, llvm::Optional<CombiningKind>>(
combinerOp)
return llvm::TypeSwitch<Operation *, std::optional<CombiningKind>>(combinerOp)
.Case<arith::AddIOp, arith::AddFOp>(
[&](auto op) { return CombiningKind::ADD; })
.Case<arith::AndIOp>([&](auto op) { return CombiningKind::AND; })
@ -1847,7 +1846,7 @@ struct Conv1DGenerator
Operation *reduceOp = matchLinalgReduction(linalgOp.getDpsInitOperand(0));
if (!reduceOp)
return;
llvm::Optional<vector::CombiningKind> maybeKind;
std::optional<vector::CombiningKind> maybeKind;
maybeKind = getCombinerOpKind(reduceOp);
if (!maybeKind || *maybeKind != vector::CombiningKind::ADD)
return;

View File

@ -815,7 +815,7 @@ computeSliceParameters(OpBuilder &builder, Location loc, Value valueToTile,
// b. The subshape size is 1. According to the way the loops are set up,
// tensors with "0" dimensions would never be constructed.
int64_t shapeSize = shape[r];
Optional<int64_t> sizeCst = getConstantIntValue(size);
std::optional<int64_t> sizeCst = getConstantIntValue(size);
auto hasTileSizeOne = sizeCst && *sizeCst == 1;
auto dividesEvenly = sizeCst && !ShapedType::isDynamic(shapeSize) &&
((shapeSize % *sizeCst) == 0);

View File

@ -184,7 +184,8 @@ static void constifyIndexValues(
ofr.get<Attribute>().cast<IntegerAttr>().getInt());
continue;
}
Optional<int64_t> maybeConstant = getConstantIntValue(ofr.get<Value>());
std::optional<int64_t> maybeConstant =
getConstantIntValue(ofr.get<Value>());
if (maybeConstant)
ofr = builder.getIndexAttr(*maybeConstant);
}
@ -458,7 +459,7 @@ ParseResult AllocaScopeOp::parse(OpAsmParser &parser, OperationState &result) {
}
void AllocaScopeOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (index) {
regions.push_back(RegionSuccessor(getResults()));
@ -922,7 +923,7 @@ void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
build(builder, result, source, indexValue);
}
Optional<int64_t> DimOp::getConstantIndex() {
std::optional<int64_t> DimOp::getConstantIndex() {
return getConstantIntValue(getIndex());
}
@ -942,7 +943,7 @@ Speculation::Speculatability DimOp::getSpeculatability() {
LogicalResult DimOp::verify() {
// Assume unknown index to be in range.
Optional<int64_t> index = getConstantIndex();
std::optional<int64_t> index = getConstantIndex();
if (!index)
return success();
@ -977,7 +978,7 @@ static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
/// This accounts for cases where there are multiple unit-dims, but only a
/// subset of those are dropped. For MemRefTypes these can be disambiguated
/// using the strides. If a dimension is dropped the stride must be dropped too.
static llvm::Optional<llvm::SmallBitVector>
static std::optional<llvm::SmallBitVector>
computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
ArrayRef<OpFoldResult> sizes) {
llvm::SmallBitVector unusedDims(originalType.getRank());
@ -1049,7 +1050,7 @@ computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
llvm::SmallBitVector SubViewOp::getDroppedDims() {
MemRefType sourceType = getSourceType();
MemRefType resultType = getType();
llvm::Optional<llvm::SmallBitVector> unusedDims =
std::optional<llvm::SmallBitVector> unusedDims =
computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
assert(unusedDims && "unable to find unused dims of subview");
return *unusedDims;
@ -1364,7 +1365,7 @@ void ExtractAlignedPointerAsIndexOp::getAsmResultNames(
/// The number and type of the results are inferred from the
/// shape of the source.
LogicalResult ExtractStridedMetadataOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
ExtractStridedMetadataOpAdaptor extractAdaptor(operands, attributes, regions);
@ -1625,7 +1626,7 @@ LogicalResult GlobalOp::verify() {
}
}
if (Optional<uint64_t> alignAttr = getAlignment()) {
if (std::optional<uint64_t> alignAttr = getAlignment()) {
uint64_t alignment = *alignAttr;
if (!llvm::isPowerOf2_64(alignment))
@ -2610,7 +2611,7 @@ Type SubViewOp::inferRankReducedResultType(ArrayRef<int64_t> resultShape,
return inferredType;
// Compute which dimensions are dropped.
Optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
std::optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
computeRankReductionMask(inferredType.getShape(), resultShape);
assert(dimsToProject.has_value() && "invalid rank reduction");
@ -2887,7 +2888,7 @@ static MemRefType getCanonicalSubViewResultType(
auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
mixedSizes, mixedStrides)
.cast<MemRefType>();
llvm::Optional<llvm::SmallBitVector> unusedDims =
std::optional<llvm::SmallBitVector> unusedDims =
computeMemRefRankReductionMask(currentSourceType, currentResultType,
mixedSizes);
// Return nullptr as failure mode.
@ -2970,14 +2971,14 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) {
// Check offsets are zero.
if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
Optional<int64_t> intValue = getConstantIntValue(ofr);
std::optional<int64_t> intValue = getConstantIntValue(ofr);
return !intValue || intValue.value() != 0;
}))
return false;
// Check strides are one.
if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
Optional<int64_t> intValue = getConstantIntValue(ofr);
std::optional<int64_t> intValue = getConstantIntValue(ofr);
return !intValue || intValue.value() != 1;
}))
return false;
@ -2985,7 +2986,7 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) {
// Check all size values are static and match the (static) source shape.
ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
for (const auto &size : llvm::enumerate(mixedSizes)) {
Optional<int64_t> intValue = getConstantIntValue(size.value());
std::optional<int64_t> intValue = getConstantIntValue(size.value());
if (!intValue || *intValue != sourceShape[size.index()])
return false;
}
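The triviality checks above fold "not a constant" and "constant with the wrong value" into a single predicate over optionals. The same shape in portable C++, with std::any_of over plain std::optional values instead of OpFoldResult:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

int main() {
  std::vector<std::optional<int64_t>> mixedOffsets = {0, 0, std::nullopt};
  bool nonZero = std::any_of(
      mixedOffsets.begin(), mixedOffsets.end(),
      [](std::optional<int64_t> v) { return !v || *v != 0; });
  // An unknown offset is treated exactly like a known non-zero one.
  std::cout << (nonZero ? "not trivial" : "trivial") << '\n';
}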

View File

@ -98,9 +98,9 @@ FailureOr<memref::AllocOp> mlir::memref::multiBuffer(memref::AllocOp allocOp,
}
if (!candidateLoop)
return failure();
llvm::Optional<Value> inductionVar = candidateLoop.getSingleInductionVar();
llvm::Optional<OpFoldResult> lowerBound = candidateLoop.getSingleLowerBound();
llvm::Optional<OpFoldResult> singleStep = candidateLoop.getSingleStep();
std::optional<Value> inductionVar = candidateLoop.getSingleInductionVar();
std::optional<OpFoldResult> lowerBound = candidateLoop.getSingleLowerBound();
std::optional<OpFoldResult> singleStep = candidateLoop.getSingleStep();
if (!inductionVar || !lowerBound || !singleStep)
return failure();
@ -125,13 +125,12 @@ FailureOr<memref::AllocOp> mlir::memref::multiBuffer(memref::AllocOp allocOp,
AffineExpr induc = getAffineDimExpr(0, allocOp.getContext());
unsigned dimCount = 1;
auto getAffineExpr = [&](OpFoldResult e) -> AffineExpr {
if (Optional<int64_t> constValue = getConstantIntValue(e)) {
if (std::optional<int64_t> constValue = getConstantIntValue(e)) {
return getAffineConstantExpr(*constValue, allocOp.getContext());
}
auto value = getOrCreateValue(e, builder, candidateLoop->getLoc());
operands.push_back(value);
return getAffineDimExpr(dimCount++, allocOp.getContext());
};
auto init = getAffineExpr(*lowerBound);
auto step = getAffineExpr(*singleStep);

View File

@ -46,7 +46,7 @@ struct DimOfShapedTypeOpInterface : public OpRewritePattern<OpTy> {
if (!shapedTypeOp)
return failure();
Optional<int64_t> dimIndex = dimOp.getConstantIndex();
std::optional<int64_t> dimIndex = dimOp.getConstantIndex();
if (!dimIndex)
return failure();
@ -88,7 +88,7 @@ struct DimOfReifyRankedShapedTypeOpInterface : public OpRewritePattern<OpTy> {
if (!rankedShapeTypeOp)
return failure();
Optional<int64_t> dimIndex = dimOp.getConstantIndex();
std::optional<int64_t> dimIndex = dimOp.getConstantIndex();
if (!dimIndex)
return failure();

View File

@ -149,7 +149,7 @@ getShmReadAndWriteOps(Operation *parentOp, Value shmMemRef,
MemoryEffectOpInterface iface = dyn_cast<MemoryEffectOpInterface>(op);
if (!iface)
return;
Optional<MemoryEffects::EffectInstance> effect =
std::optional<MemoryEffects::EffectInstance> effect =
iface.getEffectOnValue<MemoryEffects::Read>(shmMemRef);
if (effect) {
readOps.push_back(op);

View File

@ -117,7 +117,7 @@ static ParseResult parseClauseAttr(AsmParser &parser, ClauseAttr &attr) {
SMLoc loc = parser.getCurrentLocation();
if (parser.parseKeyword(&enumStr))
return failure();
if (Optional<ClauseT> enumValue = symbolizeEnum<ClauseT>(enumStr)) {
if (std::optional<ClauseT> enumValue = symbolizeEnum<ClauseT>(enumStr)) {
attr = ClauseAttr::get(parser.getContext(), *enumValue);
return success();
}
@ -173,9 +173,9 @@ static void printLinearClause(OpAsmPrinter &p, Operation *op,
//===----------------------------------------------------------------------===//
// Parser, verifier and printer for Aligned Clause
//===----------------------------------------------------------------------===//
static LogicalResult verifyAlignedClause(Operation *op,
Optional<ArrayAttr> alignmentValues,
OperandRange alignedVariables) {
static LogicalResult
verifyAlignedClause(Operation *op, std::optional<ArrayAttr> alignmentValues,
OperandRange alignedVariables) {
// Check if the number of alignment values equals the number of aligned
// variables
if (!alignedVariables.empty()) {
if (!alignmentValues || alignmentValues->size() != alignedVariables.size())
@ -236,7 +236,7 @@ static ParseResult parseAlignedClause(
static void printAlignedClause(OpAsmPrinter &p, Operation *op,
ValueRange alignedVars,
TypeRange alignedVarTypes,
Optional<ArrayAttr> alignmentValues) {
std::optional<ArrayAttr> alignmentValues) {
for (unsigned i = 0; i < alignedVars.size(); ++i) {
if (i != 0)
p << ", ";
@ -293,11 +293,11 @@ verifyScheduleModifiers(OpAsmParser &parser,
static ParseResult parseScheduleClause(
OpAsmParser &parser, ClauseScheduleKindAttr &scheduleAttr,
ScheduleModifierAttr &scheduleModifier, UnitAttr &simdModifier,
Optional<OpAsmParser::UnresolvedOperand> &chunkSize, Type &chunkType) {
std::optional<OpAsmParser::UnresolvedOperand> &chunkSize, Type &chunkType) {
StringRef keyword;
if (parser.parseKeyword(&keyword))
return failure();
llvm::Optional<mlir::omp::ClauseScheduleKind> schedule =
std::optional<mlir::omp::ClauseScheduleKind> schedule =
symbolizeClauseScheduleKind(keyword);
if (!schedule)
return parser.emitError(parser.getNameLoc()) << " expected schedule kind";
@ -334,7 +334,7 @@ static ParseResult parseScheduleClause(
if (!modifiers.empty()) {
SMLoc loc = parser.getCurrentLocation();
if (Optional<ScheduleModifier> mod =
if (std::optional<ScheduleModifier> mod =
symbolizeScheduleModifier(modifiers[0])) {
scheduleModifier = ScheduleModifierAttr::get(parser.getContext(), *mod);
} else {
@ -396,7 +396,7 @@ parseReductionVarList(OpAsmParser &parser,
static void printReductionVarList(OpAsmPrinter &p, Operation *op,
OperandRange reductionVars,
TypeRange reductionTypes,
Optional<ArrayAttr> reductions) {
std::optional<ArrayAttr> reductions) {
for (unsigned i = 0, e = reductions->size(); i < e; ++i) {
if (i != 0)
p << ", ";
@ -407,7 +407,7 @@ static void printReductionVarList(OpAsmPrinter &p, Operation *op,
/// Verifies Reduction Clause
static LogicalResult verifyReductionVarList(Operation *op,
Optional<ArrayAttr> reductions,
std::optional<ArrayAttr> reductions,
OperandRange reductionVars) {
if (!reductionVars.empty()) {
if (!reductions || reductions->size() != reductionVars.size())
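With std::optional<ArrayAttr>, `reductions->size()` goes through std::optional::operator-> into the wrapped attribute, and dereferencing a disengaged optional is undefined behavior; hence the null test precedes the size comparison. A standalone illustration, with std::vector standing in for ArrayAttr:

#include <iostream>
#include <optional>
#include <vector>

static bool verifyReductionVarList(std::optional<std::vector<int>> reductions,
                                   const std::vector<int> &reductionVars) {
  if (!reductionVars.empty()) {
    // Check the optional first; only then is reductions->size() safe.
    if (!reductions || reductions->size() != reductionVars.size())
      return false;
  }
  return true;
}

int main() {
  std::cout << verifyReductionVarList(std::nullopt, {1, 2}) << '\n';  // 0
  std::cout << verifyReductionVarList(std::vector<int>{3, 4}, {1, 2})
            << '\n';                                                  // 1
}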

View File

@ -112,7 +112,7 @@ LogicalResult ApplyNativeRewriteOp::verify() {
LogicalResult AttributeOp::verify() {
Value attrType = getValueType();
Optional<Attribute> attrValue = getValue();
std::optional<Attribute> attrValue = getValue();
if (!attrValue) {
if (isa<RewriteOp>((*this)->getParentOp()))
@ -203,7 +203,7 @@ static LogicalResult verifyResultTypesAreInferrable(OperationOp op,
if (resultTypes.empty()) {
// If we don't know the concrete operation, don't attempt any verification.
// We can't make assumptions if we don't know the concrete operation.
Optional<StringRef> rawOpName = op.getOpName();
std::optional<StringRef> rawOpName = op.getOpName();
if (!rawOpName)
return success();
Optional<RegisteredOperationName> opName =
@ -290,7 +290,7 @@ LogicalResult OperationOp::verify() {
}
bool OperationOp::hasTypeInference() {
if (Optional<StringRef> rawOpName = getOpName()) {
if (std::optional<StringRef> rawOpName = getOpName()) {
OperationName opName(*rawOpName, getContext());
return opName.hasInterface<InferTypeOpInterface>();
}
@ -298,7 +298,7 @@ bool OperationOp::hasTypeInference() {
}
bool OperationOp::mightHaveTypeInference() {
if (Optional<StringRef> rawOpName = getOpName()) {
if (std::optional<StringRef> rawOpName = getOpName()) {
OperationName opName(*rawOpName, getContext());
return opName.mightHaveInterface<InferTypeOpInterface>();
}

View File

@ -248,7 +248,7 @@ void ExecuteRegionOp::getCanonicalizationPatterns(RewritePatternSet &results,
/// correspond to a constant value for each operand, or null if that operand is
/// not a constant.
void ExecuteRegionOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// If the predecessor is the ExecuteRegionOp, branch into the body.
if (!index) {
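Every RegionBranchOpInterface hook in this patch swaps llvm::Optional<unsigned> for std::optional<unsigned> while keeping the convention that std::nullopt means control arrives from the op itself rather than from one of its regions. A schematic of that convention, with the MLIR types reduced to a placeholder:

#include <optional>
#include <vector>

struct RegionSuccessor { int regionOrParent; }; // placeholder, not mlir's type

// nullopt: control enters from the parent op; otherwise `index` names the
// region control is leaving.
static void getSuccessorRegions(std::optional<unsigned> index,
                                std::vector<RegionSuccessor> &regions) {
  if (!index) {
    regions.push_back({0}); // branch into the single body region
    return;
  }
  regions.push_back({-1}); // leaving region *index: return to the parent
}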
@ -265,7 +265,7 @@ void ExecuteRegionOp::getSuccessorRegions(
//===----------------------------------------------------------------------===//
MutableOperandRange
ConditionOp::getMutableSuccessorOperands(Optional<unsigned> index) {
ConditionOp::getMutableSuccessorOperands(std::optional<unsigned> index) {
// Pass all operands except the condition to the successor region.
return getArgsMutable();
}
@ -352,17 +352,19 @@ LogicalResult ForOp::verifyRegions() {
return success();
}
Optional<Value> ForOp::getSingleInductionVar() { return getInductionVar(); }
std::optional<Value> ForOp::getSingleInductionVar() {
return getInductionVar();
}
Optional<OpFoldResult> ForOp::getSingleLowerBound() {
std::optional<OpFoldResult> ForOp::getSingleLowerBound() {
return OpFoldResult(getLowerBound());
}
Optional<OpFoldResult> ForOp::getSingleStep() {
std::optional<OpFoldResult> ForOp::getSingleStep() {
return OpFoldResult(getStep());
}
Optional<OpFoldResult> ForOp::getSingleUpperBound() {
std::optional<OpFoldResult> ForOp::getSingleUpperBound() {
return OpFoldResult(getUpperBound());
}
@ -476,7 +478,7 @@ ForOp mlir::scf::getForInductionVarOwner(Value val) {
/// correspond to the loop iterator operands, i.e., those excluding the
/// induction variable. ForOp only has one region, so 0 is the only valid value
/// for `index`.
OperandRange ForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange ForOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
assert(index && *index == 0 && "invalid region index");
// The initial operands map to the loop arguments after the induction
@ -489,7 +491,7 @@ OperandRange ForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
/// during the flow of control. `operands` is a set of optional attributes that
/// correspond to a constant value for each operand, or null if that operand is
/// not a constant.
void ForOp::getSuccessorRegions(Optional<unsigned> index,
void ForOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// If the predecessor is the ForOp, branch into the body using the iterator
@ -721,7 +723,7 @@ struct ForOpIterArgsFolder : public OpRewritePattern<scf::ForOp> {
/// Util function that tries to compute a constant diff between u and l.
/// Returns std::nullopt when the difference between the two values is
/// dynamic.
static Optional<int64_t> computeConstDiff(Value l, Value u) {
static std::optional<int64_t> computeConstDiff(Value l, Value u) {
IntegerAttr clb, cub;
if (matchPattern(l, m_Constant(&clb)) && matchPattern(u, m_Constant(&cub))) {
llvm::APInt lbValue = clb.getValue();
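computeConstDiff folds both bounds to constants or gives up; a self-contained sketch of the same std::optional flow, with a null pointer standing in for a value that matchPattern cannot fold (all names here are illustrative):

#include <cstdint>
#include <optional>

// Stand-in for matchPattern(v, m_Constant(&attr)).
static std::optional<int64_t> getConstant(const int64_t *maybe) {
  if (maybe)
    return *maybe;
  return std::nullopt;
}

// Both bounds must be constant; otherwise the difference is dynamic.
static std::optional<int64_t> computeConstDiff(const int64_t *l,
                                               const int64_t *u) {
  std::optional<int64_t> lb = getConstant(l);
  std::optional<int64_t> ub = getConstant(u);
  if (!lb || !ub)
    return std::nullopt;
  return *ub - *lb;
}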
@ -754,7 +756,7 @@ struct SimplifyTrivialLoops : public OpRewritePattern<ForOp> {
return success();
}
Optional<int64_t> diff =
std::optional<int64_t> diff =
computeConstDiff(op.getLowerBound(), op.getUpperBound());
if (!diff)
return failure();
@ -765,7 +767,7 @@ struct SimplifyTrivialLoops : public OpRewritePattern<ForOp> {
return success();
}
llvm::Optional<llvm::APInt> maybeStepValue = op.getConstantStep();
std::optional<llvm::APInt> maybeStepValue = op.getConstantStep();
if (!maybeStepValue)
return failure();
@ -1068,7 +1070,7 @@ void ForOp::getCanonicalizationPatterns(RewritePatternSet &results,
LastTensorLoadCanonicalization, ForOpTensorCastFolder>(context);
}
Optional<APInt> ForOp::getConstantStep() {
std::optional<APInt> ForOp::getConstantStep() {
IntegerAttr step;
if (matchPattern(getStep(), m_Constant(&step)))
return step.getValue();
@ -1212,7 +1214,7 @@ ParseResult ForeachThreadOp::parse(OpAsmParser &parser,
void ForeachThreadOp::build(mlir::OpBuilder &builder,
mlir::OperationState &result, ValueRange outputs,
ValueRange numThreads,
Optional<ArrayAttr> mapping) {
std::optional<ArrayAttr> mapping) {
result.addOperands(numThreads);
result.addOperands(outputs);
if (mapping.has_value()) {
@ -1565,7 +1567,7 @@ void IfOp::print(OpAsmPrinter &p) {
/// during the flow of control. `operands` is a set of optional attributes that
/// correspond to a constant value for each operand, or null if that operand is
/// not a constant.
void IfOp::getSuccessorRegions(Optional<unsigned> index,
void IfOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `then` and the `else` region branch back to the parent operation.
@ -2723,7 +2725,7 @@ void WhileOp::build(::mlir::OpBuilder &odsBuilder,
afterBuilder(odsBuilder, odsState.location, afterBlock->getArguments());
}
OperandRange WhileOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange WhileOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
assert(index && *index == 0 &&
"WhileOp is expected to branch only to the first region");
@ -2746,7 +2748,7 @@ Block::BlockArgListType WhileOp::getAfterArguments() {
return getAfter().front().getArguments();
}
void WhileOp::getSuccessorRegions(Optional<unsigned> index,
void WhileOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The parent op always branches to the condition region.
@ -3524,7 +3526,7 @@ Block &scf::IndexSwitchOp::getCaseBlock(unsigned idx) {
}
void IndexSwitchOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &successors) {
// All regions branch back to the parent op.
if (index) {

View File

@ -453,8 +453,8 @@ static FailureOr<BaseMemRefType> computeLoopRegionIterArgBufferType(
/// Return `true` if the given loop may have 0 iterations.
bool mayHaveZeroIterations(scf::ForOp forOp) {
Optional<int64_t> lb = getConstantIntValue(forOp.getLowerBound());
Optional<int64_t> ub = getConstantIntValue(forOp.getUpperBound());
std::optional<int64_t> lb = getConstantIntValue(forOp.getLowerBound());
std::optional<int64_t> ub = getConstantIntValue(forOp.getUpperBound());
if (!lb.has_value() || !ub.has_value())
return true;
return *ub <= *lb;
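This bufferization helper is the typical consumer of getConstantIntValue's new return type: if either bound is unknown, the conservative answer is that the loop may be empty. A freestanding sketch of the same test:

#include <cstdint>
#include <optional>

// Conservative zero-trip-count test over possibly-unknown bounds.
static bool mayHaveZeroIterations(std::optional<int64_t> lb,
                                  std::optional<int64_t> ub) {
  if (!lb.has_value() || !ub.has_value())
    return true; // a dynamic bound may yield an empty iteration space
  return *ub <= *lb;
}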
@ -1055,7 +1055,7 @@ struct YieldOpInterface
bool mayHaveZeroIterations(scf::ForeachThreadOp foreachThreadOp) {
int64_t p = 1;
for (Value v : foreachThreadOp.getNumThreads()) {
if (Optional<int64_t> c = getConstantIntValue(v)) {
if (std::optional<int64_t> c = getConstantIntValue(v)) {
p *= *c;
} else {
return true;

View File

@ -66,13 +66,13 @@ fillInterchangeVector(ArrayRef<int64_t> interchangeVector,
// Check if `stride` evenly divides the trip count `size - offset`.
static bool tileDividesIterationDomain(Range loopRange) {
Optional<int64_t> offsetAsInt = getConstantIntValue(loopRange.offset);
std::optional<int64_t> offsetAsInt = getConstantIntValue(loopRange.offset);
if (!offsetAsInt)
return false;
Optional<int64_t> sizeAsInt = getConstantIntValue(loopRange.size);
std::optional<int64_t> sizeAsInt = getConstantIntValue(loopRange.size);
if (!sizeAsInt)
return false;
Optional<int64_t> strideAsInt = getConstantIntValue(loopRange.stride);
std::optional<int64_t> strideAsInt = getConstantIntValue(loopRange.stride);
if (!strideAsInt)
return false;
return ((sizeAsInt.value() - offsetAsInt.value()) % strideAsInt.value() == 0);
@ -83,7 +83,7 @@ static bool tileDividesIterationDomain(Range loopRange) {
static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
Range loopRange, Value iv,
Value tileSize) {
Optional<int64_t> ts = getConstantIntValue(tileSize);
std::optional<int64_t> ts = getConstantIntValue(tileSize);
if (ts && ts.value() == 1)
return getAsOpFoldResult(tileSize);
@ -484,10 +484,10 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b,
/// `iter_args` of the outermost loop that is encountered. Traversing the iter_args
/// indicates that this is a destination operand of the consumer. If there was
/// no loop traversal needed, the second value of the returned tuple is empty.
static std::tuple<OpResult, Optional<OpOperand *>>
static std::tuple<OpResult, std::optional<OpOperand *>>
getUntiledProducerFromSliceSource(OpOperand *source,
ArrayRef<scf::ForOp> loops) {
Optional<OpOperand *> destinationIterArg;
std::optional<OpOperand *> destinationIterArg;
auto loopIt = loops.rbegin();
while (auto iterArg = source->get().dyn_cast<BlockArgument>()) {
scf::ForOp loop = *loopIt;
@ -633,7 +633,7 @@ mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp(
// TODO: This can be modeled better if the `DestinationStyleOpInterface`.
// Update to use that when it does become available.
scf::ForOp outerMostLoop = tileAndFuseResult.loops.front();
Optional<unsigned> iterArgNumber;
std::optional<unsigned> iterArgNumber;
if (destinationIterArg) {
iterArgNumber = outerMostLoop.getIterArgNumberForOpOperand(
*destinationIterArg.value());

View File

@ -218,8 +218,8 @@ addLoopRangeConstraints(FlatAffineValueConstraints &constraints, Value iv,
: constraints.appendSymbolVar(/*num=*/1);
// If loop lower/upper bounds are constant: Add EQ constraint.
Optional<int64_t> lbInt = getConstantIntValue(lb);
Optional<int64_t> ubInt = getConstantIntValue(ub);
std::optional<int64_t> lbInt = getConstantIntValue(lb);
std::optional<int64_t> ubInt = getConstantIntValue(ub);
if (lbInt)
constraints.addBound(IntegerPolyhedron::EQ, symLb, *lbInt);
if (ubInt)

View File

@ -142,15 +142,15 @@ std::string SPIRVDialect::getAttributeName(Decoration decoration) {
// Forward declarations.
template <typename ValTy>
static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
DialectAsmParser &parser);
static std::optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
DialectAsmParser &parser);
template <>
Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
DialectAsmParser &parser);
std::optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
DialectAsmParser &parser);
template <>
Optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
DialectAsmParser &parser);
std::optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
DialectAsmParser &parser);
static Type parseAndVerifyType(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
@ -264,7 +264,7 @@ static LogicalResult parseOptionalArrayStride(const SPIRVDialect &dialect,
return failure();
SMLoc strideLoc = parser.getCurrentLocation();
Optional<unsigned> optStride = parseAndVerify<unsigned>(dialect, parser);
std::optional<unsigned> optStride = parseAndVerify<unsigned>(dialect, parser);
if (!optStride)
return failure();
@ -474,8 +474,8 @@ static Type parseMatrixType(SPIRVDialect const &dialect,
// Specialize this function to parse each of the parameters that define an
// ImageType. By default it assumes this is an enum type.
template <typename ValTy>
static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
static std::optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
StringRef enumSpec;
SMLoc enumLoc = parser.getCurrentLocation();
if (parser.parseKeyword(&enumSpec)) {
@ -489,8 +489,8 @@ static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
}
template <>
Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
std::optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
// TODO: Further verify that the element type can be sampled
auto ty = parseAndVerifyType(dialect, parser);
if (!ty)
@ -499,8 +499,8 @@ Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
}
template <typename IntTy>
static Optional<IntTy> parseAndVerifyInteger(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
static std::optional<IntTy> parseAndVerifyInteger(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
IntTy offsetVal = std::numeric_limits<IntTy>::max();
if (parser.parseInteger(offsetVal))
return std::nullopt;
@ -508,8 +508,8 @@ static Optional<IntTy> parseAndVerifyInteger(SPIRVDialect const &dialect,
}
template <>
Optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
std::optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
return parseAndVerifyInteger<unsigned>(dialect, parser);
}
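These SPIR-V hunks migrate a family made of a primary template, explicit specializations, and forward declarations; the return type has to change in lockstep across all of them or the declarations stop matching. A toy model of the shape of this API, with the parser reduced to a token string (names illustrative):

#include <optional>
#include <string>

// Primary template: parse a ValTy from a token, or fail with nullopt.
template <typename ValTy>
static std::optional<ValTy> parseAndVerify(const std::string &token);

// Explicit specialization; its signature must stay in lockstep with the
// primary template's.
template <>
std::optional<unsigned> parseAndVerify<unsigned>(const std::string &token) {
  if (token.empty())
    return std::nullopt;
  unsigned value = 0;
  for (char c : token) {
    if (c < '0' || c > '9')
      return std::nullopt; // not an unsigned integer literal
    value = value * 10 + static_cast<unsigned>(c - '0'); // overflow ignored
  }
  return value;
}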
@ -520,7 +520,7 @@ namespace {
// (termination condition) needs partial specialization.
template <typename ParseType, typename... Args>
struct ParseCommaSeparatedList {
Optional<std::tuple<ParseType, Args...>>
std::optional<std::tuple<ParseType, Args...>>
operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const {
auto parseVal = parseAndVerify<ParseType>(dialect, parser);
if (!parseVal)
@ -541,8 +541,8 @@ struct ParseCommaSeparatedList {
// specs to parse the last element of the list.
template <typename ParseType>
struct ParseCommaSeparatedList<ParseType> {
Optional<std::tuple<ParseType>> operator()(SPIRVDialect const &dialect,
DialectAsmParser &parser) const {
std::optional<std::tuple<ParseType>>
operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const {
if (auto value = parseAndVerify<ParseType>(dialect, parser))
return std::tuple<ParseType>(*value);
return std::nullopt;
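ParseCommaSeparatedList stitches per-element optionals into one std::optional<std::tuple<...>>, with a single-element partial specialization terminating the recursion. A compilable miniature of the combinator over a token vector (every name below is invented for illustration):

#include <cstddef>
#include <optional>
#include <string>
#include <tuple>
#include <vector>

using Tokens = std::vector<std::string>;

// Per-type element parser; only int is implemented in this toy.
template <typename T>
std::optional<T> parseOne(const Tokens &toks, std::size_t &pos);

template <>
std::optional<int> parseOne<int>(const Tokens &toks, std::size_t &pos) {
  if (pos >= toks.size() || toks[pos].empty() ||
      toks[pos].find_first_not_of("0123456789") != std::string::npos)
    return std::nullopt;
  return std::stoi(toks[pos++]); // assumed to fit in int for this sketch
}

// Recursive case: parse the head, recurse on the tail, concatenate tuples;
// any failure collapses the whole result to nullopt.
template <typename ParseType, typename... Args>
struct ParseCommaSeparatedList {
  std::optional<std::tuple<ParseType, Args...>>
  operator()(const Tokens &toks, std::size_t &pos) const {
    auto head = parseOne<ParseType>(toks, pos);
    if (!head)
      return std::nullopt;
    auto rest = ParseCommaSeparatedList<Args...>{}(toks, pos);
    if (!rest)
      return std::nullopt;
    return std::tuple_cat(std::tuple<ParseType>(*head), *rest);
  }
};

// Base case: a single element terminates the recursion.
template <typename ParseType>
struct ParseCommaSeparatedList<ParseType> {
  std::optional<std::tuple<ParseType>>
  operator()(const Tokens &toks, std::size_t &pos) const {
    if (auto value = parseOne<ParseType>(toks, pos))
      return std::tuple<ParseType>(*value);
    return std::nullopt;
  }
};

For example, ParseCommaSeparatedList<int, int>{} applied to the tokens {"4", "8"} yields std::tuple<int, int>(4, 8), and any malformed token collapses the whole result to std::nullopt.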

View File

@ -313,8 +313,8 @@ template <typename MemoryOpTy>
static void printMemoryAccessAttribute(
MemoryOpTy memoryOp, OpAsmPrinter &printer,
SmallVectorImpl<StringRef> &elidedAttrs,
Optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
Optional<uint32_t> alignmentAttrValue = std::nullopt) {
std::optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
std::optional<uint32_t> alignmentAttrValue = std::nullopt) {
// Print optional memory access attribute.
if (auto memAccess = (memoryAccessAtrrValue ? memoryAccessAtrrValue
: memoryOp.getMemoryAccess())) {
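Defaulted std::optional parameters, as in printMemoryAccessAttribute, let a call site override an attribute while std::nullopt falls back to the op's own value. A reduced sketch of that ternary:

#include <cstdint>
#include <optional>

// std::nullopt means "use the op's own alignment"; a present value wins.
static uint32_t effectiveAlignment(uint32_t opAlignment,
                                   std::optional<uint32_t> overrideValue =
                                       std::nullopt) {
  return overrideValue ? *overrideValue : opAlignment;
}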
@ -343,8 +343,8 @@ template <typename MemoryOpTy>
static void printSourceMemoryAccessAttribute(
MemoryOpTy memoryOp, OpAsmPrinter &printer,
SmallVectorImpl<StringRef> &elidedAttrs,
Optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
Optional<uint32_t> alignmentAttrValue = std::nullopt) {
std::optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
std::optional<uint32_t> alignmentAttrValue = std::nullopt) {
printer << ", ";
@ -912,7 +912,7 @@ static ParseResult parseGroupNonUniformArithmeticOp(OpAsmParser &parser,
parser.parseOperand(valueInfo))
return failure();
Optional<OpAsmParser::UnresolvedOperand> clusterSizeInfo;
std::optional<OpAsmParser::UnresolvedOperand> clusterSizeInfo;
if (succeeded(parser.parseOptionalKeyword(kClusterSize))) {
clusterSizeInfo = OpAsmParser::UnresolvedOperand();
if (parser.parseLParen() || parser.parseOperand(*clusterSizeInfo) ||
@ -3348,7 +3348,7 @@ LogicalResult spirv::MergeOp::verify() {
//===----------------------------------------------------------------------===//
void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
Optional<StringRef> name) {
std::optional<StringRef> name) {
OpBuilder::InsertionGuard guard(builder);
builder.createBlock(state.addRegion());
if (name) {
@ -3360,8 +3360,8 @@ void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
spirv::AddressingModel addressingModel,
spirv::MemoryModel memoryModel,
Optional<VerCapExtAttr> vceTriple,
Optional<StringRef> name) {
std::optional<VerCapExtAttr> vceTriple,
std::optional<StringRef> name) {
state.addAttribute(
"addressing_model",
builder.getAttr<spirv::AddressingModelAttr>(addressingModel));
@ -3414,7 +3414,7 @@ ParseResult spirv::ModuleOp::parse(OpAsmParser &parser,
}
void spirv::ModuleOp::print(OpAsmPrinter &printer) {
if (Optional<StringRef> name = getName()) {
if (std::optional<StringRef> name = getName()) {
printer << ' ';
printer.printSymbolName(*name);
}
@ -3428,7 +3428,7 @@ void spirv::ModuleOp::print(OpAsmPrinter &printer) {
elidedAttrs.assign({addressingModelAttrName, memoryModelAttrName,
mlir::SymbolTable::getSymbolAttrName()});
if (Optional<spirv::VerCapExtAttr> triple = getVceTriple()) {
if (std::optional<spirv::VerCapExtAttr> triple = getVceTriple()) {
printer << " requires " << *triple;
elidedAttrs.push_back(spirv::ModuleOp::getVCETripleAttrName());
}
@ -3806,7 +3806,7 @@ LogicalResult spirv::UnreachableOp::verify() {
ParseResult spirv::VariableOp::parse(OpAsmParser &parser,
OperationState &result) {
// Parse optional initializer
Optional<OpAsmParser::UnresolvedOperand> initInfo;
std::optional<OpAsmParser::UnresolvedOperand> initInfo;
if (succeeded(parser.parseOptionalKeyword("init"))) {
initInfo = OpAsmParser::UnresolvedOperand();
if (parser.parseLParen() || parser.parseOperand(*initInfo) ||

View File

@ -148,7 +148,7 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,
// Specifies the spirv.ExecutionModeOp.
if (DenseI32ArrayAttr workgroupSizeAttr = entryPointAttr.getWorkgroupSize()) {
Optional<ArrayRef<spirv::Capability>> caps =
std::optional<ArrayRef<spirv::Capability>> caps =
spirv::getCapabilities(spirv::ExecutionMode::LocalSize);
if (!caps || targetEnv.allows(*caps)) {
builder.create<spirv::ExecutionModeOp>(funcOp.getLoc(), funcOp,
@ -161,7 +161,7 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,
}
}
if (Optional<int> subgroupSize = entryPointAttr.getSubgroupSize()) {
Optional<ArrayRef<spirv::Capability>> caps =
std::optional<ArrayRef<spirv::Capability>> caps =
spirv::getCapabilities(spirv::ExecutionMode::SubgroupSize);
if (!caps || targetEnv.allows(*caps)) {
builder.create<spirv::ExecutionModeOp>(funcOp.getLoc(), funcOp,

View File

@ -52,8 +52,8 @@ static AliasedResourceMap collectAliasedResources(spirv::ModuleOp moduleOp) {
AliasedResourceMap aliasedResources;
moduleOp->walk([&aliasedResources](spirv::GlobalVariableOp varOp) {
if (varOp->getAttrOfType<UnitAttr>("aliased")) {
Optional<uint32_t> set = varOp.getDescriptorSet();
Optional<uint32_t> binding = varOp.getBinding();
std::optional<uint32_t> set = varOp.getDescriptorSet();
std::optional<uint32_t> binding = varOp.getBinding();
if (set && binding)
aliasedResources[{*set, *binding}].push_back(varOp);
}

View File

@ -335,7 +335,7 @@ void AssumingOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
// See RegionBranchOpInterface in Interfaces/ControlFlowInterfaces.td
void AssumingOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// AssumingOp has unconditional control flow into the region and back to the
// parent, so return the correct RegionSuccessor purely based on the index
@ -394,7 +394,7 @@ void AssumingOp::build(
//===----------------------------------------------------------------------===//
LogicalResult mlir::shape::AddOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<SizeType>() ||
@ -911,7 +911,7 @@ void ConstShapeOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
}
LogicalResult mlir::shape::ConstShapeOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
Builder b(context);
@ -1068,7 +1068,7 @@ OpFoldResult CstrRequireOp::fold(ArrayRef<Attribute> operands) {
// DimOp
//===----------------------------------------------------------------------===//
Optional<int64_t> DimOp::getConstantIndex() {
std::optional<int64_t> DimOp::getConstantIndex() {
if (auto constSizeOp = getIndex().getDefiningOp<ConstSizeOp>())
return constSizeOp.getValue().getLimitedValue();
if (auto constantOp = getIndex().getDefiningOp<arith::ConstantOp>())
@ -1081,7 +1081,7 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
auto valShapedType = valType.dyn_cast<ShapedType>();
if (!valShapedType || !valShapedType.hasRank())
return nullptr;
Optional<int64_t> index = getConstantIndex();
std::optional<int64_t> index = getConstantIndex();
if (!index.has_value())
return nullptr;
if (index.value() >= valShapedType.getRank())
@ -1093,7 +1093,7 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
}
LogicalResult mlir::shape::DimOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
DimOpAdaptor dimOp(operands);
@ -1141,7 +1141,7 @@ OpFoldResult DivOp::fold(ArrayRef<Attribute> operands) {
}
LogicalResult mlir::shape::DivOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<SizeType>() ||
@ -1327,7 +1327,7 @@ void FuncOp::print(OpAsmPrinter &p) {
// GetExtentOp
//===----------------------------------------------------------------------===//
Optional<int64_t> GetExtentOp::getConstantDim() {
std::optional<int64_t> GetExtentOp::getConstantDim() {
if (auto constSizeOp = getDim().getDefiningOp<ConstSizeOp>())
return constSizeOp.getValue().getLimitedValue();
if (auto constantOp = getDim().getDefiningOp<arith::ConstantOp>())
@ -1339,7 +1339,7 @@ OpFoldResult GetExtentOp::fold(ArrayRef<Attribute> operands) {
auto elements = operands[0].dyn_cast_or_null<DenseIntElementsAttr>();
if (!elements)
return nullptr;
Optional<int64_t> dim = getConstantDim();
std::optional<int64_t> dim = getConstantDim();
if (!dim.has_value())
return nullptr;
if (dim.value() >= elements.getNumElements())
@ -1362,7 +1362,7 @@ void GetExtentOp::build(OpBuilder &builder, OperationState &result, Value shape,
}
LogicalResult mlir::shape::GetExtentOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
inferredReturnTypes.assign({IndexType::get(context)});
@ -1400,7 +1400,7 @@ OpFoldResult IsBroadcastableOp::fold(ArrayRef<Attribute> operands) {
//===----------------------------------------------------------------------===//
LogicalResult mlir::shape::MeetOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands.empty())
@ -1536,7 +1536,7 @@ void shape::RankOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
}
LogicalResult mlir::shape::RankOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<ShapeType>())
@ -1572,7 +1572,7 @@ OpFoldResult NumElementsOp::fold(ArrayRef<Attribute> operands) {
}
LogicalResult mlir::shape::NumElementsOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<ShapeType>())
@ -1604,7 +1604,7 @@ OpFoldResult MaxOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
}
LogicalResult mlir::shape::MaxOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType() == operands[1].getType())
@ -1636,7 +1636,7 @@ OpFoldResult MinOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
}
LogicalResult mlir::shape::MinOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType() == operands[1].getType())
@ -1673,7 +1673,7 @@ OpFoldResult MulOp::fold(ArrayRef<Attribute> operands) {
}
LogicalResult mlir::shape::MulOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<SizeType>() ||
@ -1760,7 +1760,7 @@ void ShapeOfOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
}
LogicalResult mlir::shape::ShapeOfOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<ValueShapeType>())

View File

@ -333,7 +333,7 @@ IntegerType StorageSpecifierType::getSizesType() const {
}
Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
Optional<unsigned> dim) const {
std::optional<unsigned> dim) const {
if (kind != StorageSpecifierKind::ValMemSize)
assert(dim);
@ -344,8 +344,8 @@ Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
}
Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
Optional<APInt> dim) const {
Optional<unsigned> intDim = std::nullopt;
std::optional<APInt> dim) const {
std::optional<unsigned> intDim = std::nullopt;
if (dim)
intDim = dim.value().getZExtValue();
return getFieldType(kind, intDim);
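Narrowing std::optional<APInt> to std::optional<unsigned>, as above, has to be spelled out by hand: C++17's std::optional has no transform/map (that only arrives in C++23). A standalone rendering of the idiom, with int64_t standing in for APInt:

#include <cstdint>
#include <optional>

// Re-wrap by hand; std::optional::transform only exists from C++23 on.
static std::optional<unsigned> narrowDim(std::optional<int64_t> dim) {
  std::optional<unsigned> intDim = std::nullopt;
  if (dim)
    intDim = static_cast<unsigned>(dim.value());
  return intDim;
}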
@ -369,10 +369,9 @@ static LogicalResult isMatchingWidth(Value result, unsigned width) {
return failure();
}
static LogicalResult
verifySparsifierGetterSetter(StorageSpecifierKind mdKind, Optional<APInt> dim,
TypedValue<StorageSpecifierType> md,
Operation *op) {
static LogicalResult verifySparsifierGetterSetter(
StorageSpecifierKind mdKind, std::optional<APInt> dim,
TypedValue<StorageSpecifierType> md, Operation *op) {
if (mdKind == StorageSpecifierKind::ValMemSize && dim) {
return op->emitError(
"redundant dimension argument for querying value memory size");
@ -482,7 +481,7 @@ static SetStorageSpecifierOp getSpecifierSetDef(SpecifierOp op) {
OpFoldResult GetStorageSpecifierOp::fold(ArrayRef<Attribute> operands) {
StorageSpecifierKind kind = getSpecifierKind();
Optional<APInt> dim = getDim();
std::optional<APInt> dim = getDim();
for (auto op = getSpecifierSetDef(*this); op; op = getSpecifierSetDef(op))
if (kind == op.getSpecifierKind() && dim == op.getDim())
return op.getValue();

View File

@ -133,9 +133,10 @@ static scf::ForOp createFor(OpBuilder &builder, Location loc, Value upper,
/// Gets the dimension size for the given sparse tensor at the given
/// original dimension 'dim'. Returns std::nullopt if no sparse encoding is
/// attached to the given tensor type.
static Optional<Value> sizeFromTensorAtDim(OpBuilder &builder, Location loc,
SparseTensorDescriptor desc,
unsigned dim) {
static std::optional<Value> sizeFromTensorAtDim(OpBuilder &builder,
Location loc,
SparseTensorDescriptor desc,
unsigned dim) {
RankedTensorType rtp = desc.getTensorType();
// Access into static dimension can query original type directly.
// Note that this is typically already done by DimOp's folding.
@ -681,7 +682,7 @@ public:
LogicalResult
matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Optional<int64_t> index = op.getConstantIndex();
std::optional<int64_t> index = op.getConstantIndex();
if (!index || !getSparseTensorEncoding(adaptor.getSource().getType()))
return failure();

View File

@ -706,7 +706,7 @@ public:
if (!enc)
return failure();
// Only rewrite DimOp with constant index.
Optional<int64_t> dim = op.getConstantIndex();
std::optional<int64_t> dim = op.getConstantIndex();
if (!dim)
return failure();
// Generate the call.

View File

@ -380,7 +380,7 @@ void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
build(builder, result, source, indexValue);
}
Optional<int64_t> DimOp::getConstantIndex() {
std::optional<int64_t> DimOp::getConstantIndex() {
return getConstantIntValue(getIndex());
}
@ -400,7 +400,7 @@ Speculation::Speculatability DimOp::getSpeculatability() {
LogicalResult DimOp::verify() {
// Assume unknown index to be in range.
Optional<int64_t> index = getConstantIndex();
std::optional<int64_t> index = getConstantIndex();
if (!index)
return success();
@ -598,7 +598,7 @@ struct ReplaceEmptyTensorStaticShapeDims : OpRewritePattern<EmptyOp> {
for (int64_t i = 0; i < op.getType().getRank(); ++i) {
if (op.getType().isDynamicDim(i)) {
Value dynamicSize = op.getDynamicSizes()[ctr++];
Optional<int64_t> cst = getConstantIntValue(dynamicSize);
std::optional<int64_t> cst = getConstantIntValue(dynamicSize);
if (cst.has_value()) {
staticShape[i] = *cst;
changedType = true;
@ -626,7 +626,7 @@ struct FoldEmptyTensorWithDimOp : public OpRewritePattern<DimOp> {
LogicalResult matchAndRewrite(tensor::DimOp dimOp,
PatternRewriter &rewriter) const override {
Optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
std::optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
auto emptyTensorOp = dimOp.getSource().getDefiningOp<EmptyOp>();
if (!emptyTensorOp || !maybeConstantIndex)
return failure();
@ -1445,7 +1445,7 @@ struct FoldDimOfExpandShape : public OpRewritePattern<DimOp> {
return failure();
// Only constant dimension values are supported.
Optional<int64_t> dim = dimOp.getConstantIndex();
std::optional<int64_t> dim = dimOp.getConstantIndex();
if (!dim.has_value())
return failure();
@ -1489,7 +1489,7 @@ struct FoldDimOfCollapseShape : public OpRewritePattern<DimOp> {
return failure();
// Only constant dimension values are supported.
Optional<int64_t> dim = dimOp.getConstantIndex();
std::optional<int64_t> dim = dimOp.getConstantIndex();
if (!dim.has_value())
return failure();
@ -1732,7 +1732,7 @@ llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
llvm::SmallBitVector droppedDims(mixedSizes.size());
unsigned shapePos = 0;
for (const auto &size : enumerate(mixedSizes)) {
Optional<int64_t> sizeVal = getConstantIntValue(size.value());
std::optional<int64_t> sizeVal = getConstantIntValue(size.value());
// If the size is not 1, or if the current matched dimension of the result
// is the same static shape as the size value (which is 1), then the
// dimension is preserved.
@ -2278,15 +2278,16 @@ struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertOpTy> {
}))
return failure();
auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
auto getSourceOfCastOp = [](Value v) -> std::optional<Value> {
auto castOp = v.getDefiningOp<tensor::CastOp>();
if (!castOp || !canFoldIntoConsumerOp(castOp))
return std::nullopt;
return castOp.getSource();
};
Optional<Value> sourceCastSource =
std::optional<Value> sourceCastSource =
getSourceOfCastOp(insertSliceOp.getSource());
Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.getDest());
std::optional<Value> destCastSource =
getSourceOfCastOp(insertSliceOp.getDest());
if (!sourceCastSource && !destCastSource)
return failure();
@ -2352,7 +2353,7 @@ struct InsertSliceOpSourceCastInserter final
SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
srcType.getShape().end());
for (int64_t i = 0; i < srcType.getRank(); ++i) {
if (Optional<int64_t> constInt =
if (std::optional<int64_t> constInt =
getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
newSrcShape[i] = *constInt;
}
@ -2419,9 +2420,10 @@ void PadOp::getAsmResultNames(function_ref<void(Value, StringRef)> setNameFn) {
void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
Type typeToInfer, Type typeToInferFrom) {}
ParseResult parseInferType(OpAsmParser &parser,
Optional<OpAsmParser::UnresolvedOperand> optOperand,
Type &typeToInfer, Type typeToInferFrom) {
ParseResult
parseInferType(OpAsmParser &parser,
std::optional<OpAsmParser::UnresolvedOperand> optOperand,
Type &typeToInfer, Type typeToInferFrom) {
if (optOperand)
typeToInfer = typeToInferFrom;
return success();
@ -3151,7 +3153,7 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
llvm::zip(packedType.getShape().take_back(mixedTiles.size()),
mixedTiles),
[](std::tuple<int64_t, OpFoldResult> it) {
Optional<int64_t> constTileSize =
std::optional<int64_t> constTileSize =
getConstantIntValue(std::get<1>(it));
int64_t shape = std::get<0>(it);
if (!constTileSize) {
@ -3232,7 +3234,7 @@ areNotFullTiles(ArrayRef<int64_t> inputShape,
auto it = dimAndTileMapping.find(dim);
if (it == dimAndTileMapping.end())
continue;
Optional<int64_t> constantTile = getConstantIntValue(it->second);
std::optional<int64_t> constantTile = getConstantIntValue(it->second);
if (!constantTile)
continue;
if (inputShape[dim] % (*constantTile) != 0)
@ -3333,7 +3335,7 @@ bool areTilesAndTiledDimsAllConstant(OpTy op) {
SmallVector<OpFoldResult> mixedTiles = op.getMixedTiles();
for (auto [dimDest, tile] : llvm::zip(
packedType.getShape().take_back(mixedTiles.size()), mixedTiles)) {
Optional<int64_t> constTileSize = getConstantIntValue(tile);
std::optional<int64_t> constTileSize = getConstantIntValue(tile);
if (!constTileSize || ShapedType::isDynamic(dimDest))
return false;
}

View File

@ -265,7 +265,7 @@ static UnpackTileDimInfo getUnpackTileDimInfo(OpBuilder &b, UnPackOp unpackOp,
info.isAlignedToInnerTileSize = false;
FailureOr<int64_t> cstSize = linalg::getConstantUpperBoundForIndex(
getValueOrCreateConstantIndexOp(b, loc, tileSize));
Optional<int64_t> cstInnerSize = getConstantIntValue(innerTileSize);
std::optional<int64_t> cstInnerSize = getConstantIntValue(innerTileSize);
if (!failed(cstSize) && cstInnerSize) {
if (cstSize.value() % cstInnerSize.value() == 0)
info.isAlignedToInnerTileSize = true;

View File

@ -26,7 +26,7 @@ using namespace mlir;
/// Returns true if the given `attrOrValue` is a constant zero.
static bool isZero(OpFoldResult attrOrValue) {
if (Optional<int64_t> val = getConstantIntValue(attrOrValue))
if (std::optional<int64_t> val = getConstantIntValue(attrOrValue))
return *val == 0;
return false;
}
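isZero tests and dereferences in one if; when a sentinel default is acceptable, value_or gives an even terser spelling. A standalone equivalent, with a pointer standing in for the OpFoldResult lookup (names illustrative):

#include <cstdint>
#include <optional>

// Stand-in for getConstantIntValue on an OpFoldResult.
static std::optional<int64_t> constantIntValue(const int64_t *v) {
  return v ? std::optional<int64_t>(*v) : std::nullopt;
}

static bool isZeroConstant(const int64_t *attrOrValue) {
  // value_or(1) makes a non-constant compare unequal to zero.
  return constantIntValue(attrOrValue).value_or(1) == 0;
}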

View File

@ -373,7 +373,7 @@ static LogicalResult resolveBroadcastShape(const ValueShapeRange &operands,
}
LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@ -398,7 +398,7 @@ LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
}
LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
// Infer all dimension sizes by reducing based on inputs.
@ -455,7 +455,7 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
}
LogicalResult tosa::EqualOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outShape;
@ -476,7 +476,7 @@ bool tosa::EqualOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
}
LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@ -496,9 +496,8 @@ LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
}
if (biasShape.hasRank()) {
outShape[1] = outShape[1] == ShapedType::kDynamic
? biasShape.getDimSize(0)
: outShape[1];
outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)
: outShape[1];
}
inferredReturnShapes.push_back(ShapedTypeComponents(outShape));
@ -508,7 +507,7 @@ LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
LogicalResult FullyConnectedOp::verify() { return verifyConvOp(*this); }
LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor lhsShape = operands.getShape(0);
@ -524,9 +523,8 @@ LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
}
if (rhsShape.hasRank()) {
outShape[0] = outShape[0] == ShapedType::kDynamic
? rhsShape.getDimSize(0)
: outShape[0];
outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0)
: outShape[0];
outShape[2] = rhsShape.getDimSize(2);
}
@ -535,7 +533,7 @@ LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
}
LogicalResult tosa::PadOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@ -597,7 +595,7 @@ static SmallVector<int64_t> convertToMlirShape(ArrayRef<int64_t> shape) {
}
LogicalResult tosa::SliceOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ArrayAttr sizes = SliceOpAdaptor(operands, attributes).getSize();
@ -607,13 +605,13 @@ LogicalResult tosa::SliceOp::inferReturnTypeComponents(
outputShape.push_back(val.cast<IntegerAttr>().getValue().getSExtValue());
}
inferredReturnShapes.push_back(ShapedTypeComponents(
convertToMlirShape(outputShape)));
inferredReturnShapes.push_back(
ShapedTypeComponents(convertToMlirShape(outputShape)));
return success();
}
LogicalResult tosa::TableOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@ -629,7 +627,7 @@ LogicalResult tosa::TableOp::inferReturnTypeComponents(
}
LogicalResult tosa::TileOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
TileOpAdaptor adaptor(operands, attributes);
@ -663,7 +661,7 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
}
LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ReshapeOpAdaptor adaptor(operands, attributes);
@ -703,7 +701,7 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
}
LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@ -770,7 +768,7 @@ LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
}
LogicalResult tosa::GatherOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape;
@ -795,7 +793,7 @@ LogicalResult tosa::GatherOp::inferReturnTypeComponents(
}
LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ResizeOpAdaptor adaptor(operands, attributes);
@ -838,7 +836,7 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
}
LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape;
@ -887,7 +885,7 @@ static LogicalResult ReduceInferReturnTypes(
#define REDUCE_SHAPE_INFER(OP) \
LogicalResult OP::inferReturnTypeComponents( \
MLIRContext *context, ::llvm::Optional<Location> location, \
MLIRContext *context, ::std::optional<Location> location, \
ValueShapeRange operands, DictionaryAttr attributes, \
RegionRange regions, \
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
@ -918,7 +916,7 @@ static LogicalResult NAryInferReturnTypes(
#define NARY_SHAPE_INFER(OP) \
LogicalResult OP::inferReturnTypeComponents( \
MLIRContext *context, ::llvm::Optional<Location> location, \
MLIRContext *context, ::std::optional<Location> location, \
ValueShapeRange operands, DictionaryAttr attributes, \
RegionRange regions, \
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
@ -1007,7 +1005,7 @@ static LogicalResult poolingInferReturnTypes(
}
LogicalResult Conv2DOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
@ -1074,7 +1072,7 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
LogicalResult Conv2DOp::verify() { return verifyConvOp(*this); }
LogicalResult Conv3DOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamic);
@ -1151,21 +1149,21 @@ LogicalResult Conv3DOp::inferReturnTypeComponents(
LogicalResult Conv3DOp::verify() { return verifyConvOp(*this); }
LogicalResult AvgPool2dOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
return poolingInferReturnTypes(operands, attributes, inferredReturnShapes);
}
LogicalResult MaxPool2dOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
return poolingInferReturnTypes(operands, attributes, inferredReturnShapes);
}
LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
@ -1245,7 +1243,7 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
LogicalResult DepthwiseConv2DOp::verify() { return verifyConvOp(*this); }
LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
@ -1313,7 +1311,7 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
}
LogicalResult IfOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<tosa::YieldOp> yieldOps;
@ -1357,7 +1355,7 @@ LogicalResult IfOp::inferReturnTypeComponents(
}
LogicalResult WhileOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<tosa::YieldOp> yieldOps;

View File

@ -46,7 +46,7 @@ public:
private:
void runOnOperation() override;
llvm::Optional<TosaProfileEnum> profileType;
std::optional<TosaProfileEnum> profileType;
};
void TosaValidation::runOnOperation() {

View File

@ -117,8 +117,8 @@ LogicalResult PatternApplicatorExtension::findAllMatches(
// AlternativesOp
//===----------------------------------------------------------------------===//
OperandRange
transform::AlternativesOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange transform::AlternativesOp::getSuccessorEntryOperands(
std::optional<unsigned> index) {
if (index && getOperation()->getNumOperands() == 1)
return getOperation()->getOperands();
return OperandRange(getOperation()->operand_end(),
@ -126,7 +126,7 @@ transform::AlternativesOp::getSuccessorEntryOperands(Optional<unsigned> index) {
}
void transform::AlternativesOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
for (Region &alternative : llvm::drop_begin(
getAlternatives(), index.has_value() ? *index + 1 : 0)) {
@ -338,7 +338,7 @@ void transform::ForeachOp::getEffects(
}
void transform::ForeachOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
Region *bodyRegion = &getBody();
if (!index) {
@ -353,7 +353,7 @@ void transform::ForeachOp::getSuccessorRegions(
}
OperandRange
transform::ForeachOp::getSuccessorEntryOperands(Optional<unsigned> index) {
transform::ForeachOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
// The iteration variable op handle is mapped to a subset (one op to be
// precise) of the payload ops of the ForeachOp operand.
assert(index && *index == 0 && "unexpected region index");
@ -737,8 +737,8 @@ void transform::SequenceOp::getEffects(
}
}
OperandRange
transform::SequenceOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange transform::SequenceOp::getSuccessorEntryOperands(
std::optional<unsigned> index) {
assert(index && *index == 0 && "unexpected region index");
if (getOperation()->getNumOperands() == 1)
return getOperation()->getOperands();
@ -747,7 +747,7 @@ transform::SequenceOp::getSuccessorEntryOperands(Optional<unsigned> index) {
}
void transform::SequenceOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (!index) {
Region *bodyRegion = &getBody();

Some files were not shown because too many files have changed in this diff.