diff --git a/flang/lib/Optimizer/Dialect/FIROps.cpp b/flang/lib/Optimizer/Dialect/FIROps.cpp
--- a/flang/lib/Optimizer/Dialect/FIROps.cpp
+++ b/flang/lib/Optimizer/Dialect/FIROps.cpp
@@ -67,7 +67,7 @@
 // AddfOp
 //===----------------------------------------------------------------------===//
 
-mlir::OpFoldResult fir::AddfOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
+mlir::ValueOrAttr fir::AddfOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
   return mlir::constFoldBinaryOp<mlir::FloatAttr>(
       opnds, [](APFloat a, APFloat b) { return a + b; });
 }
@@ -119,7 +119,7 @@
 // BoxAddrOp
 //===----------------------------------------------------------------------===//
 
-mlir::OpFoldResult fir::BoxAddrOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
+mlir::ValueOrAttr fir::BoxAddrOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
   if (auto v = val().getDefiningOp()) {
     if (auto box = dyn_cast<fir::EmboxOp>(v))
       return box.memref();
@@ -133,7 +133,7 @@
 // BoxCharLenOp
 //===----------------------------------------------------------------------===//
 
-mlir::OpFoldResult
+mlir::ValueOrAttr
 fir::BoxCharLenOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
   if (auto v = val().getDefiningOp()) {
     if (auto box = dyn_cast<fir::EmboxCharOp>(v))
@@ -315,7 +315,7 @@
 // ConvertOp
 //===----------------------------------------------------------------------===//
 
-mlir::OpFoldResult fir::ConvertOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
+mlir::ValueOrAttr fir::ConvertOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
   if (value().getType() == getType())
     return value();
   if (matchPattern(value(), m_Op<fir::ConvertOp>())) {
@@ -958,7 +958,7 @@
 // MulfOp
 //===----------------------------------------------------------------------===//
 
-mlir::OpFoldResult fir::MulfOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
+mlir::ValueOrAttr fir::MulfOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
   return mlir::constFoldBinaryOp<mlir::FloatAttr>(
       opnds, [](APFloat a, APFloat b) { return a * b; });
 }
@@ -1390,7 +1390,7 @@
 // SubfOp
 //===----------------------------------------------------------------------===//
 
-mlir::OpFoldResult fir::SubfOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
+mlir::ValueOrAttr fir::SubfOp::fold(llvm::ArrayRef<mlir::Attribute> opnds) {
   return mlir::constFoldBinaryOp<mlir::FloatAttr>(
       opnds, [](APFloat a, APFloat b) { return a - b; });
 }
diff --git a/mlir/docs/Canonicalization.md b/mlir/docs/Canonicalization.md
--- a/mlir/docs/Canonicalization.md
+++ b/mlir/docs/Canonicalization.md
@@ -127,7 +127,7 @@
 /// of the operation. The caller will remove the operation and use that
 /// result instead.
 ///
-OpFoldResult MyOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr MyOp::fold(ArrayRef<Attribute> operands) {
   ...
 }
 ```
@@ -149,7 +149,7 @@
 /// the operation and use those results instead.
 ///
 LogicalResult MyOp::fold(ArrayRef<Attribute> operands,
-                         SmallVectorImpl<OpFoldResult> &results) {
+                         SmallVectorImpl<ValueOrAttr> &results) {
   ...
 }
 ```
@@ -162,7 +162,7 @@
 constant then `operands` will be of the form [Attribute(), b-value, Attribute()].
 
-Also above, is the use of `OpFoldResult`. This class represents the possible
+Also above is the use of `ValueOrAttr`. This class represents the possible
 result of folding an operation result: either an SSA `Value`, or an
 `Attribute` (for a constant result). If an SSA `Value` is provided, it *must*
 correspond to an existing value. The `fold` methods are not permitted to
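For orientation between the two documentation hunks above and below: a single-result folder under the renamed API would look like the sketch that follows. `MyAddOp` is hypothetical (it mirrors the `std.addi` folder updated later in this patch), and the sketch assumes the standard MLIR helpers `matchPattern`, `m_Zero`, and `constFoldBinaryOp` that this patch already uses.

```c++
// Hypothetical op, shown only to illustrate the renamed ValueOrAttr API:
// return an existing Value for an in-place simplification, an Attribute for
// a constant result, or {} to signal that nothing could be folded.
ValueOrAttr MyAddOp::fold(ArrayRef<Attribute> operands) {
  // addi(x, 0) -> x : fold to an existing SSA Value.
  if (matchPattern(rhs(), m_Zero()))
    return lhs();
  // Both operands constant: fold to an Attribute holding the sum.
  return constFoldBinaryOp<IntegerAttr>(
      operands, [](APInt a, APInt b) { return a + b; });
}
```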
diff --git a/mlir/docs/Traits.md b/mlir/docs/Traits.md
--- a/mlir/docs/Traits.md
+++ b/mlir/docs/Traits.md
@@ -70,7 +70,7 @@
 public:
   /// Override the 'foldTrait' hook to support trait based folding on the
   /// concrete operation.
-  static OpFoldResult foldTrait(Operation *op, ArrayRef<Attribute> operands) {
+  static ValueOrAttr foldTrait(Operation *op, ArrayRef<Attribute> operands) {
     // ...
   }
 };
@@ -87,7 +87,7 @@
   /// Override the 'foldTrait' hook to support trait based folding on the
   /// concrete operation.
   static LogicalResult foldTrait(Operation *op, ArrayRef<Attribute> operands,
-                                 SmallVectorImpl<OpFoldResult> &results) {
+                                 SmallVectorImpl<ValueOrAttr> &results) {
     // ...
   }
 };
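As a concrete instance of the single-result `foldTrait` hook shown above, here is a hedged sketch of an involution-style trait. `MyInvolutionTrait` is hypothetical; the real `Involution` trait updated in OpDefinition.h later in this patch delegates to `impl::foldInvolution` instead.

```c++
template <typename ConcreteType>
class MyInvolutionTrait
    : public OpTrait::TraitBase<ConcreteType, MyInvolutionTrait> {
public:
  static ValueOrAttr foldTrait(Operation *op, ArrayRef<Attribute> operands) {
    // Involution: f(f(x)) == x, so fold the outer op to the inner operand.
    Operation *def = op->getOperand(0).getDefiningOp();
    if (def && def->getName() == op->getName())
      return def->getOperand(0); // A Value result.
    return {};                   // Failed to fold.
  }
};
```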
diff --git a/mlir/docs/Tutorials/QuickstartRewrites.md b/mlir/docs/Tutorials/QuickstartRewrites.md
--- a/mlir/docs/Tutorials/QuickstartRewrites.md
+++ b/mlir/docs/Tutorials/QuickstartRewrites.md
@@ -77,7 +77,7 @@
 have a folder, the constant folder also needs to be added, e.g.,:
 
 ```c++
-OpFoldResult SpecificOp::fold(ArrayRef<Attribute> constOperands) {
+ValueOrAttr SpecificOp::fold(ArrayRef<Attribute> constOperands) {
   if (unable_to_fold)
     return {};
   ....
diff --git a/mlir/docs/Tutorials/Toy/Ch-7.md b/mlir/docs/Tutorials/Toy/Ch-7.md
--- a/mlir/docs/Tutorials/Toy/Ch-7.md
+++ b/mlir/docs/Tutorials/Toy/Ch-7.md
@@ -450,15 +450,15 @@
 ```c++
 /// Fold constants.
-OpFoldResult ConstantOp::fold(ArrayRef<Attribute> operands) { return value(); }
+ValueOrAttr ConstantOp::fold(ArrayRef<Attribute> operands) { return value(); }
 
 /// Fold struct constants.
-OpFoldResult StructConstantOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr StructConstantOp::fold(ArrayRef<Attribute> operands) {
   return value();
 }
 
 /// Fold simple struct access operations that access into a constant.
-OpFoldResult StructAccessOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr StructAccessOp::fold(ArrayRef<Attribute> operands) {
   auto structAttr = operands.front().dyn_cast_or_null<mlir::ArrayAttr>();
   if (!structAttr)
     return nullptr;
diff --git a/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
--- a/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
@@ -24,15 +24,15 @@
 } // end anonymous namespace
 
 /// Fold constants.
-OpFoldResult ConstantOp::fold(ArrayRef<Attribute> operands) { return value(); }
+ValueOrAttr ConstantOp::fold(ArrayRef<Attribute> operands) { return value(); }
 
 /// Fold struct constants.
-OpFoldResult StructConstantOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr StructConstantOp::fold(ArrayRef<Attribute> operands) {
   return value();
 }
 
 /// Fold simple struct access operations that access into a constant.
-OpFoldResult StructAccessOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr StructAccessOp::fold(ArrayRef<Attribute> operands) {
   auto structAttr = operands.front().dyn_cast_or_null<mlir::ArrayAttr>();
   if (!structAttr)
     return nullptr;
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
@@ -233,7 +233,7 @@
   void print(OpAsmPrinter &p);
   LogicalResult verify();
   LogicalResult fold(ArrayRef<Attribute> cstOperands,
-                     SmallVectorImpl<OpFoldResult> &results);
+                     SmallVectorImpl<ValueOrAttr> &results);
 
   /// Returns true if this DMA operation is strided, returns false otherwise.
   bool isStrided() {
@@ -318,7 +318,7 @@
   void print(OpAsmPrinter &p);
   LogicalResult verify();
   LogicalResult fold(ArrayRef<Attribute> cstOperands,
-                     SmallVectorImpl<OpFoldResult> &results);
+                     SmallVectorImpl<ValueOrAttr> &results);
 };
 
 /// Returns true if the given Value can be used as a dimension id in the region
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -117,7 +117,7 @@
     [{
       build($_builder, $_state, ValueRange{}, staticShape, elementType);
     }]>,
-    OpBuilderDAG<(ins "ArrayRef<OpFoldResult>":$sizes, "Type":$elementType,
+    OpBuilderDAG<(ins "ArrayRef<ValueOrAttr>":$sizes, "Type":$elementType,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
   ];
@@ -231,14 +231,14 @@
     // I.e., a block will be created and the `pad` value will be yielded
    // directly. If the type passed is nullptr, it is inferred.
    static linalg::PadTensorOp createPadScalarOp(
-       Type type, Value source, Value pad, ArrayRef<OpFoldResult> low,
-       ArrayRef<OpFoldResult> high, Location loc, OpBuilder & builder);
+       Type type, Value source, Value pad, ArrayRef<ValueOrAttr> low,
+       ArrayRef<ValueOrAttr> high, Location loc, OpBuilder & builder);
 
    // Return a vector of all the static or dynamic values (low/high padding) of
    // the op.
-    inline SmallVector<OpFoldResult, 4> getMixedPadImpl(ArrayAttr staticAttrs,
+    inline SmallVector<ValueOrAttr, 4> getMixedPadImpl(ArrayAttr staticAttrs,
                                                         ValueRange values) {
-      SmallVector<OpFoldResult, 4> res;
+      SmallVector<ValueOrAttr, 4> res;
      unsigned numDynamic = 0;
      unsigned count = staticAttrs.size();
      for (unsigned idx = 0; idx < count; ++idx) {
@@ -249,10 +249,10 @@
      }
      return res;
    }
-    SmallVector<OpFoldResult, 4> getMixedLowPad() {
+    SmallVector<ValueOrAttr, 4> getMixedLowPad() {
      return getMixedPadImpl(static_low(), low());
    }
-    SmallVector<OpFoldResult, 4> getMixedHighPad() {
+    SmallVector<ValueOrAttr, 4> getMixedHighPad() {
      return getMixedPadImpl(static_high(), high());
    }
  }];
@@ -268,7 +268,7 @@
    // Build a PadTensorOp with mixed static and dynamic entries and custom
    // result type. If the type passed is nullptr, it is inferred.
    OpBuilderDAG<(ins "Type":$resultType, "Value":$source,
-      "ArrayRef<OpFoldResult>":$low, "ArrayRef<OpFoldResult>":$high,
+      "ArrayRef<ValueOrAttr>":$low, "ArrayRef<ValueOrAttr>":$high,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
  ];
}
diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.h b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.h
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.h
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.h
@@ -237,7 +237,7 @@
   LogicalResult verify();
   LogicalResult fold(ArrayRef<Attribute> cstOperands,
-                     SmallVectorImpl<OpFoldResult> &results);
+                     SmallVectorImpl<ValueOrAttr> &results);
 
   bool isStrided() {
     return getNumOperands() != 1 + getSrcMemRefRank() + 1 + getDstMemRefRank() +
@@ -300,7 +300,7 @@
   static ParseResult parse(OpAsmParser &parser, OperationState &result);
   void print(OpAsmPrinter &p);
   LogicalResult fold(ArrayRef<Attribute> cstOperands,
-                     SmallVectorImpl<OpFoldResult> &results);
+                     SmallVectorImpl<ValueOrAttr> &results);
   LogicalResult verify();
 };
diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -1748,8 +1748,8 @@
   let builders = [
     // Build a ReinterpretCastOp with mixed static and dynamic entries.
     OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source,
-      "OpFoldResult":$offset, "ArrayRef<OpFoldResult>":$sizes,
-      "ArrayRef<OpFoldResult>":$strides,
+      "ValueOrAttr":$offset, "ArrayRef<ValueOrAttr>":$sizes,
+      "ArrayRef<ValueOrAttr>":$strides,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
     // Build a ReinterpretCastOp with static entries.
     OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source,
@@ -2659,14 +2659,14 @@
   let builders = [
     // Build a SubViewOp with mixed static and dynamic entries and custom
     // result type. If the type passed is nullptr, it is inferred.
-    OpBuilderDAG<(ins "Value":$source, "ArrayRef<OpFoldResult>":$offsets,
-      "ArrayRef<OpFoldResult>":$sizes, "ArrayRef<OpFoldResult>":$strides,
+    OpBuilderDAG<(ins "Value":$source, "ArrayRef<ValueOrAttr>":$offsets,
+      "ArrayRef<ValueOrAttr>":$sizes, "ArrayRef<ValueOrAttr>":$strides,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
     // Build a SubViewOp with mixed static and dynamic entries and inferred
     // result type.
     OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source,
-      "ArrayRef<OpFoldResult>":$offsets, "ArrayRef<OpFoldResult>":$sizes,
-      "ArrayRef<OpFoldResult>":$strides,
+      "ArrayRef<ValueOrAttr>":$offsets, "ArrayRef<ValueOrAttr>":$sizes,
+      "ArrayRef<ValueOrAttr>":$strides,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
     // Build a SubViewOp with static entries and custom result type. If the
     // type passed is nullptr, it is inferred.
@@ -2706,9 +2706,9 @@
                                 ArrayRef<int64_t> staticSizes,
                                 ArrayRef<int64_t> staticStrides);
     static Type inferResultType(MemRefType sourceMemRefType,
-                                ArrayRef<OpFoldResult> staticOffsets,
-                                ArrayRef<OpFoldResult> staticSizes,
-                                ArrayRef<OpFoldResult> staticStrides);
+                                ArrayRef<ValueOrAttr> staticOffsets,
+                                ArrayRef<ValueOrAttr> staticSizes,
+                                ArrayRef<ValueOrAttr> staticStrides);
 
     /// Return the expected rank of each of the `static_offsets`, `static_sizes`
     /// and `static_strides` attributes.
@@ -2793,14 +2793,14 @@
   let builders = [
     // Build a SubTensorOp with mixed static and dynamic entries and inferred
     // result type.
-    OpBuilderDAG<(ins "Value":$source, "ArrayRef<OpFoldResult>":$offsets,
-      "ArrayRef<OpFoldResult>":$sizes, "ArrayRef<OpFoldResult>":$strides,
+    OpBuilderDAG<(ins "Value":$source, "ArrayRef<ValueOrAttr>":$offsets,
+      "ArrayRef<ValueOrAttr>":$sizes, "ArrayRef<ValueOrAttr>":$strides,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
     // Build a SubTensorOp with mixed static and dynamic entries and custom
     // result type. If the type passed is nullptr, it is inferred.
     OpBuilderDAG<(ins "RankedTensorType":$resultType, "Value":$source,
-      "ArrayRef<OpFoldResult>":$offsets, "ArrayRef<OpFoldResult>":$sizes,
-      "ArrayRef<OpFoldResult>":$strides,
+      "ArrayRef<ValueOrAttr>":$offsets, "ArrayRef<ValueOrAttr>":$sizes,
+      "ArrayRef<ValueOrAttr>":$strides,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
     // Build a SubTensorOp with dynamic entries and custom result type. If the
     // type passed is nullptr, it is inferred.
@@ -2832,9 +2832,9 @@
                                 ArrayRef<int64_t> staticSizes,
                                 ArrayRef<int64_t> staticStrides);
     static Type inferResultType(RankedTensorType sourceRankedTensorType,
-                                ArrayRef<OpFoldResult> staticOffsets,
-                                ArrayRef<OpFoldResult> staticSizes,
-                                ArrayRef<OpFoldResult> staticStrides);
+                                ArrayRef<ValueOrAttr> staticOffsets,
+                                ArrayRef<ValueOrAttr> staticSizes,
+                                ArrayRef<ValueOrAttr> staticStrides);
 
     /// Return the expected rank of each of the `static_offsets`, `static_sizes`
     /// and `static_strides` attributes.
@@ -2916,8 +2916,8 @@
   let builders = [
     // Build a SubTensorInsertOp with mixed static and dynamic entries.
     OpBuilderDAG<(ins "Value":$source, "Value":$dest,
-      "ArrayRef<OpFoldResult>":$offsets, "ArrayRef<OpFoldResult>":$sizes,
-      "ArrayRef<OpFoldResult>":$strides,
+      "ArrayRef<ValueOrAttr>":$offsets, "ArrayRef<ValueOrAttr>":$sizes,
+      "ArrayRef<ValueOrAttr>":$strides,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
     // Build a SubTensorInsertOp with dynamic entries.
     OpBuilderDAG<(ins "Value":$source, "Value":$dest,
diff --git a/mlir/include/mlir/IR/Matchers.h b/mlir/include/mlir/IR/Matchers.h
--- a/mlir/include/mlir/IR/Matchers.h
+++ b/mlir/include/mlir/IR/Matchers.h
@@ -69,7 +69,7 @@
       return false;
 
     // Fold the constant to an attribute.
-    SmallVector<OpFoldResult, 1> foldedOp;
+    SmallVector<ValueOrAttr, 1> foldedOp;
     LogicalResult result = op->fold(/*operands=*/llvm::None, foldedOp);
     (void)result;
     assert(succeeded(result) && "expected ConstantLike op to be foldable");
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -213,10 +213,10 @@
   return lhs.getOperation() != rhs.getOperation();
 }
 
-raw_ostream &operator<<(raw_ostream &os, OpFoldResult ofr);
+raw_ostream &operator<<(raw_ostream &os, ValueOrAttr ofr);
 
 /// This class represents a single result from folding an operation.
-class OpFoldResult : public PointerUnion<Attribute, Value> {
+class ValueOrAttr : public PointerUnion<Attribute, Value> {
   using PointerUnion<Attribute, Value>::PointerUnion;
 
 public:
@@ -224,7 +224,7 @@
 };
 
 /// Allow printing to a stream.
-inline raw_ostream &operator<<(raw_ostream &os, OpFoldResult ofr) {
+inline raw_ostream &operator<<(raw_ostream &os, ValueOrAttr ofr) {
   if (Value value = ofr.dyn_cast<Value>())
     value.print(os);
   else
@@ -248,8 +248,8 @@
 // corresponding trait classes.  This avoids them being template
 // instantiated/duplicated.
 namespace impl {
-OpFoldResult foldIdempotent(Operation *op);
-OpFoldResult foldInvolution(Operation *op);
+ValueOrAttr foldIdempotent(Operation *op);
+ValueOrAttr foldInvolution(Operation *op);
 LogicalResult verifyZeroOperands(Operation *op);
 LogicalResult verifyOneOperand(Operation *op);
 LogicalResult verifyNOperands(Operation *op, unsigned numOperands);
@@ -1016,7 +1016,7 @@
     return impl::verifyIsInvolution(op);
   }
 
-  static OpFoldResult foldTrait(Operation *op, ArrayRef<Attribute> operands) {
+  static ValueOrAttr foldTrait(Operation *op, ArrayRef<Attribute> operands) {
     return impl::foldInvolution(op);
   }
 };
@@ -1038,7 +1038,7 @@
     return impl::verifyIsIdempotent(op);
   }
 
-  static OpFoldResult foldTrait(Operation *op, ArrayRef<Attribute> operands) {
+  static ValueOrAttr foldTrait(Operation *op, ArrayRef<Attribute> operands) {
     return impl::foldIdempotent(op);
   }
 };
@@ -1338,7 +1338,7 @@
   using has_fold_trait =
       decltype(T::foldTrait(std::declval<Operation *>(),
                             std::declval<ArrayRef<Attribute>>(),
-                            std::declval<SmallVectorImpl<OpFoldResult> &>()));
+                            std::declval<SmallVectorImpl<ValueOrAttr> &>()));
   template <typename T>
   using detect_has_fold_trait = llvm::is_detected<has_fold_trait, T>;
   /// Trait to check if T provides any `foldTrait` method.
@@ -1355,7 +1355,7 @@
   template <typename Trait>
   static std::enable_if_t<detect_has_single_result_fold_trait<Trait>::value,
                           LogicalResult>
   foldTrait(Operation *op, ArrayRef<Attribute> operands,
-            SmallVectorImpl<OpFoldResult> &results) {
+            SmallVectorImpl<ValueOrAttr> &results) {
     assert(op->hasTrait<OneResult>() &&
            "expected trait on non single-result operation to implement the "
            "general `foldTrait` method");
@@ -1364,7 +1364,7 @@
     if (!results.empty())
       return failure();
 
-    if (OpFoldResult result = Trait::foldTrait(op, operands)) {
+    if (ValueOrAttr result = Trait::foldTrait(op, operands)) {
       if (result.template dyn_cast<Value>() != op->getResult(0))
         results.push_back(result);
       return success();
@@ -1376,7 +1376,7 @@
   template <typename Trait>
   static std::enable_if_t<detect_has_fold_trait<Trait>::value, LogicalResult>
   foldTrait(Operation *op, ArrayRef<Attribute> operands,
-            SmallVectorImpl<OpFoldResult> &results) {
+            SmallVectorImpl<ValueOrAttr> &results) {
     // If a previous trait has already been folded and replaced this operation,
     // we fail to fold this trait.
     return results.empty() ? Trait::foldTrait(op, operands, results) : failure();
@@ -1386,7 +1386,7 @@
   /// folding a set of trait types `Ts` that implement a `foldTrait` method.
   template <typename... Ts>
   static LogicalResult foldTraitsImpl(Operation *op, ArrayRef<Attribute> operands,
-                                      SmallVectorImpl<OpFoldResult> &results,
+                                      SmallVectorImpl<ValueOrAttr> &results,
                                       std::tuple<Ts...> *) {
     bool anyFolded = false;
     (void)std::initializer_list<int>{
@@ -1399,7 +1399,7 @@
   template <typename TraitTupleT>
   static std::enable_if_t<std::tuple_size<TraitTupleT>::value != 0, LogicalResult>
   foldTraits(Operation *op, ArrayRef<Attribute> operands,
-             SmallVectorImpl<OpFoldResult> &results) {
+             SmallVectorImpl<ValueOrAttr> &results) {
     return foldTraitsImpl(op, operands, results, (TraitTupleT *)nullptr);
   }
   /// A variant of the method above that is specialized when there are no traits
@@ -1407,7 +1407,7 @@
   template <typename TraitTupleT>
   static std::enable_if_t<std::tuple_size<TraitTupleT>::value == 0, LogicalResult>
   foldTraits(Operation *op, ArrayRef<Attribute> operands,
-             SmallVectorImpl<OpFoldResult> &results) {
+             SmallVectorImpl<ValueOrAttr> &results) {
     return failure();
   }
@@ -1512,9 +1512,9 @@
       llvm::is_detected<has_single_result_fold, T>;
   /// Trait to check if T provides a general 'fold' method.
   template <typename T>
-  using has_fold = decltype(
-      std::declval<T>().fold(std::declval<ArrayRef<Attribute>>(),
-                             std::declval<SmallVectorImpl<OpFoldResult> &>()));
+  using has_fold = decltype(std::declval<T>().fold(
+      std::declval<ArrayRef<Attribute>>(),
+      std::declval<SmallVectorImpl<ValueOrAttr> &>()));
   template <typename T>
   using detect_has_fold = llvm::is_detected<has_fold, T>;
   /// Trait to check if T provides a 'print' method.
@@ -1579,8 +1579,8 @@
   template <typename ConcreteOpT>
   static LogicalResult
   foldSingleResultHook(Operation *op, ArrayRef<Attribute> operands,
-                       SmallVectorImpl<OpFoldResult> &results) {
-    OpFoldResult result = cast<ConcreteOpT>(op).fold(operands);
+                       SmallVectorImpl<ValueOrAttr> &results) {
+    ValueOrAttr result = cast<ConcreteOpT>(op).fold(operands);
 
     // If the fold failed or was in-place, try to fold the traits of the
     // operation.
@@ -1596,7 +1596,7 @@
   /// Return the result of folding an operation that defines a `fold` method.
   template <typename ConcreteOpT>
   static LogicalResult foldHook(Operation *op, ArrayRef<Attribute> operands,
-                                SmallVectorImpl<OpFoldResult> &results) {
+                                SmallVectorImpl<ValueOrAttr> &results) {
     LogicalResult result = cast<ConcreteOpT>(op).fold(operands, results);
 
     // If the fold failed or was in-place, try to fold the traits of the
@@ -1712,7 +1712,7 @@
 /// Attempt to fold the given cast operation.
 LogicalResult foldCastInterfaceOp(Operation *op,
                                   ArrayRef<Attribute> attrOperands,
-                                  SmallVectorImpl<OpFoldResult> &foldResults);
+                                  SmallVectorImpl<ValueOrAttr> &foldResults);
 /// Attempt to verify the given cast operation.
 LogicalResult verifyCastInterfaceOp(
     Operation *op, function_ref<bool(TypeRange, TypeRange)> areCastCompatible);
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -460,7 +460,7 @@
   /// the operation, but may be null if non-constant. If folding is successful,
   /// this fills in the `results` vector. If not, `results` is unspecified.
   LogicalResult fold(ArrayRef<Attribute> operands,
-                     SmallVectorImpl<OpFoldResult> &results);
+                     SmallVectorImpl<ValueOrAttr> &results);
 
   /// Returns true if the operation was registered with a particular trait,
   /// e.g. hasTrait<OperandsAreSignlessIntegerLike>().
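Since `ValueOrAttr` is a `PointerUnion<Attribute, Value>` (see the OpDefinition.h hunks above), a caller of the generic `Operation::fold` hook declared here branches on which case each result holds. A hedged usage sketch follows; `op` and `constOperands` are illustrative names, not part of this patch.

```c++
// Drive Operation::fold and inspect each ValueOrAttr result.
SmallVector<ValueOrAttr, 4> results;
if (succeeded(op->fold(constOperands, results))) {
  for (ValueOrAttr ofr : results) {
    if (Value v = ofr.dyn_cast<Value>())
      llvm::outs() << "folded to existing value: " << v << "\n";
    else
      llvm::outs() << "folded to constant: " << ofr.get<Attribute>() << "\n";
  }
}
```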
diff --git a/mlir/include/mlir/IR/OperationSupport.h b/mlir/include/mlir/IR/OperationSupport.h
--- a/mlir/include/mlir/IR/OperationSupport.h
+++ b/mlir/include/mlir/IR/OperationSupport.h
@@ -38,7 +38,7 @@
 class OpAsmParserResult;
 class OpAsmPrinter;
 class OperandRange;
-class OpFoldResult;
+class ValueOrAttr;
 class ParseResult;
 class Pattern;
 class Region;
@@ -64,7 +64,7 @@
   using GetCanonicalizationPatternsFn = void (*)(OwningRewritePatternList &,
                                                  MLIRContext *);
   using FoldHookFn = LogicalResult (*)(Operation *, ArrayRef<Attribute>,
-                                       SmallVectorImpl<OpFoldResult> &);
+                                       SmallVectorImpl<ValueOrAttr> &);
   using HasTraitFn = bool (*)(TypeID);
   using ParseAssemblyFn = ParseResult (*)(OpAsmParser &, OperationState &);
   using PrintAssemblyFn = void (*)(Operation *, OpAsmPrinter &);
@@ -114,7 +114,7 @@
   /// "x+0 -> x", "min(x,y,x,z) -> min(x,y,z)", "x+y-x -> y", etc), as well as
   /// generalized constant folding.
   LogicalResult foldHook(Operation *op, ArrayRef<Attribute> operands,
-                         SmallVectorImpl<OpFoldResult> &results) const {
+                         SmallVectorImpl<ValueOrAttr> &results) const {
     return foldHookFn(op, operands, results);
   }
diff --git a/mlir/include/mlir/Interfaces/CastInterfaces.td b/mlir/include/mlir/Interfaces/CastInterfaces.td
--- a/mlir/include/mlir/Interfaces/CastInterfaces.td
+++ b/mlir/include/mlir/Interfaces/CastInterfaces.td
@@ -39,7 +39,7 @@
   let extraTraitClassDeclaration = [{
     /// Attempt to fold the given cast operation.
     static LogicalResult foldTrait(Operation *op, ArrayRef<Attribute> operands,
-                                   SmallVectorImpl<OpFoldResult> &results) {
+                                   SmallVectorImpl<ValueOrAttr> &results) {
       return impl::foldCastInterfaceOp(op, operands, results);
     }
   }];
diff --git a/mlir/include/mlir/Interfaces/FoldInterfaces.h b/mlir/include/mlir/Interfaces/FoldInterfaces.h
--- a/mlir/include/mlir/Interfaces/FoldInterfaces.h
+++ b/mlir/include/mlir/Interfaces/FoldInterfaces.h
@@ -14,7 +14,7 @@
 namespace mlir {
 class Attribute;
-class OpFoldResult;
+class ValueOrAttr;
 class Region;
 
 /// Define a fold interface to allow for dialects to control specific aspects
@@ -31,7 +31,7 @@
   /// folding is successful, this fills in the `results` vector. If not, this
   /// returns failure and `results` is unspecified.
   virtual LogicalResult fold(Operation *op, ArrayRef<Attribute> operands,
-                             SmallVectorImpl<OpFoldResult> &results) const {
+                             SmallVectorImpl<ValueOrAttr> &results) const {
     return failure();
   }
diff --git a/mlir/include/mlir/Interfaces/ViewLikeInterface.td b/mlir/include/mlir/Interfaces/ViewLikeInterface.td
--- a/mlir/include/mlir/Interfaces/ViewLikeInterface.td
+++ b/mlir/include/mlir/Interfaces/ViewLikeInterface.td
@@ -160,12 +160,12 @@
       /*desc=*/[{
        Return a vector of all the static or dynamic offsets of the op.
      }],
-      /*retTy=*/"::mlir::SmallVector<::mlir::OpFoldResult, 4>",
+      /*retTy=*/"::mlir::SmallVector<::mlir::ValueOrAttr, 4>",
      /*methodName=*/"getMixedOffsets",
      /*args=*/(ins),
      /*methodBody=*/"",
      /*defaultImplementation=*/[{
-        ::mlir::SmallVector<::mlir::OpFoldResult, 4> res;
+        ::mlir::SmallVector<::mlir::ValueOrAttr, 4> res;
        unsigned numDynamic = 0;
        unsigned count = $_op.static_offsets().size();
        for (unsigned idx = 0; idx < count; ++idx) {
@@ -181,12 +181,12 @@
      /*desc=*/[{
        Return a vector of all the static or dynamic sizes of the op.
      }],
-      /*retTy=*/"::mlir::SmallVector<::mlir::OpFoldResult, 4>",
+      /*retTy=*/"::mlir::SmallVector<::mlir::ValueOrAttr, 4>",
      /*methodName=*/"getMixedSizes",
      /*args=*/(ins),
      /*methodBody=*/"",
      /*defaultImplementation=*/[{
-        ::mlir::SmallVector<::mlir::OpFoldResult, 4> res;
+        ::mlir::SmallVector<::mlir::ValueOrAttr, 4> res;
        unsigned numDynamic = 0;
        unsigned count = $_op.static_sizes().size();
        for (unsigned idx = 0; idx < count; ++idx) {
@@ -202,12 +202,12 @@
      /*desc=*/[{
        Return a vector of all the static or dynamic strides of the op.
      }],
-      /*retTy=*/"::mlir::SmallVector<::mlir::OpFoldResult, 4>",
+      /*retTy=*/"::mlir::SmallVector<::mlir::ValueOrAttr, 4>",
      /*methodName=*/"getMixedStrides",
      /*args=*/(ins),
      /*methodBody=*/"",
      /*defaultImplementation=*/[{
-        ::mlir::SmallVector<::mlir::OpFoldResult, 4> res;
+        ::mlir::SmallVector<::mlir::ValueOrAttr, 4> res;
        unsigned numDynamic = 0;
        unsigned count = $_op.static_strides().size();
        for (unsigned idx = 0; idx < count; ++idx) {
diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -3301,7 +3301,7 @@
                         subViewOp.getMixedOffsets().size());
        i < e; ++i) {
     Value offset =
-        // TODO: need OpFoldResult ODS adaptor to clean this up.
+        // TODO: need ValueOrAttr ODS adaptor to clean this up.
         subViewOp.isDynamicOffset(i)
             ? operands[subViewOp.getIndexOfDynamicOffset(i)]
             : rewriter.create<LLVM::ConstantOp>(
@@ -3314,8 +3314,8 @@
   }
 
   // Update sizes and strides.
-  SmallVector<OpFoldResult, 4> mixedSizes = subViewOp.getMixedSizes();
-  SmallVector<OpFoldResult, 4> mixedStrides = subViewOp.getMixedStrides();
+  SmallVector<ValueOrAttr, 4> mixedSizes = subViewOp.getMixedSizes();
+  SmallVector<ValueOrAttr, 4> mixedStrides = subViewOp.getMixedStrides();
   assert(mixedSizes.size() == mixedStrides.size() &&
          "expected sizes and strides of equal length");
   for (int i = inferredShapeRank - 1, j = resultShapeRank - 1;
@@ -3334,7 +3334,7 @@
       stride = rewriter.create<LLVM::ConstantOp>(
           loc, llvmIndexType, rewriter.getI64IntegerAttr(1));
     } else {
-      // TODO: need OpFoldResult ODS adaptor to clean this up.
+      // TODO: need ValueOrAttr ODS adaptor to clean this up.
       size =
           subViewOp.isDynamicSize(i)
               ? operands[subViewOp.getIndexOfDynamicSize(i)]
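The TODOs in the hunks above refer to the manual pattern every consumer of the `getMixed*` interface methods currently repeats: each mixed entry is either a dynamic `Value` or a static `IntegerAttr` that must be materialized as a constant. A hedged sketch of that pattern, with illustrative names (`subViewOp`, `rewriter`, `loc`):

```c++
for (ValueOrAttr ofr : subViewOp.getMixedSizes()) {
  Value size;
  if (auto v = ofr.dyn_cast<Value>())
    size = v; // Already an SSA value.
  else
    size = rewriter.create<ConstantIndexOp>(
        loc, ofr.get<Attribute>().cast<IntegerAttr>().getInt());
  // ... use `size` ...
}
```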
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -555,7 +555,7 @@
   });
 }
 
-OpFoldResult AffineApplyOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr AffineApplyOp::fold(ArrayRef<Attribute> operands) {
   auto map = getAffineMap();
 
   // Fold dims and symbols to existing values.
@@ -1094,7 +1094,7 @@
 }
 
 LogicalResult AffineDmaStartOp::fold(ArrayRef<Attribute> cstOperands,
-                                     SmallVectorImpl<OpFoldResult> &results) {
+                                     SmallVectorImpl<ValueOrAttr> &results) {
   /// dma_start(memrefcast) -> dma_start
   return foldMemRefCast(*this);
 }
@@ -1171,7 +1171,7 @@
 }
 
 LogicalResult AffineDmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
-                                    SmallVectorImpl<OpFoldResult> &results) {
+                                    SmallVectorImpl<ValueOrAttr> &results) {
   /// dma_wait(memrefcast) -> dma_wait
   return foldMemRefCast(*this);
 }
@@ -1617,7 +1617,7 @@
 }
 
 LogicalResult AffineForOp::fold(ArrayRef<Attribute> operands,
-                                SmallVectorImpl<OpFoldResult> &results) {
+                                SmallVectorImpl<ValueOrAttr> &results) {
   bool folded = succeeded(foldLoopBounds(*this));
   folded |= succeeded(canonicalizeLoopBounds(*this));
   return success(folded);
@@ -2016,7 +2016,7 @@
 /// Canonicalize an affine if op's conditional (integer set + operands).
 LogicalResult AffineIfOp::fold(ArrayRef<Attribute>,
-                               SmallVectorImpl<OpFoldResult> &) {
+                               SmallVectorImpl<ValueOrAttr> &) {
   auto set = getIntegerSet();
   SmallVector<Value, 4> operands(getOperands());
   canonicalizeSetAndOperands(&set, &operands);
@@ -2153,11 +2153,11 @@
   results.insert<SimplifyAffineOp<AffineLoadOp>>(context);
 }
 
-OpFoldResult AffineLoadOp::fold(ArrayRef<Attribute> cstOperands) {
+ValueOrAttr AffineLoadOp::fold(ArrayRef<Attribute> cstOperands) {
   /// load(memrefcast) -> load
   if (succeeded(foldMemRefCast(*this)))
     return getResult();
-  return OpFoldResult();
+  return ValueOrAttr();
 }
 
 //===----------------------------------------------------------------------===//
@@ -2243,7 +2243,7 @@
 }
 
 LogicalResult AffineStoreOp::fold(ArrayRef<Attribute> cstOperands,
-                                  SmallVectorImpl<OpFoldResult> &results) {
+                                  SmallVectorImpl<ValueOrAttr> &results) {
   /// store(memrefcast) -> store
   return foldMemRefCast(*this);
 }
@@ -2295,7 +2295,7 @@
 /// list may contain nulls, which are interpreted as the operand not being a
 /// constant.
 template <typename T>
-static OpFoldResult foldMinMaxOp(T op, ArrayRef<Attribute> operands) {
+static ValueOrAttr foldMinMaxOp(T op, ArrayRef<Attribute> operands) {
   static_assert(llvm::is_one_of<T, AffineMinOp, AffineMaxOp>::value,
                 "expected affine min or max op");
@@ -2330,7 +2330,7 @@
 //   %0 = affine.min (d0) -> (1000, d0 + 512) (%i0)
 //
 
-OpFoldResult AffineMinOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr AffineMinOp::fold(ArrayRef<Attribute> operands) {
   return foldMinMaxOp(*this, operands);
 }
@@ -2346,7 +2346,7 @@
 //   %0 = affine.max (d0) -> (1000, d0 + 512) (%i0)
 //
 
-OpFoldResult AffineMaxOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr AffineMaxOp::fold(ArrayRef<Attribute> operands) {
   return foldMinMaxOp(*this, operands);
 }
@@ -2457,7 +2457,7 @@
 }
 
 LogicalResult AffinePrefetchOp::fold(ArrayRef<Attribute> cstOperands,
-                                     SmallVectorImpl<OpFoldResult> &results) {
+                                     SmallVectorImpl<ValueOrAttr> &results) {
   /// prefetch(memrefcast) -> prefetch
   return foldMemRefCast(*this);
 }
@@ -2706,7 +2706,7 @@
 }
 
 LogicalResult AffineParallelOp::fold(ArrayRef<Attribute> operands,
-                                     SmallVectorImpl<OpFoldResult> &results) {
+                                     SmallVectorImpl<ValueOrAttr> &results) {
   return canonicalizeLoopBounds(*this);
 }
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -86,15 +86,15 @@
 template <typename NamedStructuredOpType>
 static void printNamedStructuredOp(OpAsmPrinter &p, NamedStructuredOpType op);
 
-/// Helper function to dispatch an OpFoldResult into either the `dynamicVec` if
+/// Helper function to dispatch a ValueOrAttr into either the `dynamicVec` if
 /// it is a Value or into `staticVec` if it is an IntegerAttr.
 /// In the case of a Value, a copy of the `sentinel` value is also pushed to
 /// `staticVec`. This is useful to extract mixed static and dynamic entries that
 /// come from an AttrSizedOperandSegments trait.
-static void dispatchIndexOpFoldResult(OpFoldResult ofr,
-                                      SmallVectorImpl<Value> &dynamicVec,
-                                      SmallVectorImpl<int64_t> &staticVec,
-                                      int64_t sentinel) {
+static void dispatchIndexValueOrAttr(ValueOrAttr ofr,
+                                     SmallVectorImpl<Value> &dynamicVec,
+                                     SmallVectorImpl<int64_t> &staticVec,
+                                     int64_t sentinel) {
   if (auto v = ofr.dyn_cast<Value>()) {
     dynamicVec.push_back(v);
     staticVec.push_back(sentinel);
@@ -557,7 +557,7 @@
 // InitTensorOp
 //===----------------------------------------------------------------------===//
 void InitTensorOp::build(OpBuilder &b, OperationState &result,
-                         ArrayRef<OpFoldResult> sizes, Type elementType,
+                         ArrayRef<ValueOrAttr> sizes, Type elementType,
                          ArrayRef<NamedAttribute> attrs) {
   unsigned rank = sizes.size();
   SmallVector<Value, 4> dynamicSizes;
@@ -567,8 +567,8 @@
     // This will grow staticLow and staticHigh with 1 value. If the config is
     // dynamic (ie not a constant), dynamicLow and dynamicHigh will grow with 1
     // value as well.
-    dispatchIndexOpFoldResult(sizes[i], dynamicSizes, staticSizes,
-                              ShapedType::kDynamicSize);
+    dispatchIndexValueOrAttr(sizes[i], dynamicSizes, staticSizes,
+                             ShapedType::kDynamicSize);
   }
   auto resultType = RankedTensorType::get(staticSizes, elementType);
   build(b, result, resultType, dynamicSizes, b.getI64ArrayAttr(staticSizes));
@@ -913,8 +913,8 @@
 }
 
 void PadTensorOp::build(OpBuilder &b, OperationState &result, Type resultType,
-                        Value source, ArrayRef<OpFoldResult> low,
-                        ArrayRef<OpFoldResult> high,
+                        Value source, ArrayRef<ValueOrAttr> low,
+                        ArrayRef<ValueOrAttr> high,
                         ArrayRef<NamedAttribute> attrs) {
   assert(resultType.isa<RankedTensorType>());
   auto sourceType = source.getType().cast<RankedTensorType>();
@@ -926,10 +926,10 @@
     // This will grow staticLow and staticHigh with 1 value. If the config is
     // dynamic (ie not a constant), dynamicLow and dynamicHigh will grow with 1
     // value as well.
-    dispatchIndexOpFoldResult(low[i], dynamicLow, staticLow,
-                              ShapedType::kDynamicSize);
-    dispatchIndexOpFoldResult(high[i], dynamicHigh, staticHigh,
-                              ShapedType::kDynamicSize);
+    dispatchIndexValueOrAttr(low[i], dynamicLow, staticLow,
+                             ShapedType::kDynamicSize);
+    dispatchIndexValueOrAttr(high[i], dynamicHigh, staticHigh,
+                             ShapedType::kDynamicSize);
   }
   if (!resultType) {
     resultType =
@@ -940,8 +940,8 @@
 }
 
 PadTensorOp PadTensorOp::createPadScalarOp(Type type, Value source, Value pad,
-                                           ArrayRef<OpFoldResult> low,
-                                           ArrayRef<OpFoldResult> high,
+                                           ArrayRef<ValueOrAttr> low,
+                                           ArrayRef<ValueOrAttr> high,
                                            Location loc, OpBuilder &builder) {
   auto padTensorOp =
       builder.create<linalg::PadTensorOp>(loc, type, source, low, high);
@@ -959,7 +959,7 @@
 
 PadTensorOp PadTensorOp::createPadHighOp(Type type, Value source, Value pad,
                                          Location loc, OpBuilder &builder) {
-  SmallVector<OpFoldResult, 4> low, high;
+  SmallVector<ValueOrAttr, 4> low, high;
   auto rankedTensorType = type.cast<RankedTensorType>();
   assert(rankedTensorType.hasStaticShape());
   int rank = rankedTensorType.getRank();
@@ -1069,8 +1069,8 @@
 } // namespace
 
 template <typename ReshapeOpTy>
-static OpFoldResult foldReshapeOp(ReshapeOpTy reshapeOp,
-                                  ArrayRef<Attribute> operands) {
+static ValueOrAttr foldReshapeOp(ReshapeOpTy reshapeOp,
+                                 ArrayRef<Attribute> operands) {
   // Fold producer-consumer reshape ops where the operand type of the
   // producer is the same as the return type of the consumer.
   ReshapeOpTy reshapeSrcOp =
@@ -1777,12 +1777,12 @@
 // TODO: Consider making all this boilerplate easy to autogenerate
 // with Tablegen. This seems a desirable property in the context of
 // OpInterfaces where a Linalg "named" op **isa** LinalgOp.
-OpFoldResult ReshapeOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ReshapeOp::fold(ArrayRef<Attribute> operands) {
   if (succeeded(foldMemRefCast(*this)))
     return getResult();
   return foldReshapeOp(*this, operands);
 }
-OpFoldResult TensorReshapeOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr TensorReshapeOp::fold(ArrayRef<Attribute> operands) {
   return foldReshapeOp(*this, operands);
 }
@@ -2265,7 +2265,7 @@
   }                                                                            \
                                                                                \
   LogicalResult XXX::fold(ArrayRef<Attribute>,                                 \
-                          SmallVectorImpl<OpFoldResult> &) {                   \
+                          SmallVectorImpl<ValueOrAttr> &) {                    \
     return foldMemRefCast(*this);                                              \
   }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -61,9 +61,9 @@
 // by `permutationMap`.
 static void inferShapeComponents(AffineMap permutationMap,
                                  ArrayRef<Range> loopRanges,
-                                 SmallVectorImpl<OpFoldResult> &offsets,
-                                 SmallVectorImpl<OpFoldResult> &sizes,
-                                 SmallVectorImpl<OpFoldResult> &strides) {
+                                 SmallVectorImpl<ValueOrAttr> &offsets,
+                                 SmallVectorImpl<ValueOrAttr> &sizes,
+                                 SmallVectorImpl<ValueOrAttr> &strides) {
   assert(permutationMap.isProjectedPermutation() &&
          "expected some subset of a permutation map");
   SmallVector<Range, 4> shapeRanges(permutationMap.getNumResults());
@@ -101,7 +101,7 @@
     AffineMap map = op.getIndexingMap(shapedOperandIdx);
     LLVM_DEBUG(llvm::dbgs() << "shapedOperandIdx: " << shapedOperandIdx
                             << " with indexingMap: " << map << "\n");
-    SmallVector<OpFoldResult, 4> offsets, sizes, strides;
+    SmallVector<ValueOrAttr, 4> offsets, sizes, strides;
     inferShapeComponents(map, loopRanges, offsets, sizes, strides);
     Value shape = en.value();
     Value sub = shape.getType().isa<MemRefType>()
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
@@ -98,8 +98,8 @@
 } // namespace
 
 /// Return true if op1 and op2 are the same constant or the same SSA value.
-static bool isEqualOffsetSizeOrStride(OpFoldResult op1, OpFoldResult op2) {
-  auto getConstantIntValue = [](OpFoldResult ofr) -> llvm::Optional<int64_t> {
+static bool isEqualOffsetSizeOrStride(ValueOrAttr op1, ValueOrAttr op2) {
+  auto getConstantIntValue = [](ValueOrAttr ofr) -> llvm::Optional<int64_t> {
     Attribute attr = ofr.dyn_cast<Attribute>();
     // Note: isa+cast-like pattern allows writing the condition below as 1 line.
     if (!attr && ofr.get<Value>().getDefiningOp<ConstantIndexOp>())
@@ -731,11 +731,11 @@
 
   // Stack step 2. create SubTensorInsertOp at the top of the stack.
   // offsets = [clonedLoopIvs, 0 .. 0].
-  SmallVector<OpFoldResult, 4> offsets(leadingPackedTensorIndexings.begin(),
-                                       leadingPackedTensorIndexings.end());
+  SmallVector<ValueOrAttr, 4> offsets(leadingPackedTensorIndexings.begin(),
+                                      leadingPackedTensorIndexings.end());
   offsets.append(paddedRank, b.getIndexAttr(0));
   // sizes = [1 .. 1, paddedShape].
-  SmallVector<OpFoldResult, 4> sizes(nLoops, b.getIndexAttr(1));
+  SmallVector<ValueOrAttr, 4> sizes(nLoops, b.getIndexAttr(1));
   for (int64_t sz : paddedTensorType.getShape()) {
     // TODO: go grab dims when necessary, for now PadTensorOp returns a static
     // tensor.
     sizes.push_back(b.getIndexAttr(sz));
   }
   // strides = [1 .. 1].
-  SmallVector<OpFoldResult, 4> strides(nLoops + paddedRank, b.getIndexAttr(1));
+  SmallVector<ValueOrAttr, 4> strides(nLoops + paddedRank, b.getIndexAttr(1));
   Value inserted =
       b.create<SubTensorInsertOp>(loc, bvm.lookup(padTensorOp.result()),
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -215,7 +215,7 @@
   auto viewType = subView.getType();
   auto rank = viewType.getRank();
   SmallVector<Value, 4> fullSizes;
-  SmallVector<OpFoldResult, 4> partialSizes;
+  SmallVector<ValueOrAttr, 4> partialSizes;
   fullSizes.reserve(rank);
   partialSizes.reserve(rank);
   for (auto en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
@@ -235,8 +235,8 @@
   Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, folder);
   if (!fullLocalView)
     return {};
-  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
-  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
+  SmallVector<ValueOrAttr, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
+  SmallVector<ValueOrAttr, 4> ones(fullSizes.size(), b.getIndexAttr(1));
   auto partialLocalView =
       folded_std_subview(folder, *fullLocalView, zeros, partialSizes, ones);
   return PromotionInfo{*fullLocalView, partialLocalView};
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -255,7 +255,7 @@
   }
 
   // Construct a new subview / subtensor for the tile.
-  SmallVector<OpFoldResult, 4> offsets, sizes, strides;
+  SmallVector<ValueOrAttr, 4> offsets, sizes, strides;
   offsets.reserve(rank);
   sizes.reserve(rank);
   strides.reserve(rank);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -209,14 +209,14 @@
   llvm::SetVector<Operation *> newUsersOfOpToPad;
   for (auto it : llvm::zip(opToPad->getResults(), paddedOp->getResults())) {
     auto rank = std::get<0>(it).getType().cast<RankedTensorType>().getRank();
-    SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
+    SmallVector<ValueOrAttr> offsets(rank, rewriter.getIndexAttr(0));
     auto sizes = llvm::to_vector<4>(llvm::map_range(
-        llvm::seq<unsigned>(0, rank), [&](unsigned d) -> OpFoldResult {
+        llvm::seq<unsigned>(0, rank), [&](unsigned d) -> ValueOrAttr {
           auto dimOp = rewriter.create<DimOp>(loc, std::get<0>(it), d);
           newUsersOfOpToPad.insert(dimOp);
           return dimOp.getResult();
         }));
-    SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
+    SmallVector<ValueOrAttr> strides(rank, rewriter.getIndexAttr(1));
     paddedSubviewResults.push_back(rewriter.create<SubTensorOp>(
         loc, std::get<1>(it), offsets, sizes, strides));
   }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -424,8 +424,8 @@
 /// are determined to be zero.
 LogicalResult PadTensorOpVectorizationPattern::matchAndRewrite(
     linalg::PadTensorOp padOp, PatternRewriter &rewriter) const {
-  // Helper function to determine whether an OpFoldResult is not a zero Index.
-  auto isNotZeroIndex = [](OpFoldResult ofr) {
+  // Helper function to determine whether a ValueOrAttr is not a zero Index.
+  auto isNotZeroIndex = [](ValueOrAttr ofr) {
    if (Attribute attr = ofr.dyn_cast<Attribute>())
      return attr.cast<IntegerAttr>().getInt() != 0;
    Value v = ofr.get<Value>();
@@ -462,7 +462,7 @@
   // TODO: if any pad_high is not a static 0, needs a mask. For now, just bail.
   if (llvm::any_of(padOp.getMixedHighPad(),
-                   [&](OpFoldResult ofr) { return isNotZeroIndex(ofr); }))
+                   [&](ValueOrAttr ofr) { return isNotZeroIndex(ofr); }))
     return failure();
 
   // Now we can rewrite as InitTensorOp + TransferReadOp@[0..0] +
diff --git a/mlir/lib/Dialect/Quant/IR/QuantOps.cpp b/mlir/lib/Dialect/Quant/IR/QuantOps.cpp
--- a/mlir/lib/Dialect/Quant/IR/QuantOps.cpp
+++ b/mlir/lib/Dialect/Quant/IR/QuantOps.cpp
@@ -32,12 +32,12 @@
       >();
 }
 
-OpFoldResult StorageCastOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr StorageCastOp::fold(ArrayRef<Attribute> operands) {
   // Matches x -> [scast -> scast] -> y, replacing the second scast with the
   // value of x if the casts invert each other.
   auto srcScastOp = arg().getDefiningOp<StorageCastOp>();
   if (!srcScastOp || srcScastOp.arg().getType() != getType())
-    return OpFoldResult();
+    return ValueOrAttr();
   return srcScastOp.arg();
 }
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
@@ -128,7 +128,7 @@
 // spv.CompositeExtractOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult spirv::CompositeExtractOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr spirv::CompositeExtractOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 1 && "spv.CompositeExtract expects one operand");
   auto indexVector =
       llvm::to_vector<8>(llvm::map_range(indices(), [](Attribute attr) {
@@ -141,7 +141,7 @@
 // spv.constant
 //===----------------------------------------------------------------------===//
 
-OpFoldResult spirv::ConstantOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr spirv::ConstantOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.empty() && "spv.constant has no operands");
   return value();
 }
@@ -150,7 +150,7 @@
 // spv.IAdd
 //===----------------------------------------------------------------------===//
 
-OpFoldResult spirv::IAddOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr spirv::IAddOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "spv.IAdd expects two operands");
   // x + 0 = x
   if (matchPattern(operand2(), m_Zero()))
@@ -169,7 +169,7 @@
 // spv.IMul
 //===----------------------------------------------------------------------===//
 
-OpFoldResult spirv::IMulOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr spirv::IMulOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "spv.IMul expects two operands");
   // x * 0 == 0
   if (matchPattern(operand2(), m_Zero()))
@@ -191,7 +191,7 @@
 // spv.ISub
 //===----------------------------------------------------------------------===//
 
-OpFoldResult spirv::ISubOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr spirv::ISubOp::fold(ArrayRef<Attribute> operands) {
   // x - x = 0
   if (operand1() == operand2())
     return Builder(getContext()).getIntegerAttr(getType(), 0);
@@ -209,7 +209,7 @@
 // spv.LogicalAnd
 //===----------------------------------------------------------------------===//
 
-OpFoldResult spirv::LogicalAndOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr spirv::LogicalAndOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "spv.LogicalAnd should take two operands");
 
   if (Optional<bool> rhs = getScalarOrSplatBoolAttr(operands.back())) {
@@ -240,7 +240,7 @@
 // spv.LogicalOr
 //===----------------------------------------------------------------------===//
 
-OpFoldResult spirv::LogicalOrOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr spirv::LogicalOrOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "spv.LogicalOr should take two operands");
 
   if (auto rhs =
          getScalarOrSplatBoolAttr(operands.back())) {
diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -201,7 +201,7 @@
 // TODO: Canonicalization should be implemented for shapes that can be
 // determined through mixtures of the known dimensions of the inputs.
-OpFoldResult AnyOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr AnyOp::fold(ArrayRef<Attribute> operands) {
   // Only the last operand is checked because AnyOp is commutative.
   if (operands.back())
     return operands.back();
@@ -320,7 +320,7 @@
   patterns.insert(context);
 }
 
-OpFoldResult AssumingAllOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr AssumingAllOp::fold(ArrayRef<Attribute> operands) {
   // Iterate in reverse to first handle all constant operands. They are
   // guaranteed to be the tail of the inputs because this is commutative.
   for (int idx = operands.size() - 1; idx >= 0; idx--) {
@@ -353,7 +353,7 @@
 // BroadcastOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult BroadcastOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr BroadcastOp::fold(ArrayRef<Attribute> operands) {
   if (!operands[1])
     return nullptr;
@@ -395,7 +395,7 @@
 // ConcatOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult ConcatOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ConcatOp::fold(ArrayRef<Attribute> operands) {
   if (!operands[0] || !operands[1])
     return nullptr;
   auto lhsShape = llvm::to_vector<6>(
@@ -453,7 +453,7 @@
   return success();
 }
 
-OpFoldResult ConstShapeOp::fold(ArrayRef<Attribute>) { return shapeAttr(); }
+ValueOrAttr ConstShapeOp::fold(ArrayRef<Attribute>) { return shapeAttr(); }
 
 void ConstShapeOp::getCanonicalizationPatterns(
     OwningRewritePatternList &patterns, MLIRContext *context) {
@@ -490,7 +490,7 @@
   patterns.insert(context);
 }
 
-OpFoldResult CstrBroadcastableOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr CstrBroadcastableOp::fold(ArrayRef<Attribute> operands) {
   // TODO: Add folding for the nary case
   if (operands.size() != 2)
     return nullptr;
@@ -546,7 +546,7 @@
   patterns.insert(context);
 }
 
-OpFoldResult CstrEqOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr CstrEqOp::fold(ArrayRef<Attribute> operands) {
   if (llvm::all_of(operands,
                    [&](Attribute a) { return a && a == operands[0]; }))
     return BoolAttr::get(getContext(), true);
@@ -566,7 +566,7 @@
   build(builder, result, builder.getIndexAttr(value));
 }
 
-OpFoldResult ConstSizeOp::fold(ArrayRef<Attribute>) { return valueAttr(); }
+ValueOrAttr ConstSizeOp::fold(ArrayRef<Attribute>) { return valueAttr(); }
 
 void ConstSizeOp::getAsmResultNames(
     llvm::function_ref<void(Value, StringRef)> setNameFn) {
@@ -580,13 +580,13 @@
 // ConstWitnessOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult ConstWitnessOp::fold(ArrayRef<Attribute>) { return passingAttr(); }
+ValueOrAttr ConstWitnessOp::fold(ArrayRef<Attribute>) { return passingAttr(); }
 
 //===----------------------------------------------------------------------===//
 // CstrRequireOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult CstrRequireOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr CstrRequireOp::fold(ArrayRef<Attribute> operands) {
   return operands[0];
 }
@@ -594,7 +594,7 @@
 // ShapeEqOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult ShapeEqOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ShapeEqOp::fold(ArrayRef<Attribute> operands) {
   if (lhs() == rhs())
     return BoolAttr::get(getContext(), true);
   auto lhs = operands[0].dyn_cast_or_null<DenseIntElementsAttr>();
@@ -610,7 +610,7 @@
 // IndexToSizeOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult IndexToSizeOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr IndexToSizeOp::fold(ArrayRef<Attribute> operands) {
   // Constant values of both types, `shape.size` and `index`, are represented as
   // `IntegerAttr`s which makes constant folding simple.
   if (Attribute arg = operands[0])
@@ -627,7 +627,7 @@
 // FromExtentsOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult FromExtentsOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr FromExtentsOp::fold(ArrayRef<Attribute> operands) {
   if (llvm::any_of(operands, [](Attribute a) { return !a; }))
     return nullptr;
   SmallVector<int64_t, 6> extents;
@@ -708,7 +708,7 @@
   return llvm::None;
 }
 
-OpFoldResult GetExtentOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr GetExtentOp::fold(ArrayRef<Attribute> operands) {
   auto elements = operands[0].dyn_cast_or_null<DenseIntElementsAttr>();
   if (!elements)
     return nullptr;
@@ -749,7 +749,7 @@
 // RankOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult shape::RankOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr shape::RankOp::fold(ArrayRef<Attribute> operands) {
   auto shape = operands[0].dyn_cast_or_null<DenseIntElementsAttr>();
   if (!shape)
     return {};
@@ -808,7 +808,7 @@
 // NumElementsOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult NumElementsOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr NumElementsOp::fold(ArrayRef<Attribute> operands) {
 
   // Fold only when argument constant.
   Attribute shape = operands[0];
@@ -836,7 +836,7 @@
 // MulOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult MulOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr MulOp::fold(ArrayRef<Attribute> operands) {
   auto lhs = operands[0].dyn_cast_or_null<IntegerAttr>();
   if (!lhs)
     return nullptr;
@@ -852,7 +852,7 @@
 // ShapeOfOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult ShapeOfOp::fold(ArrayRef<Attribute>) {
+ValueOrAttr ShapeOfOp::fold(ArrayRef<Attribute>) {
   auto type = getOperand().getType().dyn_cast<ShapedType>();
   if (!type || !type.hasStaticShape())
     return nullptr;
@@ -893,7 +893,7 @@
 // SizeToIndexOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult SizeToIndexOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SizeToIndexOp::fold(ArrayRef<Attribute> operands) {
   // Constant values of both types, `shape.size` and `index`, are represented as
   // `IntegerAttr`s which makes constant folding simple.
   if (Attribute arg = operands[0])
@@ -931,7 +931,7 @@
 //===----------------------------------------------------------------------===//
 
 LogicalResult SplitAtOp::fold(ArrayRef<Attribute> operands,
-                              SmallVectorImpl<OpFoldResult> &results) {
+                              SmallVectorImpl<ValueOrAttr> &results) {
   if (!operands[0] || !operands[1])
     return failure();
   auto shapeVec = llvm::to_vector<6>(
@@ -955,7 +955,7 @@
 // ToExtentTensorOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult ToExtentTensorOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ToExtentTensorOp::fold(ArrayRef<Attribute> operands) {
   if (!operands[0])
     return impl::foldCastOp(*this);
   Builder builder(getContext());
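The StandardOps changes that follow rename the `dispatchIndexOpFoldResult` helper, which splits mixed static/dynamic entries into an SSA-value vector plus a static-shape vector with sentinels marking the dynamic slots. A hedged usage sketch of the renamed helper defined just below; `b` (an `OpBuilder`) and `someDynamicValue` are illustrative:

```c++
SmallVector<ValueOrAttr, 4> mixedSizes = {b.getIndexAttr(4), someDynamicValue};
SmallVector<Value, 4> dynamicSizes;
SmallVector<int64_t, 4> staticSizes;
for (ValueOrAttr ofr : mixedSizes)
  dispatchIndexValueOrAttr(ofr, dynamicSizes, staticSizes,
                           ShapedType::kDynamicSize);
// staticSizes  == {4, ShapedType::kDynamicSize}
// dynamicSizes == {someDynamicValue}
```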
diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -33,15 +33,15 @@
 using namespace mlir;
 
-/// Helper function to dispatch an OpFoldResult into either the `dynamicVec` if
+/// Helper function to dispatch a ValueOrAttr into either the `dynamicVec` if
 /// it is a Value or into `staticVec` if it is an IntegerAttr.
 /// In the case of a Value, a copy of the `sentinel` value is also pushed to
 /// `staticVec`. This is useful to extract mixed static and dynamic entries that
 /// come from an AttrSizedOperandSegments trait.
-static void dispatchIndexOpFoldResult(OpFoldResult ofr,
-                                      SmallVectorImpl<Value> &dynamicVec,
-                                      SmallVectorImpl<int64_t> &staticVec,
-                                      int64_t sentinel) {
+static void dispatchIndexValueOrAttr(ValueOrAttr ofr,
+                                     SmallVectorImpl<Value> &dynamicVec,
+                                     SmallVectorImpl<int64_t> &staticVec,
+                                     int64_t sentinel) {
   if (auto v = ofr.dyn_cast<Value>()) {
     dynamicVec.push_back(v);
     staticVec.push_back(sentinel);
@@ -51,12 +51,12 @@
   staticVec.push_back(apInt.getSExtValue());
 }
 
-static void dispatchIndexOpFoldResults(ArrayRef<OpFoldResult> ofrs,
-                                       SmallVectorImpl<Value> &dynamicVec,
-                                       SmallVectorImpl<int64_t> &staticVec,
-                                       int64_t sentinel) {
+static void dispatchIndexValueOrAttrs(ArrayRef<ValueOrAttr> ofrs,
+                                      SmallVectorImpl<Value> &dynamicVec,
+                                      SmallVectorImpl<int64_t> &staticVec,
+                                      int64_t sentinel) {
   for (auto ofr : ofrs)
-    dispatchIndexOpFoldResult(ofr, dynamicVec, staticVec, sentinel);
+    dispatchIndexValueOrAttr(ofr, dynamicVec, staticVec, sentinel);
 }
@@ -247,7 +247,7 @@
 // AddFOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult AddFOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr AddFOp::fold(ArrayRef<Attribute> operands) {
   return constFoldBinaryOp<FloatAttr>(
       operands, [](APFloat a, APFloat b) { return a + b; });
 }
@@ -256,7 +256,7 @@
 // AddIOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult AddIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr AddIOp::fold(ArrayRef<Attribute> operands) {
   /// addi(x, 0) -> x
   if (matchPattern(rhs(), m_Zero()))
     return lhs();
@@ -402,7 +402,7 @@
 // AndOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult AndOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr AndOp::fold(ArrayRef<Attribute> operands) {
   /// and(x, 0) -> 0
   if (matchPattern(rhs(), m_Zero()))
     return rhs();
@@ -839,7 +839,7 @@
 }
 
 // Constant folding hook for comparisons.
-OpFoldResult CmpIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr CmpIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "cmpi takes two arguments");
 
   if (lhs() == rhs()) {
@@ -917,7 +917,7 @@
 }
 
 // Constant folding hook for comparisons.
-OpFoldResult CmpFOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr CmpFOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "cmpf takes two arguments");
 
   auto lhs = operands.front().dyn_cast_or_null<FloatAttr>();
@@ -1213,7 +1213,7 @@
   return op.emitOpError("unsupported 'value' attribute: ") << value;
 }
 
-OpFoldResult ConstantOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ConstantOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.empty() && "constant has no operands");
   return getValue();
 }
@@ -1341,7 +1341,7 @@
 }
 
 LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
-                              SmallVectorImpl<OpFoldResult> &results) {
+                              SmallVectorImpl<ValueOrAttr> &results) {
   /// dealloc(memrefcast) -> dealloc
   return foldMemRefCast(*this);
 }
@@ -1392,7 +1392,7 @@
   return success();
 }
 
-OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr DimOp::fold(ArrayRef<Attribute> operands) {
   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
 
   // All forms of folding require a known index.
@@ -1521,7 +1521,7 @@
 // DivFOp
 // ---------------------------------------------------------------------------
 
-OpFoldResult DivFOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr DivFOp::fold(ArrayRef<Attribute> operands) {
   return constFoldBinaryOp<FloatAttr>(
       operands, [](APFloat a, APFloat b) { return a / b; });
 }
@@ -1696,7 +1696,7 @@
 }
 
 LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
-                               SmallVectorImpl<OpFoldResult> &results) {
+                               SmallVectorImpl<ValueOrAttr> &results) {
   /// dma_start(memrefcast) -> dma_start
   return foldMemRefCast(*this);
 }
@@ -1745,7 +1745,7 @@
 }
 
 LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
-                              SmallVectorImpl<OpFoldResult> &results) {
+                              SmallVectorImpl<ValueOrAttr> &results) {
   /// dma_wait(memrefcast) -> dma_wait
   return foldMemRefCast(*this);
 }
@@ -1952,7 +1952,7 @@
          (a.isSignlessInteger() && b.isIndex());
 }
 
-OpFoldResult IndexCastOp::fold(ArrayRef<Attribute> cstOperands) {
+ValueOrAttr IndexCastOp::fold(ArrayRef<Attribute> cstOperands) {
   // Fold IndexCast(IndexCast(x)) -> x
   auto cast = getOperand().getDefiningOp<IndexCastOp>();
   if (cast && cast.getOperand().getType() == getType())
@@ -1977,11 +1977,11 @@
   return success();
 }
 
-OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
+ValueOrAttr LoadOp::fold(ArrayRef<Attribute> cstOperands) {
   /// load(memrefcast) -> load
   if (succeeded(foldMemRefCast(*this)))
     return getResult();
-  return OpFoldResult();
+  return ValueOrAttr();
 }
 
 namespace {
@@ -2087,7 +2087,7 @@
   return false;
 }
 
-OpFoldResult MemRefCastOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr MemRefCastOp::fold(ArrayRef<Attribute> operands) {
   return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
 }
@@ -2100,18 +2100,18 @@
 /// source-memref-rank sentinel values that encode dynamic entries.
 void mlir::MemRefReinterpretCastOp::build(OpBuilder &b, OperationState &result,
                                           MemRefType resultType, Value source,
-                                          OpFoldResult offset,
-                                          ArrayRef<OpFoldResult> sizes,
-                                          ArrayRef<OpFoldResult> strides,
+                                          ValueOrAttr offset,
+                                          ArrayRef<ValueOrAttr> sizes,
+                                          ArrayRef<ValueOrAttr> strides,
                                           ArrayRef<NamedAttribute> attrs) {
   SmallVector<int64_t, 4> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value, 4> dynamicOffsets, dynamicSizes, dynamicStrides;
-  dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
-  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
-  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(offset, dynamicOffsets, staticOffsets,
+                            ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(sizes, dynamicSizes, staticSizes,
+                            ShapedType::kDynamicSize);
+  dispatchIndexValueOrAttrs(strides, dynamicStrides, staticStrides,
+                            ShapedType::kDynamicStrideOrOffset);
   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
@@ -2124,12 +2124,10 @@
                                           ArrayRef<int64_t> sizes,
                                           ArrayRef<int64_t> strides,
                                           ArrayRef<NamedAttribute> attrs) {
-  SmallVector<OpFoldResult, 4> sizeValues =
-      llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
-        return b.getI64IntegerAttr(v);
-      }));
-  SmallVector<OpFoldResult, 4> strideValues = llvm::to_vector<4>(
-      llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
+  SmallVector<ValueOrAttr, 4> sizeValues = llvm::to_vector<4>(llvm::map_range(
+      sizes, [&](int64_t v) -> ValueOrAttr { return b.getI64IntegerAttr(v); }));
+  SmallVector<ValueOrAttr, 4> strideValues = llvm::to_vector<4>(llvm::map_range(
+      strides, [&](int64_t v) -> ValueOrAttr { return b.getI64IntegerAttr(v); }));
   build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues,
@@ -2141,10 +2139,10 @@
                                           Value offset, ValueRange sizes,
                                           ValueRange strides,
                                           ArrayRef<NamedAttribute> attrs) {
-  SmallVector<OpFoldResult, 4> sizeValues = llvm::to_vector<4>(
-      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
-  SmallVector<OpFoldResult, 4> strideValues = llvm::to_vector<4>(
-      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
+  SmallVector<ValueOrAttr, 4> sizeValues = llvm::to_vector<4>(
+      llvm::map_range(sizes, [](Value v) -> ValueOrAttr { return v; }));
+  SmallVector<ValueOrAttr, 4> strideValues = llvm::to_vector<4>(
+      llvm::map_range(strides, [](Value v) -> ValueOrAttr { return v; }));
   build(b, result, resultType, source, offset, sizeValues, strideValues,
         attrs);
 }
@@ -2241,7 +2239,7 @@
 // MulFOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult MulFOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr MulFOp::fold(ArrayRef<Attribute> operands) {
   return constFoldBinaryOp<FloatAttr>(
       operands, [](APFloat a, APFloat b) { return a * b; });
 }
@@ -2250,7 +2248,7 @@
 // MulIOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult MulIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr MulIOp::fold(ArrayRef<Attribute> operands) {
   /// muli(x, 0) -> 0
   if (matchPattern(rhs(), m_Zero()))
     return rhs();
@@ -2267,7 +2265,7 @@
 // OrOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult OrOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr OrOp::fold(ArrayRef<Attribute> operands) {
   /// or(x, 0) -> x
   if (matchPattern(rhs(), m_Zero()))
     return lhs();
@@ -2344,7 +2342,7 @@
 }
 
 LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
-                               SmallVectorImpl<OpFoldResult> &results) {
+                               SmallVectorImpl<ValueOrAttr> &results) {
   // prefetch(memrefcast) -> prefetch
   return foldMemRefCast(*this);
 }
@@ -2353,7 +2351,7 @@
 // RankOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr RankOp::fold(ArrayRef<Attribute> operands) {
   // Constant fold rank when the rank of the operand is known.
   auto type = getOperand().getType();
   if (auto shapedType = type.dyn_cast<ShapedType>())
@@ -2392,7 +2390,7 @@
 // SelectOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult SelectOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SelectOp::fold(ArrayRef<Attribute> operands) {
   auto condition = getCondition();
 
   // select true, %0, %1 => %0
@@ -2486,7 +2484,7 @@
 // SignedDivIOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult SignedDivIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SignedDivIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // Don't fold if it would overflow or if it requires a division by zero.
@@ -2522,7 +2520,7 @@
   return val.sadd_ov(one, overflow);
 }
 
-OpFoldResult SignedFloorDivIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SignedFloorDivIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // Don't fold if it would overflow or if it requires a division by zero.
@@ -2572,7 +2570,7 @@
 // SignedCeilDivIOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult SignedCeilDivIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SignedCeilDivIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // Don't fold if it would overflow or if it requires a division by zero.
@@ -2622,7 +2620,7 @@
 // SignedRemIOp
 //===----------------------------------------------------------------------===//
-OpFoldResult SignedRemIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SignedRemIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "remi_signed takes two operands");
   auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
@@ -2672,7 +2670,7 @@
 }
 // Constant folding hook for SplatOp.
-OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SplatOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 1 && "splat takes one operand");
   auto constOperand = operands.front();
@@ -2699,7 +2697,7 @@
 }
 LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
-                            SmallVectorImpl<OpFoldResult> &results) {
+                            SmallVectorImpl<ValueOrAttr> &results) {
   /// store(memrefcast) -> store
   return foldMemRefCast(*this);
 }
@@ -2708,7 +2706,7 @@
 // SubFOp
 //===----------------------------------------------------------------------===//
-OpFoldResult SubFOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SubFOp::fold(ArrayRef<Attribute> operands) {
   return constFoldBinaryOp<FloatAttr>(
       operands, [](APFloat a, APFloat b) { return a - b; });
 }
@@ -2717,7 +2715,7 @@
 // SubIOp
 //===----------------------------------------------------------------------===//
-OpFoldResult SubIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SubIOp::fold(ArrayRef<Attribute> operands) {
   // subi(x,x) -> 0
   if (getOperand(0) == getOperand(1))
     return Builder(getContext()).getZeroAttr(getType());
@@ -2832,17 +2830,17 @@
 }
 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
-                                ArrayRef<OpFoldResult> leadingStaticOffsets,
-                                ArrayRef<OpFoldResult> leadingStaticSizes,
-                                ArrayRef<OpFoldResult> leadingStaticStrides) {
+                                ArrayRef<ValueOrAttr> leadingStaticOffsets,
+                                ArrayRef<ValueOrAttr> leadingStaticSizes,
+                                ArrayRef<ValueOrAttr> leadingStaticStrides) {
   SmallVector<int64_t, 4> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value, 4> dynamicOffsets, dynamicSizes, dynamicStrides;
-  dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
-                             staticOffsets, ShapedType::kDynamicStrideOrOffset);
-  dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
-  dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
-                             staticStrides, ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(leadingStaticOffsets, dynamicOffsets, staticOffsets,
+                            ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(leadingStaticSizes, dynamicSizes, staticSizes,
+                            ShapedType::kDynamicSize);
+  dispatchIndexValueOrAttrs(leadingStaticStrides, dynamicStrides, staticStrides,
+                            ShapedType::kDynamicStrideOrOffset);
   return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
                                     staticSizes, staticStrides)
       .cast<MemRefType>();
@@ -2852,18 +2850,18 @@
 // type. If the type passed is nullptr, it is inferred.
 void mlir::SubViewOp::build(OpBuilder &b, OperationState &result,
                             MemRefType resultType, Value source,
-                            ArrayRef<OpFoldResult> offsets,
-                            ArrayRef<OpFoldResult> sizes,
-                            ArrayRef<OpFoldResult> strides,
+                            ArrayRef<ValueOrAttr> offsets,
+                            ArrayRef<ValueOrAttr> sizes,
+                            ArrayRef<ValueOrAttr> strides,
                             ArrayRef<NamedAttribute> attrs) {
   SmallVector<int64_t, 4> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value, 4> dynamicOffsets, dynamicSizes, dynamicStrides;
-  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
-  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
-  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(offsets, dynamicOffsets, staticOffsets,
+                            ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(sizes, dynamicSizes, staticSizes,
+                            ShapedType::kDynamicSize);
+  dispatchIndexValueOrAttrs(strides, dynamicStrides, staticStrides,
+                            ShapedType::kDynamicStrideOrOffset);
   auto sourceMemRefType = source.getType().cast<MemRefType>();
   // Structuring implementation this way avoids duplication between builders.
   if (!resultType) {
@@ -2880,9 +2878,9 @@
 // Build a SubViewOp with mixed static and dynamic entries and inferred result
 // type.
 void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
-                            ArrayRef<OpFoldResult> offsets,
-                            ArrayRef<OpFoldResult> sizes,
-                            ArrayRef<OpFoldResult> strides,
+                            ArrayRef<ValueOrAttr> offsets,
+                            ArrayRef<ValueOrAttr> sizes,
+                            ArrayRef<ValueOrAttr> strides,
                             ArrayRef<NamedAttribute> attrs) {
   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
 }
@@ -2892,16 +2890,14 @@
                             ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
                             ArrayRef<int64_t> strides,
                             ArrayRef<NamedAttribute> attrs) {
-  SmallVector<OpFoldResult, 4> offsetValues = llvm::to_vector<4>(
-      llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
-        return b.getI64IntegerAttr(v);
-      }));
-  SmallVector<OpFoldResult, 4> sizeValues =
-      llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
+  SmallVector<ValueOrAttr, 4> offsetValues = llvm::to_vector<4>(
+      llvm::map_range(offsets, [&](int64_t v) -> ValueOrAttr {
        return b.getI64IntegerAttr(v);
      }));
-  SmallVector<OpFoldResult, 4> strideValues = llvm::to_vector<4>(
-      llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
+  SmallVector<ValueOrAttr, 4> sizeValues = llvm::to_vector<4>(llvm::map_range(
+      sizes, [&](int64_t v) -> ValueOrAttr { return b.getI64IntegerAttr(v); }));
+  SmallVector<ValueOrAttr, 4> strideValues = llvm::to_vector<4>(
+      llvm::map_range(strides, [&](int64_t v) -> ValueOrAttr { return b.getI64IntegerAttr(v); }));
   build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
@@ -2914,16 +2910,14 @@
                             ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
                             ArrayRef<int64_t> strides,
                             ArrayRef<NamedAttribute> attrs) {
-  SmallVector<OpFoldResult, 4> offsetValues = llvm::to_vector<4>(
-      llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
-        return b.getI64IntegerAttr(v);
-      }));
-  SmallVector<OpFoldResult, 4> sizeValues =
-      llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
+  SmallVector<ValueOrAttr, 4> offsetValues = llvm::to_vector<4>(
+      llvm::map_range(offsets, [&](int64_t v) -> ValueOrAttr {
        return b.getI64IntegerAttr(v);
      }));
-  SmallVector<OpFoldResult, 4> strideValues = llvm::to_vector<4>(
-      llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
+  SmallVector<ValueOrAttr, 4> sizeValues = llvm::to_vector<4>(llvm::map_range(
+      sizes, [&](int64_t v) -> ValueOrAttr { return b.getI64IntegerAttr(v); }));
+  SmallVector<ValueOrAttr, 4> strideValues = llvm::to_vector<4>(
+      llvm::map_range(strides, [&](int64_t v) -> ValueOrAttr { return b.getI64IntegerAttr(v); }));
   build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
@@ -2937,12 +2931,12 @@
                             ValueRange offsets, ValueRange sizes,
                             ValueRange strides,
                             ArrayRef<NamedAttribute> attrs) {
-  SmallVector<OpFoldResult, 4> offsetValues = llvm::to_vector<4>(
-      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
-  SmallVector<OpFoldResult, 4> sizeValues = llvm::to_vector<4>(
-      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
-  SmallVector<OpFoldResult, 4> strideValues = llvm::to_vector<4>(
-      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
+  SmallVector<ValueOrAttr, 4> offsetValues = llvm::to_vector<4>(
+      llvm::map_range(offsets, [](Value v) -> ValueOrAttr { return v; }));
+  SmallVector<ValueOrAttr, 4> sizeValues = llvm::to_vector<4>(
+      llvm::map_range(sizes, [](Value v) -> ValueOrAttr { return v; }));
+  SmallVector<ValueOrAttr, 4> strideValues = llvm::to_vector<4>(
+      llvm::map_range(strides, [](Value v) -> ValueOrAttr { return v; }));
   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
 }
@@ -3162,9 +3156,9 @@
 /// Detects the `values` produced by a ConstantIndexOp and places the new
 /// constant in place of the corresponding sentinel value.
-void canonicalizeSubViewPart(SmallVectorImpl<OpFoldResult> &values,
+void canonicalizeSubViewPart(SmallVectorImpl<ValueOrAttr> &values,
                              llvm::function_ref<bool(int64_t)> isDynamic) {
-  for (OpFoldResult &ofr : values) {
+  for (ValueOrAttr &ofr : values) {
     if (ofr.is<Attribute>())
       continue;
     // Newly static, move from Value to constant.
@@ -3200,9 +3194,9 @@
     // At least one of offsets/sizes/strides is a new constant.
     // Form the new list of operands and constant attributes from the existing.
-    SmallVector<OpFoldResult, 4> mixedOffsets(op.getMixedOffsets());
-    SmallVector<OpFoldResult, 4> mixedSizes(op.getMixedSizes());
-    SmallVector<OpFoldResult, 4> mixedStrides(op.getMixedStrides());
+    SmallVector<ValueOrAttr, 4> mixedOffsets(op.getMixedOffsets());
+    SmallVector<ValueOrAttr, 4> mixedSizes(op.getMixedSizes());
+    SmallVector<ValueOrAttr, 4> mixedStrides(op.getMixedStrides());
     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
@@ -3366,7 +3360,7 @@
                  SubViewOpMemRefCastFolder>(context);
 }
-OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr SubViewOp::fold(ArrayRef<Attribute> operands) {
   if (getResult().getType().cast<MemRefType>().getRank() == 0 &&
       source().getType().cast<MemRefType>().getRank() == 0)
     return getViewSource();
@@ -3399,17 +3393,17 @@
 }
 Type SubTensorOp::inferResultType(RankedTensorType sourceRankedTensorType,
-                                  ArrayRef<OpFoldResult> leadingStaticOffsets,
-                                  ArrayRef<OpFoldResult> leadingStaticSizes,
-                                  ArrayRef<OpFoldResult> leadingStaticStrides) {
+                                  ArrayRef<ValueOrAttr> leadingStaticOffsets,
+                                  ArrayRef<ValueOrAttr> leadingStaticSizes,
+                                  ArrayRef<ValueOrAttr> leadingStaticStrides) {
   SmallVector<int64_t, 4> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value, 4> dynamicOffsets, dynamicSizes, dynamicStrides;
-  dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
-                             staticOffsets, ShapedType::kDynamicStrideOrOffset);
-  dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
-  dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
-                             staticStrides, ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(leadingStaticOffsets, dynamicOffsets, staticOffsets,
+                            ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(leadingStaticSizes, dynamicSizes, staticSizes,
+                            ShapedType::kDynamicSize);
+  dispatchIndexValueOrAttrs(leadingStaticStrides, dynamicStrides, staticStrides,
+                            ShapedType::kDynamicStrideOrOffset);
   return SubTensorOp::inferResultType(sourceRankedTensorType, staticOffsets,
                                       staticSizes, staticStrides)
       .cast<RankedTensorType>();
@@ -3419,18 +3413,18 @@
 // type. If the type passed is nullptr, it is inferred.
 void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result,
                               RankedTensorType resultType, Value source,
-                              ArrayRef<OpFoldResult> offsets,
-                              ArrayRef<OpFoldResult> sizes,
-                              ArrayRef<OpFoldResult> strides,
+                              ArrayRef<ValueOrAttr> offsets,
+                              ArrayRef<ValueOrAttr> sizes,
+                              ArrayRef<ValueOrAttr> strides,
                               ArrayRef<NamedAttribute> attrs) {
   SmallVector<int64_t, 4> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value, 4> dynamicOffsets, dynamicSizes, dynamicStrides;
-  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
-  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
-  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(offsets, dynamicOffsets, staticOffsets,
+                            ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(sizes, dynamicSizes, staticSizes,
+                            ShapedType::kDynamicSize);
+  dispatchIndexValueOrAttrs(strides, dynamicStrides, staticStrides,
+                            ShapedType::kDynamicStrideOrOffset);
   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
   // Structuring implementation this way avoids duplication between builders.
   if (!resultType) {
@@ -3448,9 +3442,9 @@
 // Build a SubTensorOp with mixed static and dynamic entries and inferred result
 // type.
 void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result,
-                              Value source, ArrayRef<OpFoldResult> offsets,
-                              ArrayRef<OpFoldResult> sizes,
-                              ArrayRef<OpFoldResult> strides,
+                              Value source, ArrayRef<ValueOrAttr> offsets,
+                              ArrayRef<ValueOrAttr> sizes,
+                              ArrayRef<ValueOrAttr> strides,
                               ArrayRef<NamedAttribute> attrs) {
   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
 }
@@ -3462,12 +3456,12 @@
                               ValueRange offsets, ValueRange sizes,
                               ValueRange strides,
                               ArrayRef<NamedAttribute> attrs) {
-  SmallVector<OpFoldResult, 4> offsetValues = llvm::to_vector<4>(
-      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
-  SmallVector<OpFoldResult, 4> sizeValues = llvm::to_vector<4>(
-      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
-  SmallVector<OpFoldResult, 4> strideValues = llvm::to_vector<4>(
-      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
+  SmallVector<ValueOrAttr, 4> offsetValues = llvm::to_vector<4>(
+      llvm::map_range(offsets, [](Value v) -> ValueOrAttr { return v; }));
+  SmallVector<ValueOrAttr, 4> sizeValues = llvm::to_vector<4>(
+      llvm::map_range(sizes, [](Value v) -> ValueOrAttr { return v; }));
+  SmallVector<ValueOrAttr, 4> strideValues = llvm::to_vector<4>(
+      llvm::map_range(strides, [](Value v) -> ValueOrAttr { return v; }));
   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
 }
@@ -3504,18 +3498,18 @@
 // Build a SubTensorInsertOp with mixed static and dynamic entries.
 void mlir::SubTensorInsertOp::build(OpBuilder &b, OperationState &result,
                                     Value source, Value dest,
-                                    ArrayRef<OpFoldResult> offsets,
-                                    ArrayRef<OpFoldResult> sizes,
-                                    ArrayRef<OpFoldResult> strides,
+                                    ArrayRef<ValueOrAttr> offsets,
+                                    ArrayRef<ValueOrAttr> sizes,
+                                    ArrayRef<ValueOrAttr> strides,
                                     ArrayRef<NamedAttribute> attrs) {
   SmallVector<int64_t, 4> staticOffsets, staticSizes, staticStrides;
   SmallVector<Value, 4> dynamicOffsets, dynamicSizes, dynamicStrides;
-  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
-                             ShapedType::kDynamicStrideOrOffset);
-  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
-  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
-                             ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(offsets, dynamicOffsets, staticOffsets,
+                            ShapedType::kDynamicStrideOrOffset);
+  dispatchIndexValueOrAttrs(sizes, dynamicSizes, staticSizes,
+                            ShapedType::kDynamicSize);
+  dispatchIndexValueOrAttrs(strides, dynamicStrides, staticStrides,
+                            ShapedType::kDynamicStrideOrOffset);
   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
@@ -3528,12 +3522,12 @@
                                     ValueRange offsets, ValueRange sizes,
                                     ValueRange strides,
                                     ArrayRef<NamedAttribute> attrs) {
-  SmallVector<OpFoldResult, 4> offsetValues = llvm::to_vector<4>(
-      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
-  SmallVector<OpFoldResult, 4> sizeValues = llvm::to_vector<4>(
-      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
-  SmallVector<OpFoldResult, 4> strideValues = llvm::to_vector<4>(
-      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
+  SmallVector<ValueOrAttr, 4> offsetValues = llvm::to_vector<4>(
+      llvm::map_range(offsets, [](Value v) -> ValueOrAttr { return v; }));
+  SmallVector<ValueOrAttr, 4> sizeValues = llvm::to_vector<4>(
+      llvm::map_range(sizes, [](Value v) -> ValueOrAttr { return v; }));
+  SmallVector<ValueOrAttr, 4> strideValues = llvm::to_vector<4>(
+      llvm::map_range(strides, [](Value v) -> ValueOrAttr { return v; }));
   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
 }
@@ -3541,7 +3535,7 @@
 // TensorLoadOp
 //===----------------------------------------------------------------------===//
-OpFoldResult TensorLoadOp::fold(ArrayRef<Attribute>) {
+ValueOrAttr TensorLoadOp::fold(ArrayRef<Attribute>) {
   if (auto tensorToMemref = memref().getDefiningOp<TensorToMemrefOp>())
     return tensorToMemref.tensor();
   return {};
@@ -3551,7 +3545,7 @@
 // TensorToMemrefOp
 //===----------------------------------------------------------------------===//
-OpFoldResult TensorToMemrefOp::fold(ArrayRef<Attribute>) {
+ValueOrAttr TensorToMemrefOp::fold(ArrayRef<Attribute>) {
   if (auto tensorLoad = tensor().getDefiningOp<TensorLoadOp>())
     if (tensorLoad.memref().getType() == getType())
       return tensorLoad.memref();
@@ -3642,7 +3636,7 @@
   return success();
 }
-OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
+ValueOrAttr TransposeOp::fold(ArrayRef<Attribute>) {
   if (succeeded(foldMemRefCast(*this)))
     return getResult();
   return {};
@@ -3673,7 +3667,7 @@
 // UnsignedDivIOp
 //===----------------------------------------------------------------------===//
-OpFoldResult UnsignedDivIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr UnsignedDivIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
   // Don't fold if it would require a division by zero.
@@ -3702,7 +3696,7 @@
 // UnsignedRemIOp
 //===----------------------------------------------------------------------===//
-OpFoldResult UnsignedRemIOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr UnsignedRemIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "remi_unsigned takes two operands");
   auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
@@ -3894,7 +3888,7 @@
 // XOrOp
 //===----------------------------------------------------------------------===//
-OpFoldResult XOrOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr XOrOp::fold(ArrayRef<Attribute> operands) {
   /// xor(x, 0) -> x
   if (matchPattern(rhs(), m_Zero()))
     return lhs();
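For illustration only, not part of the patch: the standard-dialect folders above all share the single-result hook shape, which after the rename reads as in the sketch below. `MyMulOp` and its `lhs()`/`rhs()` accessors are hypothetical stand-ins, and the `constFoldBinaryOp<IntegerAttr>` call mirrors the helper the real integer folds use.

    // Sketch of a single-result folder under the renamed hook (hypothetical op).
    ValueOrAttr MyMulOp::fold(ArrayRef<Attribute> operands) {
      /// mymul(x, 0) -> 0
      if (matchPattern(rhs(), m_Zero()))
        return rhs(); // Reuse an existing SSA Value.
      /// mymul(x, 1) -> x
      if (matchPattern(rhs(), m_One()))
        return lhs();
      // Otherwise try to constant-fold; a null ValueOrAttr means "no fold".
      return constFoldBinaryOp<IntegerAttr>(
          operands, [](APInt a, APInt b) { return a * b; });
    }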
diff --git a/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp
--- a/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp
+++ b/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp
@@ -83,7 +83,7 @@
       return failure();
     int64_t rank = shapeType.cast<MemRefType>().getDimSize(0);
-    SmallVector<OpFoldResult, 4> sizes, strides;
+    SmallVector<ValueOrAttr, 4> sizes, strides;
     sizes.resize(rank);
     strides.resize(rank);
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -181,7 +181,7 @@
   return success();
 }
-OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ExtractOp::fold(ArrayRef<Attribute> operands) {
   // The tensor operand must be a known constant.
   Attribute tensor = operands.front();
   if (!tensor)
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -102,7 +102,7 @@
 // Operator Folders.
 //===----------------------------------------------------------------------===//
-OpFoldResult ConstOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ConstOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.empty() && "constant has no operands");
   return valueAttr();
 }
diff --git a/mlir/lib/Dialect/Vector/VectorOps.cpp b/mlir/lib/Dialect/Vector/VectorOps.cpp
--- a/mlir/lib/Dialect/Vector/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/VectorOps.cpp
@@ -1088,7 +1088,7 @@
   return extractOp.getResult();
 }
-OpFoldResult ExtractOp::fold(ArrayRef<Attribute>) {
+ValueOrAttr ExtractOp::fold(ArrayRef<Attribute>) {
   if (succeeded(foldExtractOpFromExtractChain(*this)))
     return getResult();
   if (succeeded(foldExtractOpFromTranspose(*this)))
@@ -1099,7 +1099,7 @@
     return val;
   if (auto val = foldExtractFromShapeCast(*this))
     return val;
-  return OpFoldResult();
+  return ValueOrAttr();
 }
 //===----------------------------------------------------------------------===//
@@ -1224,7 +1224,7 @@
   return success();
 }
-OpFoldResult ExtractMapOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ExtractMapOp::fold(ArrayRef<Attribute> operands) {
   auto insert = vector().getDefiningOp<InsertMapOp>();
   if (insert == nullptr || getType() != insert.vector().getType() ||
       ids() != insert.ids())
@@ -1285,7 +1285,7 @@
   return success();
 }
-OpFoldResult BroadcastOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr BroadcastOp::fold(ArrayRef<Attribute> operands) {
   if (!operands[0])
     return {};
   auto vectorType = getVectorType();
@@ -1987,7 +1987,7 @@
   return failure();
 }
-OpFoldResult ExtractStridedSliceOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ExtractStridedSliceOp::fold(ArrayRef<Attribute> operands) {
   if (getVectorType() == getResult().getType())
     return vector();
   if (succeeded(foldExtractStridedOpFromInsertChain(*this)))
@@ -2445,13 +2445,13 @@
   return success();
 }
-OpFoldResult TransferReadOp::fold(ArrayRef<Attribute>) {
+ValueOrAttr TransferReadOp::fold(ArrayRef<Attribute>) {
   /// transfer_read(memrefcast) -> transfer_read
   if (succeeded(foldTransferMaskAttribute(*this)))
     return getResult();
   if (succeeded(foldMemRefCast(*this)))
     return getResult();
-  return OpFoldResult();
+  return ValueOrAttr();
 }
 Optional<SmallVector<int64_t, 4>> TransferReadOp::getShapeForUnroll() {
@@ -2571,7 +2571,7 @@
 }
 LogicalResult TransferWriteOp::fold(ArrayRef<Attribute>,
-                                    SmallVectorImpl<OpFoldResult> &) {
+                                    SmallVectorImpl<ValueOrAttr> &) {
   if (succeeded(foldTransferMaskAttribute(*this)))
     return success();
   return foldMemRefCast(*this);
@@ -3019,7 +3019,7 @@
   return success();
 }
-OpFoldResult ShapeCastOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr ShapeCastOp::fold(ArrayRef<Attribute> operands) {
   // Nop shape cast.
   if (source().getType() == result().getType())
     return source();
@@ -3087,7 +3087,7 @@
   return success();
 }
-OpFoldResult BitCastOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr BitCastOp::fold(ArrayRef<Attribute> operands) {
   // Nop cast.
   if (source().getType() == result().getType())
     return source();
@@ -3220,7 +3220,7 @@
 // Eliminates transpose operations, which produce values identical to their
 // input values. This happens when the dimensions of the input vector remain in
 // their original order after the transpose operation.
-OpFoldResult vector::TransposeOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr vector::TransposeOp::fold(ArrayRef<Attribute> operands) {
   SmallVector<int64_t, 4> transp;
   getTransp(transp);
@@ -3352,7 +3352,7 @@
   return success();
 }
-OpFoldResult TupleGetOp::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr TupleGetOp::fold(ArrayRef<Attribute> operands) {
   // Rewrite:
   //   %t = vector.tuple .., %e_i, ..
   //   %x = vector.tuple_get %t, i
diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
--- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
@@ -2311,7 +2311,7 @@
          "Expected memref rank to match the alloc rank");
   ValueRange leadingIndices =
       xferOp.indices().take_front(xferOp.getLeadingShapedRank());
-  SmallVector<OpFoldResult, 4> sizes;
+  SmallVector<ValueOrAttr, 4> sizes;
   sizes.append(leadingIndices.begin(), leadingIndices.end());
   xferOp.zipResultAndIndexing([&](int64_t resultIdx, int64_t indicesIdx) {
     using MapList = ArrayRef<ArrayRef<AffineExpr>>;
@@ -2328,11 +2328,11 @@
     sizes.push_back(affineMin);
   });
-  SmallVector<OpFoldResult, 4> indices = llvm::to_vector<4>(llvm::map_range(
-      xferOp.indices(), [](Value idx) -> OpFoldResult { return idx; }));
+  SmallVector<ValueOrAttr, 4> indices = llvm::to_vector<4>(llvm::map_range(
+      xferOp.indices(), [](Value idx) -> ValueOrAttr { return idx; }));
   return std_sub_view(
       xferOp.source(), indices, sizes,
-      SmallVector<OpFoldResult, 4>(memrefRank, OpBuilder(xferOp).getIndexAttr(1)));
+      SmallVector<ValueOrAttr, 4>(memrefRank, OpBuilder(xferOp).getIndexAttr(1)));
 }
 /// Given an `xferOp` for which:
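For illustration only, not part of the patch: the VectorTransforms hunk above builds a single `SmallVector<ValueOrAttr, 4>` that mixes SSA values and index attributes. A minimal sketch of that mixed static/dynamic style, assuming hypothetical surrounding state `b`, `loc`, `source`, `rank`, and `dynSize`:

    // One list can carry static entries (Attribute) and dynamic ones (Value).
    SmallVector<ValueOrAttr, 4> offsets(rank, b.getIndexAttr(0));  // static 0s
    SmallVector<ValueOrAttr, 4> sizes(rank, ValueOrAttr(dynSize)); // dynamic
    SmallVector<ValueOrAttr, 4> strides(rank, b.getIndexAttr(1));  // static 1s
    auto subview = b.create<SubViewOp>(loc, source, offsets, sizes, strides);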
diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp
--- a/mlir/lib/IR/Builders.cpp
+++ b/mlir/lib/IR/Builders.cpp
@@ -420,7 +420,7 @@
     matchPattern(op->getOperand(i), m_Constant(&constOperands[i]));
   // Try to fold the operation.
-  SmallVector<OpFoldResult, 4> foldResults;
+  SmallVector<ValueOrAttr, 4> foldResults;
   if (failed(op->fold(constOperands, foldResults)) || foldResults.empty())
     return cleanupFailure();
diff --git a/mlir/lib/IR/BuiltinDialect.cpp b/mlir/lib/IR/BuiltinDialect.cpp
--- a/mlir/lib/IR/BuiltinDialect.cpp
+++ b/mlir/lib/IR/BuiltinDialect.cpp
@@ -253,7 +253,7 @@
 LogicalResult
 UnrealizedConversionCastOp::fold(ArrayRef<Attribute> attrOperands,
-                                 SmallVectorImpl<OpFoldResult> &foldResults) {
+                                 SmallVectorImpl<ValueOrAttr> &foldResults) {
   OperandRange operands = inputs();
   if (operands.empty())
     return failure();
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -574,7 +574,7 @@
 /// Attempt to fold this operation using the Op's registered foldHook.
 LogicalResult Operation::fold(ArrayRef<Attribute> operands,
-                              SmallVectorImpl<OpFoldResult> &results) {
+                              SmallVectorImpl<ValueOrAttr> &results) {
   // If we have a registered operation definition matching this one, use it to
   // try to constant fold the operation.
   auto *abstractOp = getAbstractOperation();
@@ -696,7 +696,7 @@
 // Op Trait implementations
 //===----------------------------------------------------------------------===//
-OpFoldResult OpTrait::impl::foldIdempotent(Operation *op) {
+ValueOrAttr OpTrait::impl::foldIdempotent(Operation *op) {
   auto *argumentOp = op->getOperand(0).getDefiningOp();
   if (argumentOp && op->getName() == argumentOp->getName()) {
     // Replace the outer operation output with the inner operation.
@@ -706,7 +706,7 @@
   return {};
 }
-OpFoldResult OpTrait::impl::foldInvolution(Operation *op) {
+ValueOrAttr OpTrait::impl::foldInvolution(Operation *op) {
   auto *argumentOp = op->getOperand(0).getDefiningOp();
   if (argumentOp && op->getName() == argumentOp->getName()) {
     // Replace the outer involutions output with inner's input.
@@ -1187,7 +1187,7 @@
 /// Attempt to fold the given cast operation.
 LogicalResult
 impl::foldCastInterfaceOp(Operation *op, ArrayRef<Attribute> attrOperands,
-                          SmallVectorImpl<OpFoldResult> &foldResults) {
+                          SmallVectorImpl<ValueOrAttr> &foldResults) {
   OperandRange operands = op->getOperands();
   if (operands.empty())
     return failure();
diff --git a/mlir/lib/Transforms/SCCP.cpp b/mlir/lib/Transforms/SCCP.cpp
--- a/mlir/lib/Transforms/SCCP.cpp
+++ b/mlir/lib/Transforms/SCCP.cpp
@@ -529,7 +529,7 @@
   // Simulate the result of folding this operation to a constant. If folding
   // fails or was an in-place fold, mark the results as overdefined.
-  SmallVector<OpFoldResult, 8> foldResults;
+  SmallVector<ValueOrAttr, 8> foldResults;
   foldResults.reserve(op->getNumResults());
   if (failed(op->fold(operandConstants, foldResults)))
     return markAllOverdefined(op, op->getResults());
@@ -550,7 +550,7 @@
     LatticeValue &resultLattice = latticeValues[op->getResult(i)];
     // Merge in the result of the fold, either a constant or a value.
-    OpFoldResult foldResult = foldResults[i];
+    ValueOrAttr foldResult = foldResults[i];
     if (Attribute foldAttr = foldResult.dyn_cast<Attribute>())
       meet(op, resultLattice, LatticeValue(foldAttr, opDialect));
     else
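For illustration only, not part of the patch: the caller-side protocol visible in Builders.cpp and SCCP.cpp above, restated as a sketch. A successful `Operation::fold` either fills the vector with one `ValueOrAttr` per op result or leaves it empty to signal an in-place fold.

    SmallVector<ValueOrAttr, 8> foldResults;
    if (succeeded(op->fold(constOperands, foldResults)) && !foldResults.empty()) {
      for (ValueOrAttr &repl : foldResults) {
        if (Attribute attr = repl.dyn_cast<Attribute>()) {
          // Constant result: materialize `attr` as a constant operation.
        } else {
          // Existing SSA value: redirect uses to repl.get<Value>().
        }
      }
    }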
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -179,7 +179,7 @@
     OpBuilder &builder, Operation *op, SmallVectorImpl<Value> &results,
     function_ref<void(Operation *)> processGeneratedConstants) {
   SmallVector<Attribute, 8> operandConstants;
-  SmallVector<OpFoldResult, 8> foldResults;
+  SmallVector<ValueOrAttr, 8> foldResults;
   // If this is a commutative operation, move constants to be trailing operands.
   if (op->getNumOperands() >= 2 && op->hasTrait<OpTrait::IsCommutative>()) {
@@ -217,7 +217,7 @@
   // Create the result constants and replace the results.
   auto *dialect = op->getDialect();
   for (unsigned i = 0, e = op->getNumResults(); i != e; ++i) {
-    assert(!foldResults[i].isNull() && "expected valid OpFoldResult");
+    assert(!foldResults[i].isNull() && "expected valid ValueOrAttr");
     // Check if the result was an SSA value.
     if (auto repl = foldResults[i].dyn_cast<Value>()) {
diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp
--- a/mlir/test/lib/Dialect/Test/TestDialect.cpp
+++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp
@@ -618,23 +618,23 @@
   results.insert(context);
 }
-OpFoldResult TestOpWithRegionFold::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr TestOpWithRegionFold::fold(ArrayRef<Attribute> operands) {
   return operand();
 }
-OpFoldResult TestOpConstant::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr TestOpConstant::fold(ArrayRef<Attribute> operands) {
   return getValue();
 }
 LogicalResult TestOpWithVariadicResultsAndFolder::fold(
-    ArrayRef<Attribute> operands, SmallVectorImpl<OpFoldResult> &results) {
+    ArrayRef<Attribute> operands, SmallVectorImpl<ValueOrAttr> &results) {
   for (Value input : this->operands()) {
     results.push_back(input);
   }
   return success();
 }
-OpFoldResult TestOpInPlaceFold::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr TestOpInPlaceFold::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 1);
   if (operands.front()) {
     (*this)->setAttr("attr", operands.front());
@@ -643,7 +643,7 @@
   return {};
 }
-OpFoldResult TestPassthroughFold::fold(ArrayRef<Attribute> operands) {
+ValueOrAttr TestPassthroughFold::fold(ArrayRef<Attribute> operands) {
   return getOperand();
 }
diff --git a/mlir/test/lib/Dialect/Test/TestTraits.cpp b/mlir/test/lib/Dialect/Test/TestTraits.cpp
--- a/mlir/test/lib/Dialect/Test/TestTraits.cpp
+++ b/mlir/test/lib/Dialect/Test/TestTraits.cpp
@@ -17,17 +17,17 @@
 // Trait Folder.
 //===----------------------------------------------------------------------===//
-OpFoldResult TestInvolutionTraitFailingOperationFolderOp::fold(
+ValueOrAttr TestInvolutionTraitFailingOperationFolderOp::fold(
     ArrayRef<Attribute> operands) {
   // This failure should cause the trait fold to run instead.
   return {};
 }
-OpFoldResult TestInvolutionTraitSuccesfulOperationFolderOp::fold(
+ValueOrAttr TestInvolutionTraitSuccesfulOperationFolderOp::fold(
    ArrayRef<Attribute> operands) {
   auto argumentOp = getOperand();
   // The success case should cause the trait fold to be suppressed.
-  return argumentOp.getDefiningOp() ? argumentOp : OpFoldResult{};
+  return argumentOp.getDefiningOp() ? argumentOp : ValueOrAttr{};
 }
 namespace {
diff --git a/mlir/test/mlir-tblgen/op-decl.td b/mlir/test/mlir-tblgen/op-decl.td
--- a/mlir/test/mlir-tblgen/op-decl.td
+++ b/mlir/test/mlir-tblgen/op-decl.td
@@ -91,7 +91,7 @@
 // CHECK: void print(::mlir::OpAsmPrinter &p);
 // CHECK: ::mlir::LogicalResult verify();
 // CHECK: static void getCanonicalizationPatterns(::mlir::OwningRewritePatternList &results, ::mlir::MLIRContext *context);
-// CHECK: ::mlir::LogicalResult fold(::llvm::ArrayRef<::mlir::Attribute> operands, ::llvm::SmallVectorImpl<::mlir::OpFoldResult> &results);
+// CHECK: ::mlir::LogicalResult fold(::llvm::ArrayRef<::mlir::Attribute> operands, ::llvm::SmallVectorImpl<::mlir::ValueOrAttr> &results);
 // CHECK: // Display a graph for debugging purposes.
 // CHECK: void displayGraph();
 // CHECK: };
diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-gen.cpp
@@ -2053,7 +2053,7 @@
       results.insert();
     }
     LogicalResult {0}::fold(ArrayRef<Attribute>,
-                            SmallVectorImpl<OpFoldResult> &) {{
+                            SmallVectorImpl<ValueOrAttr> &) {{
       return foldMemRefCast(*this);
     }
     void {0}::getEffects(SmallVectorImpl<
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -1691,12 +1691,12 @@
   if (def.getValueAsBit("hasFolder")) {
     if (hasSingleResult) {
       opClass.addMethodAndPrune(
-          "::mlir::OpFoldResult", "fold", OpMethod::MP_Declaration,
+          "::mlir::ValueOrAttr", "fold", OpMethod::MP_Declaration,
           "::llvm::ArrayRef<::mlir::Attribute>", "operands");
     } else {
       SmallVector<OpMethodParameter, 2> paramList;
       paramList.emplace_back("::llvm::ArrayRef<::mlir::Attribute>", "operands");
-      paramList.emplace_back("::llvm::SmallVectorImpl<::mlir::OpFoldResult> &",
+      paramList.emplace_back("::llvm::SmallVectorImpl<::mlir::ValueOrAttr> &",
                              "results");
      opClass.addMethodAndPrune("::mlir::LogicalResult", "fold",
                                OpMethod::MP_Declaration, std::move(paramList));
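For reference, an illustration rather than part of the patch: per the OpDefinitionsGen change above, an ODS op with `hasFolder` set now gets the renamed hooks declared on its generated class as follows.

    // Declared for an op with a single result:
    ::mlir::ValueOrAttr fold(::llvm::ArrayRef<::mlir::Attribute> operands);
    // Declared for the general, multi-result case:
    ::mlir::LogicalResult fold(::llvm::ArrayRef<::mlir::Attribute> operands,
                               ::llvm::SmallVectorImpl<::mlir::ValueOrAttr> &results);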