diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td --- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td +++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td @@ -493,6 +493,9 @@ } static StringRef getMapAttrName() { return "map"; } + + // TODO: Remove once prefixing is flipped. + operand_range getIndices() { return indices(); } }]; } @@ -856,6 +859,9 @@ } static StringRef getMapAttrName() { return "map"; } + + // TODO: Remove once prefixing is flipped. + operand_range getIndices() { return indices(); } }]; } diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td @@ -1118,6 +1118,9 @@ LogicalResult reifyResultShapes(OpBuilder &b, ReifiedRankedShapedTypeDims &reifiedReturnShapes); + // TODO: Remove once prefixing is flipped. + ArrayAttr getIteratorTypes() { return iterator_types(); } + //========================================================================// // Helper functions to mutate the `operand_segment_sizes` attribute. // These are useful when cloning and changing operand types. diff --git a/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h b/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h --- a/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h +++ b/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h @@ -163,7 +163,7 @@ StructuredGenerator(OpBuilder &builder, StructuredOpInterface op) : builder(builder), ctx(op.getContext()), loc(op.getLoc()), - iterators(op.iterator_types()), maps(op.getIndexingMaps()), op(op) {} + iterators(op.getIteratorTypes()), maps(op.getIndexingMaps()), op(op) {} bool iters(ArrayRef its) { if (its.size() != iterators.size()) diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -219,18 +219,18 @@ ]; let extraClassDeclaration = [{ VectorType getLhsType() { - return lhs().getType().cast(); + return getLhs().getType().cast(); } VectorType getRhsType() { - return rhs().getType().cast(); + return getRhs().getType().cast(); } - Type getAccType() { return acc().getType(); } + Type getAccType() { return getAcc().getType(); } VectorType getLHSVectorMaskType() { - if (llvm::size(masks()) != 2) return VectorType(); + if (llvm::size(getMasks()) != 2) return VectorType(); return getOperand(3).getType().cast(); } VectorType getRHSVectorMaskType() { - if (llvm::size(masks()) != 2) return VectorType(); + if (llvm::size(getMasks()) != 2) return VectorType(); return getOperand(4).getType().cast(); } Type getResultType() { return getResult().getType(); } @@ -296,7 +296,7 @@ }]; let extraClassDeclaration = [{ VectorType getVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } }]; let builders = [ @@ -347,10 +347,10 @@ static StringRef getReductionDimsAttrStrName() { return "reduction_dims"; } VectorType getSourceVectorType() { - return source().getType().cast(); + return getSource().getType().cast(); } Type getDestType() { - return dest().getType(); + return getDest().getType(); } bool isReducedDim(int64_t d) { @@ -361,7 +361,7 @@ SmallVector getReductionMask() { SmallVector res(getSourceVectorType().getRank(), false); - for (auto ia : reduction_dims().getAsRange()) + for (auto ia : 
getReductionDims().getAsRange()) res[ia.getInt()] = true; return res; } @@ -415,9 +415,9 @@ ``` }]; let extraClassDeclaration = [{ - Type getSourceType() { return source().getType(); } + Type getSourceType() { return getSource().getType(); } VectorType getVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } }]; let assemblyFormat = "$source attr-dict `:` type($source) `to` type($vector)"; @@ -472,13 +472,13 @@ let extraClassDeclaration = [{ static StringRef getMaskAttrStrName() { return "mask"; } VectorType getV1VectorType() { - return v1().getType().cast(); + return getV1().getType().cast(); } VectorType getV2VectorType() { - return v2().getType().cast(); + return getV2().getType().cast(); } VectorType getVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } }]; let assemblyFormat = "operands $mask attr-dict `:` type(operands)"; @@ -526,7 +526,7 @@ ]; let extraClassDeclaration = [{ VectorType getVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } }]; let hasVerifier = 1; @@ -560,7 +560,7 @@ let extraClassDeclaration = [{ static StringRef getPositionAttrStrName() { return "position"; } VectorType getVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); }]; @@ -623,7 +623,7 @@ "AffineMap":$map)>]; let extraClassDeclaration = [{ VectorType getSourceVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } VectorType getResultType() { return getResult().getType().cast(); @@ -664,7 +664,7 @@ }]; let assemblyFormat = "$lhs `,` $rhs `,` $acc attr-dict `:` type($lhs)"; let extraClassDeclaration = [{ - VectorType getVectorType() { return lhs().getType().cast(); } + VectorType getVectorType() { return getLhs().getType().cast(); } }]; } @@ -707,9 +707,9 @@ OpBuilder<(ins "Value":$source, "Value":$dest)>, ]; let extraClassDeclaration = [{ - Type getSourceType() { return source().getType(); } + Type getSourceType() { return getSource().getType(); } VectorType getDestVectorType() { - return dest().getType().cast(); + return getDest().getType().cast(); } }]; let hasVerifier = 1; @@ -747,9 +747,9 @@ ]; let extraClassDeclaration = [{ static StringRef getPositionAttrStrName() { return "position"; } - Type getSourceType() { return source().getType(); } + Type getSourceType() { return getSource().getType(); } VectorType getDestVectorType() { - return dest().getType().cast(); + return getDest().getType().cast(); } }]; @@ -809,7 +809,7 @@ }]; let extraClassDeclaration = [{ VectorType getSourceVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } VectorType getResultType() { return getResult().getType().cast(); @@ -866,13 +866,13 @@ static StringRef getOffsetsAttrStrName() { return "offsets"; } static StringRef getStridesAttrStrName() { return "strides"; } VectorType getSourceVectorType() { - return source().getType().cast(); + return getSource().getType().cast(); } VectorType getDestVectorType() { - return dest().getType().cast(); + return getDest().getType().cast(); } bool hasNonUnitStrides() { - return llvm::any_of(strides(), [](Attribute attr) { + return llvm::any_of(getStrides(), [](Attribute attr) { return attr.cast().getInt() != 1; }); } @@ -947,15 +947,15 @@ ]; let extraClassDeclaration = [{ VectorType getOperandVectorTypeLHS() { - return lhs().getType().cast(); + return getLhs().getType().cast(); } Type 
getOperandTypeRHS() { - return rhs().getType(); + return getRhs().getType(); } VectorType getOperandVectorTypeACC() { - return (llvm::size(acc()) == 0) + return (llvm::size(getAcc()) == 0) ? VectorType() - : (*acc().begin()).getType().cast(); + : (*getAcc().begin()).getType().cast(); } VectorType getVectorType() { return getResult().getType().cast(); @@ -1065,17 +1065,17 @@ let extraClassDeclaration = [{ VectorType getInputVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } VectorType getOutputVectorType() { return getResult().getType().cast(); } /// Returns as integer value the number of input shape operands. - int64_t getNumInputShapeSizes() { return input_shape().size(); } + int64_t getNumInputShapeSizes() { return getInputShape().size(); } /// Returns as integer value the number of output shape operands. - int64_t getNumOutputShapeSizes() { return output_shape().size(); } + int64_t getNumOutputShapeSizes() { return getOutputShape().size(); } void getFixedVectorSizes(SmallVectorImpl &results); @@ -1133,10 +1133,10 @@ static StringRef getOffsetsAttrStrName() { return "offsets"; } static StringRef getSizesAttrStrName() { return "sizes"; } static StringRef getStridesAttrStrName() { return "strides"; } - VectorType getVectorType(){ return vector().getType().cast(); } + VectorType getVectorType(){ return getVector().getType().cast(); } void getOffsets(SmallVectorImpl &results); bool hasNonUnitStrides() { - return llvm::any_of(strides(), [](Attribute attr) { + return llvm::any_of(getStrides(), [](Attribute attr) { return attr.cast().getInt() != 1; }); } @@ -1558,11 +1558,11 @@ let extraClassDeclaration = [{ MemRefType getMemRefType() { - return base().getType().cast(); + return getBase().getType().cast(); } VectorType getVectorType() { - return result().getType().cast(); + return getResult().getType().cast(); } }]; @@ -1635,11 +1635,11 @@ let extraClassDeclaration = [{ MemRefType getMemRefType() { - return base().getType().cast(); + return getBase().getType().cast(); } VectorType getVectorType() { - return valueToStore().getType().cast(); + return getValueToStore().getType().cast(); } }]; @@ -1688,16 +1688,16 @@ }]; let extraClassDeclaration = [{ MemRefType getMemRefType() { - return base().getType().cast(); + return getBase().getType().cast(); } VectorType getMaskVectorType() { - return mask().getType().cast(); + return getMask().getType().cast(); } VectorType getPassThruVectorType() { - return pass_thru().getType().cast(); + return getPassThru().getType().cast(); } VectorType getVectorType() { - return result().getType().cast(); + return getResult().getType().cast(); } }]; let assemblyFormat = "$base `[` $indices `]` `,` $mask `,` $pass_thru attr-dict `:` " @@ -1744,13 +1744,13 @@ }]; let extraClassDeclaration = [{ MemRefType getMemRefType() { - return base().getType().cast(); + return getBase().getType().cast(); } VectorType getMaskVectorType() { - return mask().getType().cast(); + return getMask().getType().cast(); } VectorType getVectorType() { - return valueToStore().getType().cast(); + return getValueToStore().getType().cast(); } }]; let assemblyFormat = @@ -1803,19 +1803,19 @@ }]; let extraClassDeclaration = [{ MemRefType getMemRefType() { - return base().getType().cast(); + return getBase().getType().cast(); } VectorType getIndexVectorType() { - return index_vec().getType().cast(); + return getIndexVec().getType().cast(); } VectorType getMaskVectorType() { - return mask().getType().cast(); + return getMask().getType().cast(); } VectorType 
getPassThruVectorType() { - return pass_thru().getType().cast(); + return getPassThru().getType().cast(); } VectorType getVectorType() { - return result().getType().cast(); + return getResult().getType().cast(); } }]; let assemblyFormat = @@ -1870,16 +1870,16 @@ }]; let extraClassDeclaration = [{ MemRefType getMemRefType() { - return base().getType().cast(); + return getBase().getType().cast(); } VectorType getIndexVectorType() { - return index_vec().getType().cast(); + return getIndexVec().getType().cast(); } VectorType getMaskVectorType() { - return mask().getType().cast(); + return getMask().getType().cast(); } VectorType getVectorType() { - return valueToStore().getType().cast(); + return getValueToStore().getType().cast(); } }]; let assemblyFormat = @@ -1931,16 +1931,16 @@ }]; let extraClassDeclaration = [{ MemRefType getMemRefType() { - return base().getType().cast(); + return getBase().getType().cast(); } VectorType getMaskVectorType() { - return mask().getType().cast(); + return getMask().getType().cast(); } VectorType getPassThruVectorType() { - return pass_thru().getType().cast(); + return getPassThru().getType().cast(); } VectorType getVectorType() { - return result().getType().cast(); + return getResult().getType().cast(); } }]; let assemblyFormat = "$base `[` $indices `]` `,` $mask `,` $pass_thru attr-dict `:` " @@ -1989,13 +1989,13 @@ }]; let extraClassDeclaration = [{ MemRefType getMemRefType() { - return base().getType().cast(); + return getBase().getType().cast(); } VectorType getMaskVectorType() { - return mask().getType().cast(); + return getMask().getType().cast(); } VectorType getVectorType() { - return valueToStore().getType().cast(); + return getValueToStore().getType().cast(); } }]; let assemblyFormat = @@ -2045,7 +2045,7 @@ }]; let extraClassDeclaration = [{ VectorType getSourceVectorType() { - return source().getType().cast(); + return getSource().getType().cast(); } VectorType getResultVectorType() { return getResult().getType().cast(); @@ -2086,7 +2086,7 @@ }]; let extraClassDeclaration = [{ VectorType getSourceVectorType() { - return source().getType().cast(); + return getSource().getType().cast(); } VectorType getResultVectorType() { return getResult().getType().cast(); @@ -2129,13 +2129,13 @@ let extraClassDeclaration = [{ MemRefType getMemRefType() { - return memref().getType().cast(); + return getMemref().getType().cast(); } MemRefType getResultMemRefType() { return getResult().getType().cast(); } // Implement ViewLikeOpInterface. 
- Value getViewSource() { return memref(); } + Value getViewSource() { return getMemref(); } }]; let assemblyFormat = [{ @@ -2260,10 +2260,10 @@ ]; let extraClassDeclaration = [{ VectorType getVectorType() { - return vector().getType().cast(); + return getVector().getType().cast(); } VectorType getResultType() { - return result().getType().cast(); + return getResult().getType().cast(); } void getTransp(SmallVectorImpl &results); static StringRef getTranspAttrStrName() { return "transp"; } @@ -2303,7 +2303,7 @@ }]; let extraClassDeclaration = [{ Type getPrintType() { - return source().getType(); + return getSource().getType(); } }]; let assemblyFormat = "$source attr-dict `:` type($source)"; @@ -2530,16 +2530,16 @@ static StringRef getKindAttrStrName() { return "kind"; } static StringRef getReductionDimAttrStrName() { return "reduction_dim"; } VectorType getSourceType() { - return source().getType().cast(); + return getSource().getType().cast(); } VectorType getDestType() { - return dest().getType().cast(); + return getDest().getType().cast(); } VectorType getAccumulatorType() { - return accumulated_value().getType().cast(); + return getAccumulatedValue().getType().cast(); } VectorType getInitialValueType() { - return initial_value().getType().cast(); + return getInitialValue().getType().cast(); } }]; let assemblyFormat = diff --git a/mlir/include/mlir/Interfaces/VectorInterfaces.td b/mlir/include/mlir/Interfaces/VectorInterfaces.td --- a/mlir/include/mlir/Interfaces/VectorInterfaces.td +++ b/mlir/include/mlir/Interfaces/VectorInterfaces.td @@ -77,8 +77,8 @@ /*methodBody=*/"", /*defaultImplementation=*/[{ return $_op.isBroadcastDim(dim) - || ($_op.in_bounds() - && $_op.in_bounds()->template cast<::mlir::ArrayAttr>()[dim] + || ($_op.getInBounds() + && $_op.getInBounds()->template cast<::mlir::ArrayAttr>()[dim] .template cast<::mlir::BoolAttr>().getValue()); }] >, @@ -87,7 +87,7 @@ /*retTy=*/"::mlir::Value", /*methodName=*/"source", /*args=*/(ins), - /*methodBody=*/"return $_op.source();" + /*methodBody=*/"return $_op.getSource();" /*defaultImplementation=*/ >, InterfaceMethod< @@ -95,7 +95,7 @@ /*retTy=*/"::mlir::Value", /*methodName=*/"vector", /*args=*/(ins), - /*methodBody=*/"return $_op.vector();" + /*methodBody=*/"return $_op.getVector();" /*defaultImplementation=*/ >, InterfaceMethod< @@ -103,7 +103,7 @@ /*retTy=*/"::mlir::ValueRange", /*methodName=*/"indices", /*args=*/(ins), - /*methodBody=*/"return $_op.indices();" + /*methodBody=*/"return $_op.getIndices();" /*defaultImplementation=*/ >, InterfaceMethod< @@ -111,7 +111,7 @@ /*retTy=*/"::mlir::AffineMap", /*methodName=*/"permutation_map", /*args=*/(ins), - /*methodBody=*/"return $_op.permutation_map();" + /*methodBody=*/"return $_op.getPermutationMap();" /*defaultImplementation=*/ >, InterfaceMethod< @@ -121,7 +121,7 @@ /*args=*/(ins "unsigned":$idx), /*methodBody=*/"", /*defaultImplementation=*/[{ - auto expr = $_op.permutation_map().getResult(idx); + auto expr = $_op.getPermutationMap().getResult(idx); return expr.template isa<::mlir::AffineConstantExpr>() && expr.template dyn_cast<::mlir::AffineConstantExpr>().getValue() == 0; }] @@ -146,7 +146,7 @@ /*retTy=*/"::mlir::Optional<::mlir::ArrayAttr>", /*methodName=*/"in_bounds", /*args=*/(ins), - /*methodBody=*/"return $_op.in_bounds();" + /*methodBody=*/"return $_op.getInBounds();" /*defaultImplementation=*/ >, InterfaceMethod< @@ -156,7 +156,7 @@ /*args=*/(ins), /*methodBody=*/"", /*defaultImplementation=*/ - "return $_op.source().getType().template cast<::mlir::ShapedType>();" + 
"return $_op.getSource().getType().template cast<::mlir::ShapedType>();" >, InterfaceMethod< /*desc=*/"Return the VectorType.", @@ -165,7 +165,7 @@ /*args=*/(ins), /*methodBody=*/"", /*defaultImplementation=*/[{ - return $_op.vector().getType().template dyn_cast<::mlir::VectorType>(); + return $_op.getVector().getType().template dyn_cast<::mlir::VectorType>(); }] >, InterfaceMethod< @@ -175,9 +175,9 @@ /*args=*/(ins), /*methodBody=*/"", /*defaultImplementation=*/[{ - return $_op.mask() + return $_op.getMask() ? ::mlir::vector::detail::transferMaskType( - $_op.getVectorType(), $_op.permutation_map()) + $_op.getVectorType(), $_op.getPermutationMap()) : ::mlir::VectorType(); }] >, @@ -189,7 +189,7 @@ /*args=*/(ins), /*methodBody=*/"", /*defaultImplementation=*/ - "return $_op.permutation_map().getNumResults();" + "return $_op.getPermutationMap().getNumResults();" >, InterfaceMethod< /*desc=*/[{ Return the number of leading shaped dimensions that do not diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -32,14 +32,14 @@ // Return true if the contract op can be convert to MMA matmul. static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) { - if (llvm::size(contract.masks()) != 0) + if (llvm::size(contract.getMasks()) != 0) return false; using MapList = ArrayRef>; auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); }; AffineExpr m, n, k; bindDims(contract.getContext(), m, n, k); - auto iteratorTypes = contract.iterator_types().getValue(); + auto iteratorTypes = contract.getIteratorTypes().getValue(); if (!(isParallelIterator(iteratorTypes[0]) && isParallelIterator(iteratorTypes[1]) && isReductionIterator(iteratorTypes[2]))) @@ -75,12 +75,12 @@ // Return true if the transfer op can be converted to a MMA matrix load. static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) { - if (readOp.mask() || readOp.hasOutOfBoundsDim() || + if (readOp.getMask() || readOp.hasOutOfBoundsDim() || readOp.getVectorType().getRank() != 2) return false; if (!getMemrefConstantHorizontalStride(readOp.getShapedType())) return false; - AffineMap map = readOp.permutation_map(); + AffineMap map = readOp.getPermutationMap(); OpBuilder b(readOp.getContext()); AffineExpr innerDim = b.getAffineDimExpr(map.getNumDims() - 1); AffineExpr zero = b.getAffineConstantExpr(0); @@ -98,13 +98,13 @@ if (writeOp.getTransferRank() == 0) return false; - if (writeOp.mask() || writeOp.hasOutOfBoundsDim() || + if (writeOp.getMask() || writeOp.hasOutOfBoundsDim() || writeOp.getVectorType().getRank() != 2) return false; if (!getMemrefConstantHorizontalStride(writeOp.getShapedType())) return false; // TODO: Support transpose once it is added to GPU dialect ops. - if (!writeOp.permutation_map().isMinorIdentity()) + if (!writeOp.getPermutationMap().isMinorIdentity()) return false; return true; } @@ -121,7 +121,7 @@ /// Return true if this is a broadcast from scalar to a 2D vector. static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) { return broadcastOp.getVectorType().getRank() == 2 && - broadcastOp.source().getType().isa(); + broadcastOp.getSource().getType().isa(); } /// Return the MMA elementwise enum associated with `op` if it is supported. 
@@ -239,7 +239,7 @@ LogicalResult matchAndRewrite(vector::ContractionOp op, PatternRewriter &rewriter) const override { Location loc = op.getLoc(); - Value lhs = op.lhs(), rhs = op.rhs(), res = op.acc(); + Value lhs = op.getLhs(), rhs = op.getRhs(), res = op.getAcc(); // Set up the parallel/reduction structure in right form. using MapList = ArrayRef>; @@ -247,7 +247,7 @@ AffineExpr m, n, k; bindDims(rewriter.getContext(), m, n, k); static constexpr std::array perm = {1, 0}; - auto iteratorTypes = op.iterator_types().getValue(); + auto iteratorTypes = op.getIteratorTypes().getValue(); SmallVector maps = op.getIndexingMaps(); if (!(isParallelIterator(iteratorTypes[0]) && isParallelIterator(iteratorTypes[1]) && @@ -285,7 +285,7 @@ rewriter.replaceOpWithNewOp( op, lhs, rhs, res, rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})), - op.iterator_types()); + op.getIteratorTypes()); return success(); } }; @@ -298,7 +298,8 @@ LogicalResult matchAndRewrite(vector::TransposeOp op, PatternRewriter &rewriter) const override { - auto transferReadOp = op.vector().getDefiningOp(); + auto transferReadOp = + op.getVector().getDefiningOp(); if (!transferReadOp) return failure(); @@ -306,7 +307,7 @@ if (transferReadOp.getTransferRank() == 0) return failure(); - if (transferReadOp.mask() || transferReadOp.hasOutOfBoundsDim()) + if (transferReadOp.getMask() || transferReadOp.hasOutOfBoundsDim()) return failure(); SmallVector perm; op.getTransp(perm); @@ -315,11 +316,13 @@ permU.push_back(unsigned(o)); AffineMap permutationMap = AffineMap::getPermutationMap(permU, op.getContext()); - AffineMap newMap = permutationMap.compose(transferReadOp.permutation_map()); + AffineMap newMap = + permutationMap.compose(transferReadOp.getPermutationMap()); rewriter.replaceOpWithNewOp( - op, op.getType(), transferReadOp.source(), transferReadOp.indices(), - AffineMapAttr::get(newMap), transferReadOp.padding(), - transferReadOp.mask(), transferReadOp.in_boundsAttr()); + op, op.getType(), transferReadOp.getSource(), + transferReadOp.getIndices(), AffineMapAttr::get(newMap), + transferReadOp.getPadding(), transferReadOp.getMask(), + transferReadOp.getInBoundsAttr()); return success(); } }; @@ -336,9 +339,9 @@ auto contract = dyn_cast(users); if (!contract) continue; - if (contract.lhs() == op.getResult()) + if (contract.getLhs() == op.getResult()) return "AOp"; - if (contract.rhs() == op.getResult()) + if (contract.getRhs() == op.getResult()) return "BOp"; } return "COp"; @@ -350,7 +353,7 @@ assert(transferReadSupportsMMAMatrixType(op)); Optional stride = getMemrefConstantHorizontalStride(op.getShapedType()); - AffineMap map = op.permutation_map(); + AffineMap map = op.getPermutationMap(); // Handle broadcast by setting the stride to 0. 
if (map.getResult(0).isa()) { assert(map.getResult(0).cast().getValue() == 0); @@ -363,7 +366,8 @@ op.getVectorType().getElementType(), fragType); OpBuilder b(op); Value load = b.create( - op.getLoc(), type, op.source(), op.indices(), b.getIndexAttr(*stride)); + op.getLoc(), type, op.getSource(), op.getIndices(), + b.getIndexAttr(*stride)); valueMapping[op.getResult()] = load; } @@ -374,18 +378,19 @@ getMemrefConstantHorizontalStride(op.getShapedType()); assert(stride); OpBuilder b(op); - Value matrix = valueMapping.find(op.vector())->second; - b.create( - op.getLoc(), matrix, op.source(), op.indices(), b.getIndexAttr(*stride)); + Value matrix = valueMapping.find(op.getVector())->second; + b.create(op.getLoc(), matrix, op.getSource(), + op.getIndices(), + b.getIndexAttr(*stride)); op.erase(); } static void convertContractOp(vector::ContractionOp op, llvm::DenseMap &valueMapping) { OpBuilder b(op); - Value opA = valueMapping.find(op.lhs())->second; - Value opB = valueMapping.find(op.rhs())->second; - Value opC = valueMapping.find(op.acc())->second; + Value opA = valueMapping.find(op.getLhs())->second; + Value opB = valueMapping.find(op.getRhs())->second; + Value opC = valueMapping.find(op.getAcc())->second; Value matmul = b.create(op.getLoc(), opC.getType(), opA, opB, opC); valueMapping[op.getResult()] = matmul; @@ -419,7 +424,7 @@ gpu::MMAMatrixType type = gpu::MMAMatrixType::get( vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType)); auto matrix = b.create(op.getLoc(), type, - op.source()); + op.getSource()); valueMapping[op.getResult()] = matrix; } diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -154,9 +154,9 @@ matchAndRewrite(vector::MatmulOp matmulOp, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp( - matmulOp, typeConverter->convertType(matmulOp.res().getType()), - adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(), - matmulOp.lhs_columns(), matmulOp.rhs_columns()); + matmulOp, typeConverter->convertType(matmulOp.getRes().getType()), + adaptor.getLhs(), adaptor.getRhs(), matmulOp.getLhsRows(), + matmulOp.getLhsColumns(), matmulOp.getRhsColumns()); return success(); } }; @@ -172,8 +172,8 @@ matchAndRewrite(vector::FlatTransposeOp transOp, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp( - transOp, typeConverter->convertType(transOp.res().getType()), - adaptor.matrix(), transOp.rows(), transOp.columns()); + transOp, typeConverter->convertType(transOp.getRes().getType()), + adaptor.getMatrix(), transOp.getRows(), transOp.getColumns()); return success(); } }; @@ -193,14 +193,14 @@ VectorType vectorTy, Value ptr, unsigned align, ConversionPatternRewriter &rewriter) { rewriter.replaceOpWithNewOp( - loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align); + loadOp, vectorTy, ptr, adaptor.getMask(), adaptor.getPassThru(), align); } static void replaceLoadOrStoreOp(vector::StoreOp storeOp, vector::StoreOpAdaptor adaptor, VectorType vectorTy, Value ptr, unsigned align, ConversionPatternRewriter &rewriter) { - rewriter.replaceOpWithNewOp(storeOp, adaptor.valueToStore(), + rewriter.replaceOpWithNewOp(storeOp, adaptor.getValueToStore(), ptr, align); } @@ -209,7 +209,7 @@ VectorType vectorTy, Value ptr, unsigned align, ConversionPatternRewriter &rewriter) { 
rewriter.replaceOpWithNewOp( - storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align); + storeOp, adaptor.getValueToStore(), ptr, adaptor.getMask(), align); } /// Conversion pattern for a vector.load, vector.store, vector.maskedload, and @@ -239,8 +239,8 @@ // Resolve address. auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType()) .template cast(); - Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(), - adaptor.indices(), rewriter); + Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.getBase(), + adaptor.getIndices(), rewriter); Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype); replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter); @@ -268,16 +268,16 @@ // Resolve address. Value ptrs; VectorType vType = gather.getVectorType(); - Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(), - adaptor.indices(), rewriter); - if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr, - adaptor.index_vec(), memRefType, vType, ptrs))) + Value ptr = getStridedElementPtr(loc, memRefType, adaptor.getBase(), + adaptor.getIndices(), rewriter); + if (failed(getIndexedPtrs(rewriter, loc, adaptor.getBase(), ptr, + adaptor.getIndexVec(), memRefType, vType, ptrs))) return failure(); // Replace with the gather intrinsic. rewriter.replaceOpWithNewOp( - gather, typeConverter->convertType(vType), ptrs, adaptor.mask(), - adaptor.pass_thru(), rewriter.getI32IntegerAttr(align)); + gather, typeConverter->convertType(vType), ptrs, adaptor.getMask(), + adaptor.getPassThru(), rewriter.getI32IntegerAttr(align)); return success(); } }; @@ -302,15 +302,15 @@ // Resolve address. Value ptrs; VectorType vType = scatter.getVectorType(); - Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(), - adaptor.indices(), rewriter); - if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr, - adaptor.index_vec(), memRefType, vType, ptrs))) + Value ptr = getStridedElementPtr(loc, memRefType, adaptor.getBase(), + adaptor.getIndices(), rewriter); + if (failed(getIndexedPtrs(rewriter, loc, adaptor.getBase(), ptr, + adaptor.getIndexVec(), memRefType, vType, ptrs))) return failure(); // Replace with the scatter intrinsic. rewriter.replaceOpWithNewOp( - scatter, adaptor.valueToStore(), ptrs, adaptor.mask(), + scatter, adaptor.getValueToStore(), ptrs, adaptor.getMask(), rewriter.getI32IntegerAttr(align)); return success(); } @@ -330,11 +330,11 @@ // Resolve address. auto vtype = typeConverter->convertType(expand.getVectorType()); - Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(), - adaptor.indices(), rewriter); + Value ptr = getStridedElementPtr(loc, memRefType, adaptor.getBase(), + adaptor.getIndices(), rewriter); rewriter.replaceOpWithNewOp( - expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru()); + expand, vtype, ptr, adaptor.getMask(), adaptor.getPassThru()); return success(); } }; @@ -352,11 +352,11 @@ MemRefType memRefType = compress.getMemRefType(); // Resolve address. 
- Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(), - adaptor.indices(), rewriter); + Value ptr = getStridedElementPtr(loc, memRefType, adaptor.getBase(), + adaptor.getIndices(), rewriter); rewriter.replaceOpWithNewOp( - compress, adaptor.valueToStore(), ptr, adaptor.mask()); + compress, adaptor.getValueToStore(), ptr, adaptor.getMask()); return success(); } }; @@ -373,8 +373,8 @@ LogicalResult matchAndRewrite(vector::ReductionOp reductionOp, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - auto kind = reductionOp.kind(); - Type eltType = reductionOp.dest().getType(); + auto kind = reductionOp.getKind(); + Type eltType = reductionOp.getDest().getType(); Type llvmType = typeConverter->convertType(eltType); Value operand = adaptor.getOperands()[0]; if (eltType.isIntOrIndex()) { @@ -467,7 +467,7 @@ auto v2Type = shuffleOp.getV2VectorType(); auto vectorType = shuffleOp.getVectorType(); Type llvmType = typeConverter->convertType(vectorType); - auto maskArrayAttr = shuffleOp.mask(); + auto maskArrayAttr = shuffleOp.getMask(); // Bail if result type cannot be lowered. if (!llvmType) @@ -483,7 +483,7 @@ // there is direct shuffle support in LLVM. Use it! if (rank == 1 && v1Type == v2Type) { Value llvmShuffleOp = rewriter.create( - loc, adaptor.v1(), adaptor.v2(), maskArrayAttr); + loc, adaptor.getV1(), adaptor.getV2(), maskArrayAttr); rewriter.replaceOp(shuffleOp, llvmShuffleOp); return success(); } @@ -498,10 +498,10 @@ int64_t insPos = 0; for (const auto &en : llvm::enumerate(maskArrayAttr)) { int64_t extPos = en.value().cast().getInt(); - Value value = adaptor.v1(); + Value value = adaptor.getV1(); if (extPos >= v1Dim) { extPos -= v1Dim; - value = adaptor.v2(); + value = adaptor.getV2(); } Value extract = extractOne(rewriter, *getTypeConverter(), loc, value, eltType, rank, extPos); @@ -536,12 +536,12 @@ loc, typeConverter->convertType(idxType), rewriter.getIntegerAttr(idxType, 0)); rewriter.replaceOpWithNewOp( - extractEltOp, llvmType, adaptor.vector(), zero); + extractEltOp, llvmType, adaptor.getVector(), zero); return success(); } rewriter.replaceOpWithNewOp( - extractEltOp, llvmType, adaptor.vector(), adaptor.position()); + extractEltOp, llvmType, adaptor.getVector(), adaptor.getPosition()); return success(); } }; @@ -558,7 +558,7 @@ auto vectorType = extractOp.getVectorType(); auto resultType = extractOp.getResult().getType(); auto llvmResultType = typeConverter->convertType(resultType); - auto positionArrayAttr = extractOp.position(); + auto positionArrayAttr = extractOp.getPosition(); // Bail if result type cannot be lowered. if (!llvmResultType) @@ -566,21 +566,21 @@ // Extract entire vector. Should be handled by folder, but just to be safe. if (positionArrayAttr.empty()) { - rewriter.replaceOp(extractOp, adaptor.vector()); + rewriter.replaceOp(extractOp, adaptor.getVector()); return success(); } // One-shot extraction of vector from array (only requires extractvalue). if (resultType.isa()) { Value extracted = rewriter.create( - loc, llvmResultType, adaptor.vector(), positionArrayAttr); + loc, llvmResultType, adaptor.getVector(), positionArrayAttr); rewriter.replaceOp(extractOp, extracted); return success(); } // Potential extraction of 1-D vector from array. 
auto *context = extractOp->getContext(); - Value extracted = adaptor.vector(); + Value extracted = adaptor.getVector(); auto positionAttrs = positionArrayAttr.getValue(); if (positionAttrs.size() > 1) { auto oneDVectorType = reducedVectorTypeBack(vectorType); @@ -627,8 +627,8 @@ VectorType vType = fmaOp.getVectorType(); if (vType.getRank() != 1) return failure(); - rewriter.replaceOpWithNewOp(fmaOp, adaptor.lhs(), - adaptor.rhs(), adaptor.acc()); + rewriter.replaceOpWithNewOp( + fmaOp, adaptor.getLhs(), adaptor.getRhs(), adaptor.getAcc()); return success(); } }; @@ -655,13 +655,13 @@ loc, typeConverter->convertType(idxType), rewriter.getIntegerAttr(idxType, 0)); rewriter.replaceOpWithNewOp( - insertEltOp, llvmType, adaptor.dest(), adaptor.source(), zero); + insertEltOp, llvmType, adaptor.getDest(), adaptor.getSource(), zero); return success(); } rewriter.replaceOpWithNewOp( - insertEltOp, llvmType, adaptor.dest(), adaptor.source(), - adaptor.position()); + insertEltOp, llvmType, adaptor.getDest(), adaptor.getSource(), + adaptor.getPosition()); return success(); } }; @@ -678,7 +678,7 @@ auto sourceType = insertOp.getSourceType(); auto destVectorType = insertOp.getDestVectorType(); auto llvmResultType = typeConverter->convertType(destVectorType); - auto positionArrayAttr = insertOp.position(); + auto positionArrayAttr = insertOp.getPosition(); // Bail if result type cannot be lowered. if (!llvmResultType) @@ -687,14 +687,14 @@ // Overwrite entire vector with value. Should be handled by folder, but // just to be safe. if (positionArrayAttr.empty()) { - rewriter.replaceOp(insertOp, adaptor.source()); + rewriter.replaceOp(insertOp, adaptor.getSource()); return success(); } // One-shot insertion of a vector into an array (only requires insertvalue). if (sourceType.isa()) { Value inserted = rewriter.create( - loc, llvmResultType, adaptor.dest(), adaptor.source(), + loc, llvmResultType, adaptor.getDest(), adaptor.getSource(), positionArrayAttr); rewriter.replaceOp(insertOp, inserted); return success(); @@ -702,7 +702,7 @@ // Potential extraction of 1-D vector from array. auto *context = insertOp->getContext(); - Value extracted = adaptor.dest(); + Value extracted = adaptor.getDest(); auto positionAttrs = positionArrayAttr.getValue(); auto position = positionAttrs.back().cast(); auto oneDVectorType = destVectorType; @@ -720,15 +720,15 @@ auto constant = rewriter.create(loc, i64Type, position); Value inserted = rewriter.create( loc, typeConverter->convertType(oneDVectorType), extracted, - adaptor.source(), constant); + adaptor.getSource(), constant); // Potential insertion of resulting 1-D vector into array. 
if (positionAttrs.size() > 1) { auto nMinusOnePositionAttrs = ArrayAttr::get(context, positionAttrs.drop_back()); - inserted = rewriter.create(loc, llvmResultType, - adaptor.dest(), inserted, - nMinusOnePositionAttrs); + inserted = rewriter.create( + loc, llvmResultType, adaptor.getDest(), inserted, + nMinusOnePositionAttrs); } rewriter.replaceOp(insertOp, inserted); @@ -779,9 +779,9 @@ loc, elemType, rewriter.getZeroAttr(elemType)); Value desc = rewriter.create(loc, vType, zero); for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) { - Value extrLHS = rewriter.create(loc, op.lhs(), i); - Value extrRHS = rewriter.create(loc, op.rhs(), i); - Value extrACC = rewriter.create(loc, op.acc(), i); + Value extrLHS = rewriter.create(loc, op.getLhs(), i); + Value extrRHS = rewriter.create(loc, op.getRhs(), i); + Value extrACC = rewriter.create(loc, op.getAcc(), i); Value fma = rewriter.create(loc, extrLHS, extrRHS, extrACC); desc = rewriter.create(loc, fma, desc, i); } @@ -974,7 +974,7 @@ // Unroll vector into elementary print calls. int64_t rank = vectorType ? vectorType.getRank() : 0; Type type = vectorType ? vectorType : eltType; - emitRanks(rewriter, printOp, adaptor.source(), type, printer, rank, + emitRanks(rewriter, printOp, adaptor.getSource(), type, printer, rank, conversion); emitCall(rewriter, printOp->getLoc(), LLVM::lookupOrCreatePrintNewlineFn( @@ -1084,13 +1084,13 @@ // For 0-d vector, we simply do `insertelement`. if (resultType.getRank() == 0) { rewriter.replaceOpWithNewOp( - splatOp, vectorType, undef, adaptor.input(), zero); + splatOp, vectorType, undef, adaptor.getInput(), zero); return success(); } // For 1-d vector, we additionally do a `vectorshuffle`. auto v = rewriter.create( - splatOp.getLoc(), vectorType, undef, adaptor.input(), zero); + splatOp.getLoc(), vectorType, undef, adaptor.getInput(), zero); int64_t width = splatOp.getType().cast().getDimSize(0); SmallVector zeroValues(width, 0); @@ -1135,7 +1135,7 @@ loc, typeConverter->convertType(rewriter.getIntegerType(32)), rewriter.getZeroAttr(rewriter.getIntegerType(32))); Value v = rewriter.create(loc, llvm1DVectorTy, vdesc, - adaptor.input(), zero); + adaptor.getInput(), zero); // Shuffle the value across the desired number of elements. int64_t width = resultType.getDimSize(resultType.getRank() - 1); diff --git a/mlir/lib/Conversion/VectorToROCDL/VectorToROCDL.cpp b/mlir/lib/Conversion/VectorToROCDL/VectorToROCDL.cpp --- a/mlir/lib/Conversion/VectorToROCDL/VectorToROCDL.cpp +++ b/mlir/lib/Conversion/VectorToROCDL/VectorToROCDL.cpp @@ -44,7 +44,7 @@ Type &vecTy, Value &dwordConfig, Value &vindex, Value &offsetSizeInBytes, Value &glc, Value &slc) { auto adaptor = TransferWriteOpAdaptor(operands, xferOp->getAttrDictionary()); - rewriter.replaceOpWithNewOp(xferOp, adaptor.vector(), + rewriter.replaceOpWithNewOp(xferOp, adaptor.getVector(), dwordConfig, vindex, offsetSizeInBytes, glc, slc); return success(); @@ -68,10 +68,10 @@ return failure(); if (xferOp.getVectorType().getRank() > 1 || - llvm::size(xferOp.indices()) == 0) + llvm::size(xferOp.getIndices()) == 0) return failure(); - if (!xferOp.permutation_map().isMinorIdentity()) + if (!xferOp.getPermutationMap().isMinorIdentity()) return failure(); // Have it handled in vector->llvm conversion pass. @@ -105,7 +105,7 @@ // indices, so no need to calculate offset size in bytes again in // the MUBUF instruction. 
Value dataPtr = this->getStridedElementPtr( - loc, memRefType, adaptor.source(), adaptor.indices(), rewriter); + loc, memRefType, adaptor.getSource(), adaptor.getIndices(), rewriter); // 1. Create and fill a <4 x i32> dwordConfig with: // 1st two elements holding the address of dataPtr. diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp --- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp +++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp @@ -53,7 +53,7 @@ static Optional unpackedDim(OpTy xferOp) { // TODO: support 0-d corner case. assert(xferOp.getTransferRank() > 0 && "unexpected 0-d transfer"); - auto map = xferOp.permutation_map(); + auto map = xferOp.getPermutationMap(); if (auto expr = map.getResult(0).template dyn_cast()) { return expr.getPosition(); } @@ -69,7 +69,7 @@ static AffineMap unpackedPermutationMap(OpBuilder &b, OpTy xferOp) { // TODO: support 0-d corner case. assert(xferOp.getTransferRank() > 0 && "unexpected 0-d transfer"); - auto map = xferOp.permutation_map(); + auto map = xferOp.getPermutationMap(); return AffineMap::get(map.getNumDims(), 0, map.getResults().drop_front(), b.getContext()); } @@ -86,7 +86,7 @@ typename OpTy::Adaptor adaptor(xferOp); // Corresponding memref dim of the vector dim that is unpacked. auto dim = unpackedDim(xferOp); - auto prevIndices = adaptor.indices(); + auto prevIndices = adaptor.getIndices(); indices.append(prevIndices.begin(), prevIndices.end()); Location loc = xferOp.getLoc(); @@ -94,7 +94,7 @@ if (!isBroadcast) { AffineExpr d0, d1; bindDims(xferOp.getContext(), d0, d1); - Value offset = adaptor.indices()[dim.getValue()]; + Value offset = adaptor.getIndices()[dim.getValue()]; indices[dim.getValue()] = makeComposedAffineApply(b, loc, d0 + d1, {offset, iv}); } @@ -118,7 +118,7 @@ /// * The to-be-unpacked dim of xferOp is a broadcast. template static Value generateMaskCheck(OpBuilder &b, OpTy xferOp, Value iv) { - if (!xferOp.mask()) + if (!xferOp.getMask()) return Value(); if (xferOp.getMaskType().getRank() != 1) return Value(); @@ -126,7 +126,7 @@ return Value(); Location loc = xferOp.getLoc(); - return b.create(loc, xferOp.mask(), iv); + return b.create(loc, xferOp.getMask(), iv); } /// Helper function TransferOpConversion and TransferOp1dConversion. @@ -167,10 +167,11 @@ Location loc = xferOp.getLoc(); ImplicitLocOpBuilder lb(xferOp.getLoc(), b); if (!xferOp.isDimInBounds(0) && !isBroadcast) { - Value memrefDim = vector::createOrFoldDimOp(b, loc, xferOp.source(), *dim); + Value memrefDim = + vector::createOrFoldDimOp(b, loc, xferOp.getSource(), *dim); AffineExpr d0, d1; bindDims(xferOp.getContext(), d0, d1); - Value base = xferOp.indices()[dim.getValue()]; + Value base = xferOp.getIndices()[dim.getValue()]; Value memrefIdx = makeComposedAffineApply(b, loc, d0 + d1, {base, iv}); cond = lb.create(arith::CmpIPredicate::sgt, memrefDim, memrefIdx); @@ -289,11 +290,11 @@ auto bufferType = MemRefType::get({}, xferOp.getVectorType()); result.dataBuffer = b.create(loc, bufferType); - if (xferOp.mask()) { - auto maskType = MemRefType::get({}, xferOp.mask().getType()); + if (xferOp.getMask()) { + auto maskType = MemRefType::get({}, xferOp.getMask().getType()); auto maskBuffer = b.create(loc, maskType); b.setInsertionPoint(xferOp); - b.create(loc, xferOp.mask(), maskBuffer); + b.create(loc, xferOp.getMask(), maskBuffer); result.maskBuffer = b.create(loc, maskBuffer); } @@ -319,8 +320,8 @@ /// is similar to Strategy::getBuffer. 
template static Value getMaskBuffer(OpTy xferOp) { - assert(xferOp.mask() && "Expected that transfer op has mask"); - auto loadOp = xferOp.mask().template getDefiningOp(); + assert(xferOp.getMask() && "Expected that transfer op has mask"); + auto loadOp = xferOp.getMask().template getDefiningOp(); assert(loadOp && "Expected transfer op mask produced by LoadOp"); return loadOp.getMemRef(); } @@ -401,15 +402,15 @@ Location loc = xferOp.getLoc(); auto bufferType = buffer.getType().dyn_cast(); auto vecType = bufferType.getElementType().dyn_cast(); - auto inBoundsAttr = dropFirstElem(b, xferOp.in_boundsAttr()); + auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr()); auto newXferOp = b.create( - loc, vecType, xferOp.source(), xferIndices, - AffineMapAttr::get(unpackedPermutationMap(b, xferOp)), xferOp.padding(), - Value(), inBoundsAttr); + loc, vecType, xferOp.getSource(), xferIndices, + AffineMapAttr::get(unpackedPermutationMap(b, xferOp)), + xferOp.getPadding(), Value(), inBoundsAttr); maybeApplyPassLabel(b, newXferOp, options.targetRank); - b.create(loc, newXferOp.vector(), buffer, storeIndices); + b.create(loc, newXferOp.getVector(), buffer, storeIndices); return newXferOp; } @@ -425,7 +426,7 @@ Location loc = xferOp.getLoc(); auto bufferType = buffer.getType().dyn_cast(); auto vecType = bufferType.getElementType().dyn_cast(); - auto vec = b.create(loc, vecType, xferOp.padding()); + auto vec = b.create(loc, vecType, xferOp.getPadding()); b.create(loc, vec, buffer, storeIndices); return Value(); @@ -453,7 +454,7 @@ /// vector.transfer_write %vec ... { __vector_to_scf_lowering__ } ... /// ``` static Value getBuffer(TransferWriteOp xferOp) { - auto loadOp = xferOp.vector().getDefiningOp(); + auto loadOp = xferOp.getVector().getDefiningOp(); assert(loadOp && "Expected transfer op vector produced by LoadOp"); return loadOp.getMemRef(); } @@ -461,7 +462,7 @@ /// Retrieve the indices of the current LoadOp that loads from the buffer. static void getBufferIndices(TransferWriteOp xferOp, SmallVector &indices) { - auto loadOp = xferOp.vector().getDefiningOp(); + auto loadOp = xferOp.getVector().getDefiningOp(); auto prevIndices = memref::LoadOpAdaptor(loadOp).indices(); indices.append(prevIndices.begin(), prevIndices.end()); } @@ -488,8 +489,8 @@ Location loc = xferOp.getLoc(); auto vec = b.create(loc, buffer, loadIndices); - auto inBoundsAttr = dropFirstElem(b, xferOp.in_boundsAttr()); - auto source = loopState.empty() ? xferOp.source() : loopState[0]; + auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr()); + auto source = loopState.empty() ? xferOp.getSource() : loopState[0]; Type type = isTensorOp(xferOp) ? xferOp.getShapedType() : Type(); auto newXferOp = b.create( loc, type, vec, source, xferIndices, @@ -521,7 +522,7 @@ /// Return the initial loop state for the generated scf.for loop. static Value initialLoopState(TransferWriteOp xferOp) { - return isTensorOp(xferOp) ? xferOp.source() : Value(); + return isTensorOp(xferOp) ? 
xferOp.getSource() : Value(); } }; @@ -576,8 +577,8 @@ auto buffers = allocBuffers(rewriter, xferOp); auto *newXfer = rewriter.clone(*xferOp.getOperation()); newXfer->setAttr(kPassLabel, rewriter.getUnitAttr()); - if (xferOp.mask()) { - dyn_cast(newXfer).maskMutable().assign( + if (xferOp.getMask()) { + dyn_cast(newXfer).getMaskMutable().assign( buffers.maskBuffer); } @@ -624,16 +625,18 @@ Location loc = xferOp.getLoc(); auto buffers = allocBuffers(rewriter, xferOp); - rewriter.create(loc, xferOp.vector(), buffers.dataBuffer); + rewriter.create(loc, xferOp.getVector(), + buffers.dataBuffer); auto loadedVec = rewriter.create(loc, buffers.dataBuffer); rewriter.updateRootInPlace(xferOp, [&]() { - xferOp.vectorMutable().assign(loadedVec); + xferOp.getVectorMutable().assign(loadedVec); xferOp->setAttr(kPassLabel, rewriter.getUnitAttr()); }); - if (xferOp.mask()) { - rewriter.updateRootInPlace( - xferOp, [&]() { xferOp.maskMutable().assign(buffers.maskBuffer); }); + if (xferOp.getMask()) { + rewriter.updateRootInPlace(xferOp, [&]() { + xferOp.getMaskMutable().assign(buffers.maskBuffer); + }); } return success(); @@ -694,7 +697,7 @@ // If the xferOp has a mask: Find and cast mask buffer. Value castedMaskBuffer; - if (xferOp.mask()) { + if (xferOp.getMask()) { auto maskBuffer = getMaskBuffer(xferOp); auto maskBufferType = maskBuffer.getType().template dyn_cast(); @@ -741,8 +744,8 @@ // the // unpacked dim is not a broadcast, no mask is // needed on the new transfer op. - if (xferOp.mask() && (xferOp.isBroadcastDim(0) || - xferOp.getMaskType().getRank() > 1)) { + if (xferOp.getMask() && (xferOp.isBroadcastDim(0) || + xferOp.getMaskType().getRank() > 1)) { OpBuilder::InsertionGuard guard(b); b.setInsertionPoint(newXfer); // Insert load before newXfer. @@ -755,8 +758,9 @@ auto mask = b.create(loc, castedMaskBuffer, loadIndices); - rewriter.updateRootInPlace( - newXfer, [&]() { newXfer.maskMutable().assign(mask); }); + rewriter.updateRootInPlace(newXfer, [&]() { + newXfer.getMaskMutable().assign(mask); + }); } return loopState.empty() ? Value() : newXfer->getResult(0); @@ -784,13 +788,13 @@ template static void maybeAssignMask(OpBuilder &b, OpTy xferOp, OpTy newXferOp, int64_t i) { - if (!xferOp.mask()) + if (!xferOp.getMask()) return; if (xferOp.isBroadcastDim(0)) { // To-be-unpacked dimension is a broadcast, which does not have a // corresponding mask dimension. Mask attribute remains unchanged. 
- newXferOp.maskMutable().assign(xferOp.mask()); + newXferOp.getMaskMutable().assign(xferOp.getMask()); return; } @@ -801,8 +805,8 @@ llvm::SmallVector indices({i}); Location loc = xferOp.getLoc(); - auto newMask = b.create(loc, xferOp.mask(), indices); - newXferOp.maskMutable().assign(newMask); + auto newMask = b.create(loc, xferOp.getMask(), indices); + newXferOp.getMaskMutable().assign(newMask); } // If we end up here: The mask of the old transfer op is 1D and the unpacked @@ -853,10 +857,10 @@ Value getResultVector(TransferReadOp xferOp, PatternRewriter &rewriter) const { if (auto insertOp = getInsertOp(xferOp)) - return insertOp.dest(); + return insertOp.getDest(); Location loc = xferOp.getLoc(); return rewriter.create(loc, xferOp.getVectorType(), - xferOp.padding()); + xferOp.getPadding()); } /// If the result of the TransferReadOp has exactly one user, which is a @@ -876,7 +880,7 @@ void getInsertionIndices(TransferReadOp xferOp, SmallVector &indices) const { if (auto insertOp = getInsertOp(xferOp)) { - llvm::for_each(insertOp.position(), [&](Attribute attr) { + llvm::for_each(insertOp.getPosition(), [&](Attribute attr) { indices.push_back(attr.dyn_cast().getInt()); }); } @@ -921,11 +925,11 @@ getInsertionIndices(xferOp, insertionIndices); insertionIndices.push_back(i); - auto inBoundsAttr = dropFirstElem(b, xferOp.in_boundsAttr()); + auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr()); auto newXferOp = b.create( - loc, newXferVecType, xferOp.source(), xferIndices, + loc, newXferVecType, xferOp.getSource(), xferIndices, AffineMapAttr::get(unpackedPermutationMap(b, xferOp)), - xferOp.padding(), Value(), inBoundsAttr); + xferOp.getPadding(), Value(), inBoundsAttr); maybeAssignMask(b, xferOp, newXferOp, i); return b.create(loc, newXferOp, vec, insertionIndices); @@ -988,13 +992,13 @@ /// Return the vector from which newly generated ExtracOps will extract. Value getDataVector(TransferWriteOp xferOp) const { if (auto extractOp = getExtractOp(xferOp)) - return extractOp.vector(); - return xferOp.vector(); + return extractOp.getVector(); + return xferOp.getVector(); } /// If the input of the given TransferWriteOp is an ExtractOp, return it. vector::ExtractOp getExtractOp(TransferWriteOp xferOp) const { - if (auto *op = xferOp.vector().getDefiningOp()) + if (auto *op = xferOp.getVector().getDefiningOp()) return dyn_cast(op); return vector::ExtractOp(); } @@ -1004,7 +1008,7 @@ void getExtractionIndices(TransferWriteOp xferOp, SmallVector &indices) const { if (auto extractOp = getExtractOp(xferOp)) { - llvm::for_each(extractOp.position(), [&](Attribute attr) { + llvm::for_each(extractOp.getPosition(), [&](Attribute attr) { indices.push_back(attr.dyn_cast().getInt()); }); } @@ -1026,7 +1030,7 @@ auto vec = getDataVector(xferOp); auto xferVecType = xferOp.getVectorType(); int64_t dimSize = xferVecType.getShape()[0]; - auto source = xferOp.source(); // memref or tensor to be written to. + auto source = xferOp.getSource(); // memref or tensor to be written to. auto sourceType = isTensorOp(xferOp) ? xferOp.getShapedType() : Type(); // Generate fully unrolled loop of transfer ops. 
@@ -1050,7 +1054,7 @@ auto extracted = b.create(loc, vec, extractionIndices); - auto inBoundsAttr = dropFirstElem(b, xferOp.in_boundsAttr()); + auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr()); auto newXferOp = b.create( loc, sourceType, extracted, source, xferIndices, AffineMapAttr::get(unpackedPermutationMap(b, xferOp)), Value(), @@ -1089,8 +1093,8 @@ static Optional get1dMemrefIndices(OpBuilder &b, OpTy xferOp, Value iv, SmallVector &memrefIndices) { - auto indices = xferOp.indices(); - auto map = xferOp.permutation_map(); + auto indices = xferOp.getIndices(); + auto map = xferOp.getPermutationMap(); assert(xferOp.getTransferRank() > 0 && "unexpected 0-d transfer"); memrefIndices.append(indices.begin(), indices.end()); @@ -1132,7 +1136,8 @@ b, xferOp, iv, dim, TypeRange(xferOp.getVectorType()), /*inBoundsCase=*/ [&](OpBuilder &b, Location loc) { - Value val = b.create(loc, xferOp.source(), indices); + Value val = + b.create(loc, xferOp.getSource(), indices); return b.create(loc, val, vec, iv); }, /*outOfBoundsCase=*/ @@ -1144,7 +1149,7 @@ // Inititalize vector with padding value. Location loc = xferOp.getLoc(); return b.create(loc, xferOp.getVectorType(), - xferOp.padding()); + xferOp.getPadding()); } }; @@ -1162,8 +1167,8 @@ b, xferOp, iv, dim, /*inBoundsCase=*/[&](OpBuilder &b, Location loc) { auto val = - b.create(loc, xferOp.vector(), iv); - b.create(loc, val, xferOp.source(), indices); + b.create(loc, xferOp.getVector(), iv); + b.create(loc, val, xferOp.getSource(), indices); }); b.create(loc); } @@ -1221,7 +1226,7 @@ // TODO: support 0-d corner case. if (xferOp.getTransferRank() == 0) return failure(); - auto map = xferOp.permutation_map(); + auto map = xferOp.getPermutationMap(); auto memRefType = xferOp.getShapedType().template dyn_cast(); if (!memRefType) diff --git a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp --- a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp +++ b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp @@ -44,11 +44,11 @@ if (!dstType) return failure(); - if (dstType == adaptor.source().getType()) - rewriter.replaceOp(bitcastOp, adaptor.source()); + if (dstType == adaptor.getSource().getType()) + rewriter.replaceOp(bitcastOp, adaptor.getSource()); else rewriter.replaceOpWithNewOp(bitcastOp, dstType, - adaptor.source()); + adaptor.getSource()); return success(); } @@ -61,11 +61,11 @@ LogicalResult matchAndRewrite(vector::BroadcastOp broadcastOp, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - if (broadcastOp.source().getType().isa() || + if (broadcastOp.getSource().getType().isa() || !spirv::CompositeType::isValid(broadcastOp.getVectorType())) return failure(); SmallVector source(broadcastOp.getVectorType().getNumElements(), - adaptor.source()); + adaptor.getSource()); rewriter.replaceOpWithNewOp( broadcastOp, broadcastOp.getVectorType(), source); return success(); @@ -88,14 +88,14 @@ if (!dstType) return failure(); - if (adaptor.vector().getType().isa()) { - rewriter.replaceOp(extractOp, adaptor.vector()); + if (adaptor.getVector().getType().isa()) { + rewriter.replaceOp(extractOp, adaptor.getVector()); return success(); } - int32_t id = getFirstIntValue(extractOp.position()); + int32_t id = getFirstIntValue(extractOp.getPosition()); rewriter.replaceOpWithNewOp( - extractOp, adaptor.vector(), id); + extractOp, adaptor.getVector(), id); return success(); } }; @@ -111,10 +111,9 @@ if (!dstType) return failure(); - - uint64_t offset = 
getFirstIntValue(extractOp.offsets()); - uint64_t size = getFirstIntValue(extractOp.sizes()); - uint64_t stride = getFirstIntValue(extractOp.strides()); + uint64_t offset = getFirstIntValue(extractOp.getOffsets()); + uint64_t size = getFirstIntValue(extractOp.getSizes()); + uint64_t stride = getFirstIntValue(extractOp.getStrides()); if (stride != 1) return failure(); @@ -147,7 +146,8 @@ if (!spirv::CompositeType::isValid(fmaOp.getVectorType())) return failure(); rewriter.replaceOpWithNewOp( - fmaOp, fmaOp.getType(), adaptor.lhs(), adaptor.rhs(), adaptor.acc()); + fmaOp, fmaOp.getType(), adaptor.getLhs(), adaptor.getRhs(), + adaptor.getAcc()); return success(); } }; @@ -162,16 +162,16 @@ // Special case for inserting scalar values into size-1 vectors. if (insertOp.getSourceType().isIntOrFloat() && insertOp.getDestVectorType().getNumElements() == 1) { - rewriter.replaceOp(insertOp, adaptor.source()); + rewriter.replaceOp(insertOp, adaptor.getSource()); return success(); } if (insertOp.getSourceType().isa() || !spirv::CompositeType::isValid(insertOp.getDestVectorType())) return failure(); - int32_t id = getFirstIntValue(insertOp.position()); + int32_t id = getFirstIntValue(insertOp.getPosition()); rewriter.replaceOpWithNewOp( - insertOp, adaptor.source(), adaptor.dest(), id); + insertOp, adaptor.getSource(), adaptor.getDest(), id); return success(); } }; @@ -186,8 +186,8 @@ if (!spirv::CompositeType::isValid(extractElementOp.getVectorType())) return failure(); rewriter.replaceOpWithNewOp( - extractElementOp, extractElementOp.getType(), adaptor.vector(), - extractElementOp.position()); + extractElementOp, extractElementOp.getType(), adaptor.getVector(), + extractElementOp.getPosition()); return success(); } }; @@ -202,8 +202,8 @@ if (!spirv::CompositeType::isValid(insertElementOp.getDestVectorType())) return failure(); rewriter.replaceOpWithNewOp( - insertElementOp, insertElementOp.getType(), insertElementOp.dest(), - adaptor.source(), insertElementOp.position()); + insertElementOp, insertElementOp.getType(), insertElementOp.getDest(), + adaptor.getSource(), insertElementOp.getPosition()); return success(); } }; @@ -218,10 +218,10 @@ Value srcVector = adaptor.getOperands().front(); Value dstVector = adaptor.getOperands().back(); - uint64_t stride = getFirstIntValue(insertOp.strides()); + uint64_t stride = getFirstIntValue(insertOp.getStrides()); if (stride != 1) return failure(); - uint64_t offset = getFirstIntValue(insertOp.offsets()); + uint64_t offset = getFirstIntValue(insertOp.getOffsets()); if (srcVector.getType().isa()) { assert(!dstVector.getType().isa()); @@ -259,7 +259,8 @@ VectorType dstVecType = op.getType(); if (!spirv::CompositeType::isValid(dstVecType)) return failure(); - SmallVector source(dstVecType.getNumElements(), adaptor.input()); + SmallVector source(dstVecType.getNumElements(), + adaptor.getInput()); rewriter.replaceOpWithNewOp(op, dstVecType, source); return success(); @@ -281,19 +282,19 @@ auto oldSourceType = shuffleOp.getV1VectorType(); if (oldSourceType.getNumElements() > 1) { SmallVector components = llvm::to_vector<4>( - llvm::map_range(shuffleOp.mask(), [](Attribute attr) -> int32_t { + llvm::map_range(shuffleOp.getMask(), [](Attribute attr) -> int32_t { return attr.cast().getValue().getZExtValue(); })); rewriter.replaceOpWithNewOp( - shuffleOp, newResultType, adaptor.v1(), adaptor.v2(), + shuffleOp, newResultType, adaptor.getV1(), adaptor.getV2(), rewriter.getI32ArrayAttr(components)); return success(); } - SmallVector oldOperands = {adaptor.v1(), 
adaptor.v2()}; + SmallVector oldOperands = {adaptor.getV1(), adaptor.getV2()}; SmallVector newOperands; newOperands.reserve(oldResultType.getNumElements()); - for (const APInt &i : shuffleOp.mask().getAsValueRange()) { + for (const APInt &i : shuffleOp.getMask().getAsValueRange()) { newOperands.push_back(oldOperands[i.getZExtValue()]); } rewriter.replaceOpWithNewOp( diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp @@ -148,7 +148,7 @@ LLVM_DEBUG(DBGS() << "maybeTransferReadUser: " << *maybeTransferReadUser << "\n"); auto read = dyn_cast(maybeTransferReadUser); - if (read && read.indices() == write.transferWriteOp.indices() && + if (read && read.getIndices() == write.transferWriteOp.getIndices() && read.getVectorType() == write.transferWriteOp.getVectorType()) return HoistableRead{read, sliceOp}; } @@ -223,7 +223,7 @@ Value v = yieldOperand.get(); if (auto write = v.getDefiningOp()) { // Indexing must not depend on `forOp`. - for (Value operand : write.indices()) + for (Value operand : write.getIndices()) if (!forOp.isDefinedOutsideOfLoop(operand)) return HoistableWrite(); @@ -287,7 +287,7 @@ read.extractSliceOp.sourceMutable().assign( forOp.getInitArgs()[initArgNumber]); else - read.transferReadOp.sourceMutable().assign( + read.transferReadOp.getSourceMutable().assign( forOp.getInitArgs()[initArgNumber]); // Hoist write after. @@ -300,12 +300,12 @@ if (write.insertSliceOp) yieldOp->setOperand(initArgNumber, write.insertSliceOp.dest()); else - yieldOp->setOperand(initArgNumber, write.transferWriteOp.source()); + yieldOp->setOperand(initArgNumber, write.transferWriteOp.getSource()); // Rewrite `loop` with additional new yields. OpBuilder b(read.transferReadOp); - auto newForOp = cloneWithNewYields(b, forOp, read.transferReadOp.vector(), - write.transferWriteOp.vector()); + auto newForOp = cloneWithNewYields(b, forOp, read.transferReadOp.getVector(), + write.transferWriteOp.getVector()); // Transfer write has been hoisted, need to update the vector and tensor // source. Replace the result of the loop to use the new tensor created // outside the loop. @@ -314,17 +314,18 @@ if (write.insertSliceOp) { newForOp.getResult(initArgNumber) .replaceAllUsesWith(write.insertSliceOp.getResult()); - write.transferWriteOp.sourceMutable().assign(read.extractSliceOp.result()); + write.transferWriteOp.getSourceMutable().assign( + read.extractSliceOp.result()); write.insertSliceOp.destMutable().assign(read.extractSliceOp.source()); } else { newForOp.getResult(initArgNumber) .replaceAllUsesWith(write.transferWriteOp.getResult()); - write.transferWriteOp.sourceMutable().assign( + write.transferWriteOp.getSourceMutable().assign( newForOp.getResult(initArgNumber)); } // Always update with the newly yield tensor and vector. 
- write.transferWriteOp.vectorMutable().assign(newForOp.getResults().back()); + write.transferWriteOp.getVectorMutable().assign(newForOp.getResults().back()); } // To hoist transfer op on tensor the logic can be significantly simplified @@ -356,7 +357,7 @@ if (write.insertSliceOp) LLVM_DEBUG(DBGS() << "Candidate insert_slice for hoisting: " << *write.insertSliceOp.getOperation() << "\n"); - if (llvm::any_of(write.transferWriteOp.indices(), + if (llvm::any_of(write.transferWriteOp.getIndices(), [&forOp](Value index) { return !forOp.isDefinedOutsideOfLoop(index); })) @@ -425,7 +426,8 @@ vector::TransferWriteOp transferWrite; for (auto *sliceOp : llvm::reverse(forwardSlice)) { auto candidateWrite = dyn_cast(sliceOp); - if (!candidateWrite || candidateWrite.source() != transferRead.source()) + if (!candidateWrite || + candidateWrite.getSource() != transferRead.getSource()) continue; transferWrite = candidateWrite; } @@ -447,7 +449,7 @@ // 2. no other operations in the loop access the same memref except // for transfer_read/transfer_write accessing statically disjoint // slices. - if (transferRead.indices() != transferWrite.indices() && + if (transferRead.getIndices() != transferWrite.getIndices() && transferRead.getVectorType() == transferWrite.getVectorType()) return WalkResult::advance(); @@ -456,7 +458,7 @@ DominanceInfo dom(loop); if (!dom.properlyDominates(transferRead.getOperation(), transferWrite)) return WalkResult::advance(); - for (auto &use : transferRead.source().getUses()) { + for (auto &use : transferRead.getSource().getUses()) { if (!loop->isAncestor(use.getOwner())) continue; if (use.getOwner() == transferRead.getOperation() || @@ -493,12 +495,12 @@ // Rewrite `loop` with new yields by cloning and erase the original loop. OpBuilder b(transferRead); - auto newForOp = cloneWithNewYields(b, loop, transferRead.vector(), - transferWrite.vector()); + auto newForOp = cloneWithNewYields(b, loop, transferRead.getVector(), + transferWrite.getVector()); // Transfer write has been hoisted, need to update the written value to // the value yielded by the newForOp. - transferWrite.vector().replaceAllUsesWith( + transferWrite.getVector().replaceAllUsesWith( newForOp.getResults().take_back()[0]); changed = true; diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -854,15 +854,15 @@ if (!padValue) return failure(); // Padding value of existing `xferOp` is unused. 
- if (xferOp.hasOutOfBoundsDim() || xferOp.mask()) + if (xferOp.hasOutOfBoundsDim() || xferOp.getMask()) return failure(); rewriter.updateRootInPlace(xferOp, [&]() { SmallVector inBounds(xferOp.getVectorType().getRank(), false); xferOp->setAttr(xferOp.getInBoundsAttrName(), rewriter.getBoolArrayAttr(inBounds)); - xferOp.sourceMutable().assign(padOp.source()); - xferOp.paddingMutable().assign(padValue); + xferOp.getSourceMutable().assign(padOp.source()); + xferOp.getPaddingMutable().assign(padValue); }); return success(); @@ -937,8 +937,8 @@ SmallVector inBounds(xferOp.getVectorType().getRank(), false); auto newXferOp = rewriter.replaceOpWithNewOp( - xferOp, padOp.source().getType(), xferOp.vector(), padOp.source(), - xferOp.indices(), xferOp.permutation_mapAttr(), xferOp.mask(), + xferOp, padOp.source().getType(), xferOp.getVector(), padOp.source(), + xferOp.getIndices(), xferOp.getPermutationMapAttr(), xferOp.getMask(), rewriter.getBoolArrayAttr(inBounds)); rewriter.replaceOp(trimPadding, newXferOp->getResult(0)); @@ -1182,11 +1182,11 @@ vector::TransferReadOp xferOp, PatternRewriter &rewriter) const { // TODO: support mask. - if (xferOp.mask()) + if (xferOp.getMask()) return failure(); // Transfer into `view`. - Value viewOrAlloc = xferOp.source(); + Value viewOrAlloc = xferOp.getSource(); if (!viewOrAlloc.getDefiningOp() && !viewOrAlloc.getDefiningOp()) return failure(); @@ -1234,7 +1234,7 @@ } } // Ensure padding matches. - if (maybeFillOp && xferOp.padding() != maybeFillOp.value()) + if (maybeFillOp && xferOp.getPadding() != maybeFillOp.value()) return failure(); if (maybeFillOp) LDBG("with maybeFillOp " << *maybeFillOp); @@ -1247,8 +1247,8 @@ // When forwarding to vector.transfer_read, the attribute must be reset // conservatively. Value res = rewriter.create( - xferOp.getLoc(), xferOp.getVectorType(), in, xferOp.indices(), - xferOp.permutation_mapAttr(), xferOp.padding(), xferOp.mask(), + xferOp.getLoc(), xferOp.getVectorType(), in, xferOp.getIndices(), + xferOp.getPermutationMapAttr(), xferOp.getPadding(), xferOp.getMask(), // in_bounds is explicitly reset /*inBoundsAttr=*/ArrayAttr()); @@ -1265,11 +1265,11 @@ LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite( vector::TransferWriteOp xferOp, PatternRewriter &rewriter) const { // TODO: support mask. - if (xferOp.mask()) + if (xferOp.getMask()) return failure(); // Transfer into `viewOrAlloc`. - Value viewOrAlloc = xferOp.source(); + Value viewOrAlloc = xferOp.getSource(); if (!viewOrAlloc.getDefiningOp() && !viewOrAlloc.getDefiningOp()) return failure(); @@ -1305,8 +1305,8 @@ // When forwarding to vector.transfer_write, the attribute must be reset // conservatively. 
rewriter.create( - xferOp.getLoc(), xferOp.vector(), out, xferOp.indices(), - xferOp.permutation_mapAttr(), xferOp.mask(), + xferOp.getLoc(), xferOp.getVector(), out, xferOp.getIndices(), + xferOp.getPermutationMapAttr(), xferOp.getMask(), // in_bounds is explicitly reset /*inBoundsAttr=*/ArrayAttr()); diff --git a/mlir/lib/Dialect/MemRef/Transforms/FoldSubViewOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/FoldSubViewOps.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/FoldSubViewOps.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/FoldSubViewOps.cpp @@ -96,10 +96,12 @@ return op.memref(); } -static Value getMemRefOperand(vector::TransferReadOp op) { return op.source(); } +static Value getMemRefOperand(vector::TransferReadOp op) { + return op.getSource(); +} static Value getMemRefOperand(vector::TransferWriteOp op) { - return op.source(); + return op.getSource(); } /// Given the permutation map of the original @@ -175,9 +177,9 @@ transferReadOp, transferReadOp.getVectorType(), subViewOp.source(), sourceIndices, getPermutationMapAttr(rewriter.getContext(), subViewOp, - transferReadOp.permutation_map()), - transferReadOp.padding(), - /*mask=*/Value(), transferReadOp.in_boundsAttr()); + transferReadOp.getPermutationMap()), + transferReadOp.getPadding(), + /*mask=*/Value(), transferReadOp.getInBoundsAttr()); } template @@ -196,11 +198,11 @@ if (transferWriteOp.getTransferRank() == 0) return; rewriter.replaceOpWithNewOp( - transferWriteOp, transferWriteOp.vector(), subViewOp.source(), + transferWriteOp, transferWriteOp.getVector(), subViewOp.source(), sourceIndices, getPermutationMapAttr(rewriter.getContext(), subViewOp, - transferWriteOp.permutation_map()), - transferWriteOp.in_boundsAttr()); + transferWriteOp.getPermutationMap()), + transferWriteOp.getInBoundsAttr()); } } // namespace @@ -215,7 +217,7 @@ SmallVector sourceIndices; if (failed(resolveSourceIndices(loadOp.getLoc(), rewriter, subViewOp, - loadOp.indices(), sourceIndices))) + loadOp.getIndices(), sourceIndices))) return failure(); replaceOp(loadOp, subViewOp, sourceIndices, rewriter); @@ -233,7 +235,7 @@ SmallVector sourceIndices; if (failed(resolveSourceIndices(storeOp.getLoc(), rewriter, subViewOp, - storeOp.indices(), sourceIndices))) + storeOp.getIndices(), sourceIndices))) return failure(); replaceOp(storeOp, subViewOp, sourceIndices, rewriter); diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -76,7 +76,7 @@ // Inspect constant mask index. If the index exceeds the // dimension size, all bits are set. If the index is zero // or less, no bits are set. 
- ArrayAttr masks = m.mask_dim_sizes(); + ArrayAttr masks = m.getMaskDimSizes(); assert(masks.size() == 1); int64_t i = masks[0].cast().getInt(); int64_t u = m.getType().getDimSize(0); @@ -140,18 +140,18 @@ bool mlir::vector::checkSameValueRAW(vector::TransferWriteOp defWrite, vector::TransferReadOp read) { - return !defWrite.hasOutOfBoundsDim() && !defWrite.mask() && !read.mask() && - defWrite.indices() == read.indices() && + return !defWrite.hasOutOfBoundsDim() && !defWrite.getMask() && + !read.getMask() && defWrite.getIndices() == read.getIndices() && defWrite.getVectorType() == read.getVectorType() && - defWrite.permutation_map() == read.permutation_map(); + defWrite.getPermutationMap() == read.getPermutationMap(); } bool mlir::vector::checkSameValueWAW(vector::TransferWriteOp write, vector::TransferWriteOp priorWrite) { - return priorWrite.indices() == write.indices() && - priorWrite.mask() == write.mask() && + return priorWrite.getIndices() == write.getIndices() && + priorWrite.getMask() == write.getMask() && priorWrite.getVectorType() == write.getVectorType() && - priorWrite.permutation_map() == write.permutation_map(); + priorWrite.getPermutationMap() == write.getPermutationMap(); } bool mlir::vector::isDisjointTransferIndices( @@ -348,10 +348,10 @@ DictionaryAttr attributes, RegionRange, SmallVectorImpl &inferredReturnTypes) { MultiDimReductionOp::Adaptor op(operands, attributes); - auto vectorType = op.source().getType().cast(); + auto vectorType = op.getSource().getType().cast(); SmallVector targetShape; for (auto it : llvm::enumerate(vectorType.getShape())) - if (!llvm::any_of(op.reduction_dims().getValue(), [&](Attribute attr) { + if (!llvm::any_of(op.getReductionDims().getValue(), [&](Attribute attr) { return attr.cast().getValue() == it.index(); })) targetShape.push_back(it.value()); @@ -367,7 +367,7 @@ OpFoldResult MultiDimReductionOp::fold(ArrayRef operands) { // Single parallel dim, this is a noop. if (getSourceVectorType().getRank() == 1 && !isReducedDim(0)) - return source(); + return getSource(); return {}; } @@ -397,17 +397,17 @@ return emitOpError("unsupported reduction rank: ") << rank; // Verify supported reduction kind. - Type eltType = dest().getType(); - if (!isSupportedCombiningKind(kind(), eltType)) + Type eltType = getDest().getType(); + if (!isSupportedCombiningKind(getKind(), eltType)) return emitOpError("unsupported reduction type '") - << eltType << "' for kind '" << stringifyCombiningKind(kind()) + << eltType << "' for kind '" << stringifyCombiningKind(getKind()) << "'"; // Verify optional accumulator. 
- if (acc()) { - if (kind() != CombiningKind::ADD && kind() != CombiningKind::MUL) + if (getAcc()) { + if (getKind() != CombiningKind::ADD && getKind() != CombiningKind::MUL) return emitOpError("no accumulator for reduction kind: ") - << stringifyCombiningKind(kind()); + << stringifyCombiningKind(getKind()); if (!eltType.isa()) return emitOpError("no accumulator for type: ") << eltType; } @@ -439,11 +439,11 @@ void ReductionOp::print(OpAsmPrinter &p) { p << " "; - kindAttr().print(p); - p << ", " << vector(); - if (acc()) - p << ", " << acc(); - p << " : " << vector().getType() << " into " << dest().getType(); + getKindAttr().print(p); + p << ", " << getVector(); + if (getAcc()) + p << ", " << getAcc(); + p << " : " << getVector().getType() << " into " << getDest().getType(); } Value mlir::vector::getVectorReductionOp(arith::AtomicRMWKind op, @@ -582,13 +582,13 @@ attrs.push_back(attr); auto dictAttr = DictionaryAttr::get(getContext(), attrs); - p << " " << dictAttr << " " << lhs() << ", "; - p << rhs() << ", " << acc(); - if (masks().size() == 2) - p << ", " << masks(); + p << " " << dictAttr << " " << getLhs() << ", "; + p << getRhs() << ", " << getAcc(); + if (getMasks().size() == 2) + p << ", " << getMasks(); p.printOptionalAttrDict((*this)->getAttrs(), attrNames); - p << " : " << lhs().getType() << ", " << rhs().getType() << " into " + p << " : " << getLhs().getType() << ", " << getRhs().getType() << " into " << getResultType(); } @@ -696,14 +696,14 @@ auto resType = getResultType(); // Verify that an indexing map was specified for each vector operand. - if (indexing_maps().size() != 3) + if (getIndexingMaps().size() != 3) return emitOpError("expected an indexing map for each vector operand"); // Verify that each index map has 'numIterators' inputs, no symbols, and // that the number of map outputs equals the rank of its associated // vector operand. - unsigned numIterators = iterator_types().getValue().size(); - for (const auto &it : llvm::enumerate(indexing_maps())) { + unsigned numIterators = getIteratorTypes().getValue().size(); + for (const auto &it : llvm::enumerate(getIndexingMaps())) { auto index = it.index(); auto map = it.value(); if (map.getNumSymbols() != 0) @@ -759,7 +759,7 @@ // Verify supported combining kind. auto vectorType = resType.dyn_cast(); auto elementType = vectorType ? vectorType.getElementType() : resType; - if (!isSupportedCombiningKind(kind(), elementType)) + if (!isSupportedCombiningKind(getKind(), elementType)) return emitOpError("unsupported contraction type"); return success(); @@ -803,7 +803,7 @@ auto resVectorType = getResultType().dyn_cast(); SmallVector indexingMaps(getIndexingMaps()); SmallVector iterationShape; - for (const auto &it : llvm::enumerate(iterator_types())) { + for (const auto &it : llvm::enumerate(getIteratorTypes())) { // Search lhs/rhs map results for 'targetExpr'. 
auto targetExpr = getAffineDimExpr(it.index(), getContext()); auto iteratorTypeName = it.value().cast().getValue(); @@ -824,9 +824,9 @@ void ContractionOp::getIterationIndexMap( std::vector> &iterationIndexMap) { - unsigned numMaps = indexing_maps().size(); + unsigned numMaps = getIndexingMaps().size(); iterationIndexMap.resize(numMaps); - for (const auto &it : llvm::enumerate(indexing_maps())) { + for (const auto &it : llvm::enumerate(getIndexingMaps())) { auto index = it.index(); auto map = it.value(); for (unsigned i = 0, e = map.getNumResults(); i < e; ++i) { @@ -838,13 +838,13 @@ std::vector> ContractionOp::getContractingDimMap() { SmallVector indexingMaps(getIndexingMaps()); - return getDimMap(indexingMaps, iterator_types(), + return getDimMap(indexingMaps, getIteratorTypes(), getReductionIteratorTypeName(), getContext()); } std::vector> ContractionOp::getBatchDimMap() { SmallVector indexingMaps(getIndexingMaps()); - return getDimMap(indexingMaps, iterator_types(), + return getDimMap(indexingMaps, getIteratorTypes(), getParallelIteratorTypeName(), getContext()); } @@ -886,11 +886,11 @@ if (!contractionOp) return vector::ContractionOp(); if (auto maybeZero = dyn_cast_or_null( - contractionOp.acc().getDefiningOp())) { + contractionOp.getAcc().getDefiningOp())) { if (maybeZero.getValue() == - rewriter.getZeroAttr(contractionOp.acc().getType())) { + rewriter.getZeroAttr(contractionOp.getAcc().getType())) { BlockAndValueMapping bvm; - bvm.map(contractionOp.acc(), otherOperand); + bvm.map(contractionOp.getAcc(), otherOperand); auto newContraction = cast(rewriter.clone(*contractionOp, bvm)); rewriter.replaceOp(addOp, newContraction.getResult()); @@ -932,13 +932,13 @@ LogicalResult vector::ExtractElementOp::verify() { VectorType vectorType = getVectorType(); if (vectorType.getRank() == 0) { - if (position()) + if (getPosition()) return emitOpError("expected position to be empty with 0-D vector"); return success(); } if (vectorType.getRank() != 1) return emitOpError("unexpected >1 vector rank"); - if (!position()) + if (!getPosition()) return emitOpError("expected position for 1-D vector"); return success(); } @@ -968,11 +968,12 @@ RegionRange, SmallVectorImpl &inferredReturnTypes) { ExtractOp::Adaptor op(operands, attributes); - auto vectorType = op.vector().getType().cast(); - if (static_cast(op.position().size()) == vectorType.getRank()) { + auto vectorType = op.getVector().getType().cast(); + if (static_cast(op.getPosition().size()) == vectorType.getRank()) { inferredReturnTypes.push_back(vectorType.getElementType()); } else { - auto n = std::min(op.position().size(), vectorType.getRank() - 1); + auto n = + std::min(op.getPosition().size(), vectorType.getRank() - 1); inferredReturnTypes.push_back(VectorType::get( vectorType.getShape().drop_front(n), vectorType.getElementType())); } @@ -993,7 +994,7 @@ } LogicalResult vector::ExtractOp::verify() { - auto positionAttr = position().getValue(); + auto positionAttr = getPosition().getValue(); if (positionAttr.size() > static_cast(getVectorType().getRank())) return emitOpError( "expected position attribute of rank smaller than vector rank"); @@ -1019,19 +1020,19 @@ /// Fold the result of chains of ExtractOp in place by simply concatenating the /// positions. 
static LogicalResult foldExtractOpFromExtractChain(ExtractOp extractOp) { - if (!extractOp.vector().getDefiningOp()) + if (!extractOp.getVector().getDefiningOp()) return failure(); SmallVector globalPosition; ExtractOp currentOp = extractOp; - auto extrPos = extractVector(currentOp.position()); + auto extrPos = extractVector(currentOp.getPosition()); globalPosition.append(extrPos.rbegin(), extrPos.rend()); - while (ExtractOp nextOp = currentOp.vector().getDefiningOp()) { + while (ExtractOp nextOp = currentOp.getVector().getDefiningOp()) { currentOp = nextOp; - auto extrPos = extractVector(currentOp.position()); + auto extrPos = extractVector(currentOp.getPosition()); globalPosition.append(extrPos.rbegin(), extrPos.rend()); } - extractOp.setOperand(currentOp.vector()); + extractOp.setOperand(currentOp.getVector()); // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(extractOp.getContext()); std::reverse(globalPosition.begin(), globalPosition.end()); @@ -1143,12 +1144,12 @@ ExtractFromInsertTransposeChainState::ExtractFromInsertTransposeChainState( ExtractOp e) : extractOp(e), vectorRank(extractOp.getVectorType().getRank()), - extractedRank(extractOp.position().size()) { + extractedRank(extractOp.getPosition().size()) { assert(vectorRank >= extractedRank && "extracted pos overflow"); sentinels.reserve(vectorRank - extractedRank); for (int64_t i = 0, e = vectorRank - extractedRank; i < e; ++i) sentinels.push_back(-(i + 1)); - extractPosition = extractVector(extractOp.position()); + extractPosition = extractVector(extractOp.getPosition()); llvm::append_range(extractPosition, sentinels); } @@ -1157,7 +1158,7 @@ LogicalResult ExtractFromInsertTransposeChainState::handleTransposeOp() { if (!nextTransposeOp) return failure(); - auto permutation = extractVector(nextTransposeOp.transp()); + auto permutation = extractVector(nextTransposeOp.getTransp()); AffineMap m = inversePermutation( AffineMap::getPermutationMap(permutation, extractOp.getContext())); extractPosition = applyPermutationMap(m, makeArrayRef(extractPosition)); @@ -1168,12 +1169,12 @@ LogicalResult ExtractFromInsertTransposeChainState::handleInsertOpWithMatchingPos( Value &res) { - auto insertedPos = extractVector(nextInsertOp.position()); + auto insertedPos = extractVector(nextInsertOp.getPosition()); if (makeArrayRef(insertedPos) != llvm::makeArrayRef(extractPosition).take_front(extractedRank)) return failure(); // Case 2.a. early-exit fold. - res = nextInsertOp.source(); + res = nextInsertOp.getSource(); // Case 2.b. if internal transposition is present, canFold will be false. return success(); } @@ -1183,7 +1184,7 @@ /// This method updates the internal state. LogicalResult ExtractFromInsertTransposeChainState::handleInsertOpWithPrefixPos(Value &res) { - auto insertedPos = extractVector(nextInsertOp.position()); + auto insertedPos = extractVector(nextInsertOp.getPosition()); if (!isContainedWithin(insertedPos, extractPosition)) return failure(); // Set leading dims to zero. @@ -1193,7 +1194,7 @@ extractPosition.begin() + insertedPos.size()); extractedRank = extractPosition.size() - sentinels.size(); // Case 3.a. early-exit fold (break and delegate to post-while path). - res = nextInsertOp.source(); + res = nextInsertOp.getSource(); // Case 3.b. if internal transposition is present, canFold will be false. 
return success(); } @@ -1204,28 +1205,28 @@ Value ExtractFromInsertTransposeChainState::tryToFoldExtractOpInPlace( Value source) { // If we can't fold (either internal transposition, or nothing to fold), bail. - bool nothingToFold = (source == extractOp.vector()); + bool nothingToFold = (source == extractOp.getVector()); if (nothingToFold || !canFold()) return Value(); // Otherwise, fold by updating the op inplace and return its result. OpBuilder b(extractOp.getContext()); extractOp->setAttr( - extractOp.positionAttrName(), + extractOp.getPositionAttrName(), b.getI64ArrayAttr( makeArrayRef(extractPosition).take_front(extractedRank))); - extractOp.vectorMutable().assign(source); + extractOp.getVectorMutable().assign(source); return extractOp.getResult(); } /// Iterate over producing insert and transpose ops until we find a fold. Value ExtractFromInsertTransposeChainState::fold() { - Value valueToExtractFrom = extractOp.vector(); + Value valueToExtractFrom = extractOp.getVector(); updateStateForNextIteration(valueToExtractFrom); while (nextInsertOp || nextTransposeOp) { // Case 1. If we hit a transpose, just compose the map and iterate. // Invariant: insert + transpose do not change rank, we can always compose. if (succeeded(handleTransposeOp())) { - valueToExtractFrom = nextTransposeOp.vector(); + valueToExtractFrom = nextTransposeOp.getVector(); updateStateForNextIteration(valueToExtractFrom); continue; } @@ -1242,13 +1243,13 @@ // Case 4: extractPositionRef intersects insertedPosRef on non-sentinel // values. This is a more difficult case and we bail. - auto insertedPos = extractVector(nextInsertOp.position()); + auto insertedPos = extractVector(nextInsertOp.getPosition()); if (isContainedWithin(extractPosition, insertedPos) || intersectsWhereNonNegative(extractPosition, insertedPos)) return Value(); // Case 5: No intersection, we forward the extract to insertOp.dest(). - valueToExtractFrom = nextInsertOp.dest(); + valueToExtractFrom = nextInsertOp.getDest(); updateStateForNextIteration(valueToExtractFrom); } // If after all this we can fold, go for it. @@ -1257,7 +1258,7 @@ /// Fold extractOp with scalar result coming from BroadcastOp or SplatOp. static Value foldExtractFromBroadcast(ExtractOp extractOp) { - Operation *defOp = extractOp.vector().getDefiningOp(); + Operation *defOp = extractOp.getVector().getDefiningOp(); if (!defOp || !isa(defOp)) return Value(); Value source = defOp->getOperand(0); @@ -1269,7 +1270,7 @@ unsigned broadcastSrcRank = getRank(source.getType()); unsigned extractResultRank = getRank(extractOp.getType()); if (extractResultRank < broadcastSrcRank) { - auto extractPos = extractVector(extractOp.position()); + auto extractPos = extractVector(extractOp.getPosition()); unsigned rankDiff = broadcastSrcRank - extractResultRank; extractPos.erase( extractPos.begin(), @@ -1286,7 +1287,7 @@ // Fold extractOp with source coming from ShapeCast op. static Value foldExtractFromShapeCast(ExtractOp extractOp) { - auto shapeCastOp = extractOp.vector().getDefiningOp(); + auto shapeCastOp = extractOp.getVector().getDefiningOp(); if (!shapeCastOp) return Value(); // Get the nth dimension size starting from lowest dimension. @@ -1312,7 +1313,7 @@ } // Extract the strides associated with the extract op vector source. Then use // this to calculate a linearized position for the extract. 
- auto extractedPos = extractVector(extractOp.position()); + auto extractedPos = extractVector(extractOp.getPosition()); std::reverse(extractedPos.begin(), extractedPos.end()); SmallVector strides; int64_t stride = 1; @@ -1339,14 +1340,14 @@ OpBuilder b(extractOp.getContext()); extractOp->setAttr(ExtractOp::getPositionAttrStrName(), b.getI64ArrayAttr(newPosition)); - extractOp.setOperand(shapeCastOp.source()); + extractOp.setOperand(shapeCastOp.getSource()); return extractOp.getResult(); } /// Fold an ExtractOp from ExtractStridedSliceOp. static Value foldExtractFromExtractStrided(ExtractOp extractOp) { auto extractStridedSliceOp = - extractOp.vector().getDefiningOp(); + extractOp.getVector().getDefiningOp(); if (!extractStridedSliceOp) return Value(); // Return if 'extractStridedSliceOp' has non-unit strides. @@ -1354,7 +1355,8 @@ return Value(); // Trim offsets for dimensions fully extracted. - auto sliceOffsets = extractVector(extractStridedSliceOp.offsets()); + auto sliceOffsets = + extractVector(extractStridedSliceOp.getOffsets()); while (!sliceOffsets.empty()) { size_t lastOffset = sliceOffsets.size() - 1; if (sliceOffsets.back() != 0 || @@ -1371,11 +1373,11 @@ if (destinationRank > extractStridedSliceOp.getVectorType().getRank() - sliceOffsets.size()) return Value(); - auto extractedPos = extractVector(extractOp.position()); + auto extractedPos = extractVector(extractOp.getPosition()); assert(extractedPos.size() >= sliceOffsets.size()); for (size_t i = 0, e = sliceOffsets.size(); i < e; i++) extractedPos[i] = extractedPos[i] + sliceOffsets[i]; - extractOp.vectorMutable().assign(extractStridedSliceOp.vector()); + extractOp.getVectorMutable().assign(extractStridedSliceOp.getVector()); // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(extractOp.getContext()); extractOp->setAttr(ExtractOp::getPositionAttrStrName(), @@ -1388,16 +1390,16 @@ int64_t destinationRank = op.getType().isa() ? op.getType().cast().getRank() : 0; - auto insertOp = op.vector().getDefiningOp(); + auto insertOp = op.getVector().getDefiningOp(); while (insertOp) { int64_t insertRankDiff = insertOp.getDestVectorType().getRank() - insertOp.getSourceVectorType().getRank(); if (destinationRank > insertOp.getSourceVectorType().getRank()) return Value(); - auto insertOffsets = extractVector(insertOp.offsets()); - auto extractOffsets = extractVector(op.position()); + auto insertOffsets = extractVector(insertOp.getOffsets()); + auto extractOffsets = extractVector(op.getPosition()); - if (llvm::any_of(insertOp.strides(), [](Attribute attr) { + if (llvm::any_of(insertOp.getStrides(), [](Attribute attr) { return attr.cast().getInt() != 1; })) return Value(); @@ -1432,7 +1434,7 @@ insertRankDiff)) return Value(); } - op.vectorMutable().assign(insertOp.source()); + op.getVectorMutable().assign(insertOp.getSource()); // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(op.getContext()); op->setAttr(ExtractOp::getPositionAttrStrName(), @@ -1441,14 +1443,14 @@ } // If the chunk extracted is disjoint from the chunk inserted, keep // looking in the insert chain. 
- insertOp = insertOp.dest().getDefiningOp(); + insertOp = insertOp.getDest().getDefiningOp(); } return Value(); } OpFoldResult ExtractOp::fold(ArrayRef) { - if (position().empty()) - return vector(); + if (getPosition().empty()) + return getVector(); if (succeeded(foldExtractOpFromExtractChain(*this))) return getResult(); if (auto res = ExtractFromInsertTransposeChainState(*this).fold()) @@ -1473,7 +1475,7 @@ LogicalResult matchAndRewrite(ExtractOp extractOp, PatternRewriter &rewriter) const override { - Operation *defOp = extractOp.vector().getDefiningOp(); + Operation *defOp = extractOp.getVector().getDefiningOp(); if (!defOp || !isa(defOp)) return failure(); Value source = defOp->getOperand(0); @@ -1504,7 +1506,7 @@ PatternRewriter &rewriter) const override { // Return if 'extractStridedSliceOp' operand is not defined by a // ConstantOp. - auto constantOp = extractOp.vector().getDefiningOp(); + auto constantOp = extractOp.getVector().getDefiningOp(); if (!constantOp) return failure(); auto dense = constantOp.getValue().dyn_cast(); @@ -1566,18 +1568,18 @@ if (getSourceVectorType().getDimSize(i) != getResultType().getDimSize(i)) numId++; } - if (numId != ids().size()) + if (numId != getIds().size()) return emitOpError("expected number of ids must match the number of " "dimensions distributed"); return success(); } OpFoldResult ExtractMapOp::fold(ArrayRef operands) { - auto insert = vector().getDefiningOp(); - if (insert == nullptr || getType() != insert.vector().getType() || - ids() != insert.ids()) + auto insert = getVector().getDefiningOp(); + if (insert == nullptr || getType() != insert.getVector().getType() || + getIds() != insert.getIds()) return {}; - return insert.vector(); + return insert.getVector(); } void ExtractMapOp::getMultiplicity(SmallVectorImpl &multiplicity) { @@ -1670,7 +1672,7 @@ OpFoldResult BroadcastOp::fold(ArrayRef operands) { if (getSourceType() == getVectorType()) - return source(); + return getSource(); if (!operands[0]) return {}; auto vectorType = getVectorType(); @@ -1689,11 +1691,11 @@ LogicalResult matchAndRewrite(BroadcastOp broadcastOp, PatternRewriter &rewriter) const override { - auto srcBroadcast = broadcastOp.source().getDefiningOp(); + auto srcBroadcast = broadcastOp.getSource().getDefiningOp(); if (!srcBroadcast) return failure(); rewriter.replaceOpWithNewOp( - broadcastOp, broadcastOp.getVectorType(), srcBroadcast.source()); + broadcastOp, broadcastOp.getVectorType(), srcBroadcast.getSource()); return success(); } }; @@ -1734,7 +1736,7 @@ return emitOpError("dimension mismatch"); } // Verify mask length. - auto maskAttr = mask().getValue(); + auto maskAttr = getMask().getValue(); int64_t maskLength = maskAttr.size(); if (maskLength <= 0) return emitOpError("invalid mask length"); @@ -1756,12 +1758,12 @@ RegionRange, SmallVectorImpl &inferredReturnTypes) { ShuffleOp::Adaptor op(operands, attributes); - auto v1Type = op.v1().getType().cast(); + auto v1Type = op.getV1().getType().cast(); // Construct resulting type: leading dimension matches mask length, // all trailing dimensions match the operands. 
SmallVector shape; shape.reserve(v1Type.getRank()); - shape.push_back(std::max(1, op.mask().size())); + shape.push_back(std::max(1, op.getMask().size())); llvm::append_range(shape, v1Type.getShape().drop_front()); inferredReturnTypes.push_back( VectorType::get(shape, v1Type.getElementType())); @@ -1783,7 +1785,7 @@ SmallVector results; auto lhsElements = lhs.cast().getValues(); auto rhsElements = rhs.cast().getValues(); - for (const auto &index : this->mask().getAsValueRange()) { + for (const auto &index : this->getMask().getAsValueRange()) { int64_t i = index.getZExtValue(); if (i >= lhsSize) { results.push_back(rhsElements[i - lhsSize]); @@ -1807,13 +1809,13 @@ LogicalResult InsertElementOp::verify() { auto dstVectorType = getDestVectorType(); if (dstVectorType.getRank() == 0) { - if (position()) + if (getPosition()) return emitOpError("expected position to be empty with 0-D vector"); return success(); } if (dstVectorType.getRank() != 1) return emitOpError("unexpected >1 vector rank"); - if (!position()) + if (!getPosition()) return emitOpError("expected position for 1-D vector"); return success(); } @@ -1841,7 +1843,7 @@ } LogicalResult InsertOp::verify() { - auto positionAttr = position().getValue(); + auto positionAttr = getPosition().getValue(); auto destVectorType = getDestVectorType(); if (positionAttr.size() > static_cast(destVectorType.getRank())) return emitOpError( @@ -1883,7 +1885,7 @@ srcVecType.getNumElements()) return failure(); rewriter.replaceOpWithNewOp( - insertOp, insertOp.getDestVectorType(), insertOp.source()); + insertOp, insertOp.getDestVectorType(), insertOp.getSource()); return success(); } }; @@ -1899,8 +1901,8 @@ // value. This happens when the source and destination vectors have identical // sizes. OpFoldResult vector::InsertOp::fold(ArrayRef operands) { - if (position().empty()) - return source(); + if (getPosition().empty()) + return getSource(); return {}; } @@ -1920,7 +1922,7 @@ if (getResultType().getDimSize(i) != getSourceVectorType().getDimSize(i)) numId++; } - if (numId != ids().size()) + if (numId != getIds().size()) return emitOpError("expected number of ids must match the number of " "dimensions distributed"); return success(); @@ -2037,8 +2039,8 @@ LogicalResult InsertStridedSliceOp::verify() { auto sourceVectorType = getSourceVectorType(); auto destVectorType = getDestVectorType(); - auto offsets = offsetsAttr(); - auto strides = stridesAttr(); + auto offsets = getOffsetsAttr(); + auto strides = getStridesAttr(); if (offsets.size() != static_cast(destVectorType.getRank())) return emitOpError( "expected offsets of same size as destination vector rank"); @@ -2072,7 +2074,7 @@ OpFoldResult InsertStridedSliceOp::fold(ArrayRef operands) { if (getSourceVectorType() == getDestVectorType()) - return source(); + return getSource(); return {}; } @@ -2088,12 +2090,12 @@ } void OuterProductOp::print(OpAsmPrinter &p) { - p << " " << lhs() << ", " << rhs(); - if (!acc().empty()) { - p << ", " << acc(); + p << " " << getLhs() << ", " << getRhs(); + if (!getAcc().empty()) { + p << ", " << getAcc(); p.printOptionalAttrDict((*this)->getAttrs()); } - p << " : " << lhs().getType() << ", " << rhs().getType(); + p << " : " << getLhs().getType() << ", " << getRhs().getType(); } ParseResult OuterProductOp::parse(OpAsmParser &parser, OperationState &result) { @@ -2163,7 +2165,7 @@ return emitOpError("expected operand #3 of same type as result type"); // Verify supported combining kind. 
- if (!isSupportedCombiningKind(kind(), vRES.getElementType())) + if (!isSupportedCombiningKind(getKind(), vRES.getElementType())) return emitOpError("unsupported outerproduct type"); return success(); @@ -2214,14 +2216,14 @@ auto isDefByConstant = [](Value operand) { return isa_and_nonnull(operand.getDefiningOp()); }; - if (llvm::all_of(input_shape(), isDefByConstant) && - llvm::all_of(output_shape(), isDefByConstant)) { + if (llvm::all_of(getInputShape(), isDefByConstant) && + llvm::all_of(getOutputShape(), isDefByConstant)) { int64_t numInputElements = 1; - for (auto operand : input_shape()) + for (auto operand : getInputShape()) numInputElements *= cast(operand.getDefiningOp()).value(); int64_t numOutputElements = 1; - for (auto operand : output_shape()) + for (auto operand : getOutputShape()) numOutputElements *= cast(operand.getDefiningOp()).value(); if (numInputElements != numOutputElements) @@ -2231,7 +2233,7 @@ } void ReshapeOp::getFixedVectorSizes(SmallVectorImpl &results) { - populateFromInt64AttrArray(fixed_vector_sizes(), results); + populateFromInt64AttrArray(getFixedVectorSizes(), results); } //===----------------------------------------------------------------------===// @@ -2274,9 +2276,9 @@ LogicalResult ExtractStridedSliceOp::verify() { auto type = getVectorType(); - auto offsets = offsetsAttr(); - auto sizes = sizesAttr(); - auto strides = stridesAttr(); + auto offsets = getOffsetsAttr(); + auto sizes = getSizesAttr(); + auto strides = getStridesAttr(); if (offsets.size() != sizes.size() || offsets.size() != strides.size()) return emitOpError("expected offsets, sizes and strides attributes of same size"); @@ -2316,16 +2318,16 @@ auto getElement = [](ArrayAttr array, int idx) { return array[idx].cast().getInt(); }; - ArrayAttr extractOffsets = op.offsets(); - ArrayAttr extractStrides = op.strides(); - ArrayAttr extractSizes = op.sizes(); - auto insertOp = op.vector().getDefiningOp(); + ArrayAttr extractOffsets = op.getOffsets(); + ArrayAttr extractStrides = op.getStrides(); + ArrayAttr extractSizes = op.getSizes(); + auto insertOp = op.getVector().getDefiningOp(); while (insertOp) { if (op.getVectorType().getRank() != insertOp.getSourceVectorType().getRank()) return failure(); - ArrayAttr insertOffsets = insertOp.offsets(); - ArrayAttr insertStrides = insertOp.strides(); + ArrayAttr insertOffsets = insertOp.getOffsets(); + ArrayAttr insertStrides = insertOp.getStrides(); // If the rank of extract is greater than the rank of insert, we are likely // extracting a partial chunk of the vector inserted. if (extractOffsets.size() > insertOffsets.size()) @@ -2354,7 +2356,7 @@ } // The extract element chunk is a subset of the insert element. if (!disjoint && !patialoverlap) { - op.setOperand(insertOp.source()); + op.setOperand(insertOp.getSource()); // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(op.getContext()); op->setAttr(ExtractStridedSliceOp::getOffsetsAttrStrName(), @@ -2364,7 +2366,7 @@ // If the chunk extracted is disjoint from the chunk inserted, keep looking // in the insert chain. if (disjoint) - insertOp = insertOp.dest().getDefiningOp(); + insertOp = insertOp.getDest().getDefiningOp(); else { // The extracted vector partially overlap the inserted vector, we cannot // fold. 
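Note on the ArrayAttr-valued accessors renamed in the hunks above: only the C++ spelling changes, the element type stays the same. A minimal sketch (illustrative only, assuming `extractStridedSliceOp` is an already-verified vector.extract_strided_slice op) of reading its static offsets through the prefixed accessor:

  // Collect the constant offsets via the prefixed ArrayAttr accessor.
  SmallVector<int64_t, 4> offsets;
  for (Attribute attr : extractStridedSliceOp.getOffsets())
    offsets.push_back(attr.cast<IntegerAttr>().getInt());

The same pattern applies to getSizes() and getStrides(); it mirrors what the in-file populateFromInt64AttrArray helper does in the surrounding hunks.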
@@ -2376,14 +2378,14 @@ OpFoldResult ExtractStridedSliceOp::fold(ArrayRef operands) { if (getVectorType() == getResult().getType()) - return vector(); + return getVector(); if (succeeded(foldExtractStridedOpFromInsertChain(*this))) return getResult(); return {}; } void ExtractStridedSliceOp::getOffsets(SmallVectorImpl &results) { - populateFromInt64AttrArray(offsets(), results); + populateFromInt64AttrArray(getOffsets(), results); } namespace { @@ -2399,7 +2401,7 @@ PatternRewriter &rewriter) const override { // Return if 'extractStridedSliceOp' operand is not defined by a // ConstantMaskOp. - auto *defOp = extractStridedSliceOp.vector().getDefiningOp(); + auto *defOp = extractStridedSliceOp.getVector().getDefiningOp(); auto constantMaskOp = dyn_cast_or_null(defOp); if (!constantMaskOp) return failure(); @@ -2408,12 +2410,13 @@ return failure(); // Gather constant mask dimension sizes. SmallVector maskDimSizes; - populateFromInt64AttrArray(constantMaskOp.mask_dim_sizes(), maskDimSizes); + populateFromInt64AttrArray(constantMaskOp.getMaskDimSizes(), maskDimSizes); // Gather strided slice offsets and sizes. SmallVector sliceOffsets; - populateFromInt64AttrArray(extractStridedSliceOp.offsets(), sliceOffsets); + populateFromInt64AttrArray(extractStridedSliceOp.getOffsets(), + sliceOffsets); SmallVector sliceSizes; - populateFromInt64AttrArray(extractStridedSliceOp.sizes(), sliceSizes); + populateFromInt64AttrArray(extractStridedSliceOp.getSizes(), sliceSizes); // Compute slice of vector mask region. SmallVector sliceMaskDimSizes; @@ -2452,7 +2455,7 @@ // Return if 'extractStridedSliceOp' operand is not defined by a // ConstantOp. auto constantOp = - extractStridedSliceOp.vector().getDefiningOp(); + extractStridedSliceOp.getVector().getDefiningOp(); if (!constantOp) return failure(); auto dense = constantOp.getValue().dyn_cast(); @@ -2475,10 +2478,10 @@ LogicalResult matchAndRewrite(ExtractStridedSliceOp op, PatternRewriter &rewriter) const override { - auto broadcast = op.vector().getDefiningOp(); + auto broadcast = op.getVector().getDefiningOp(); if (!broadcast) return failure(); - auto srcVecType = broadcast.source().getType().dyn_cast(); + auto srcVecType = broadcast.getSource().getType().dyn_cast(); unsigned srcRrank = srcVecType ? srcVecType.getRank() : 0; auto dstVecType = op.getType().cast(); unsigned dstRank = dstVecType.getRank(); @@ -2493,15 +2496,15 @@ break; } } - Value source = broadcast.source(); + Value source = broadcast.getSource(); if (!lowerDimMatch) { // The inner dimensions don't match, it means we need to extract from the // source of the orignal broadcast and then broadcast the extracted value. 
source = rewriter.create( op->getLoc(), source, - getI64SubArray(op.offsets(), /* dropFront=*/rankDiff), - getI64SubArray(op.sizes(), /* dropFront=*/rankDiff), - getI64SubArray(op.strides(), /* dropFront=*/rankDiff)); + getI64SubArray(op.getOffsets(), /* dropFront=*/rankDiff), + getI64SubArray(op.getSizes(), /* dropFront=*/rankDiff), + getI64SubArray(op.getStrides(), /* dropFront=*/rankDiff)); } rewriter.replaceOpWithNewOp(op, op.getType(), source); return success(); @@ -2515,10 +2518,10 @@ LogicalResult matchAndRewrite(ExtractStridedSliceOp op, PatternRewriter &rewriter) const override { - auto splat = op.vector().getDefiningOp(); + auto splat = op.getVector().getDefiningOp(); if (!splat) return failure(); - rewriter.replaceOpWithNewOp(op, op.getType(), splat.input()); + rewriter.replaceOpWithNewOp(op, op.getType(), splat.getInput()); return success(); } }; @@ -2726,9 +2729,9 @@ } void TransferReadOp::print(OpAsmPrinter &p) { - p << " " << source() << "[" << indices() << "], " << padding(); - if (mask()) - p << ", " << mask(); + p << " " << getSource() << "[" << getIndices() << "], " << getPadding(); + if (getMask()) + p << ", " << getMask(); printTransferAttrs(p, *this); p << " : " << getShapedType() << ", " << getVectorType(); } @@ -2798,16 +2801,16 @@ ShapedType shapedType = getShapedType(); VectorType vectorType = getVectorType(); VectorType maskType = getMaskType(); - auto paddingType = padding().getType(); - auto permutationMap = permutation_map(); + auto paddingType = getPadding().getType(); + auto permutationMap = getPermutationMap(); auto sourceElementType = shapedType.getElementType(); - if (static_cast(indices().size()) != shapedType.getRank()) + if (static_cast(getIndices().size()) != shapedType.getRank()) return emitOpError("requires ") << shapedType.getRank() << " indices"; if (failed(verifyTransferOp(cast(getOperation()), shapedType, vectorType, maskType, permutationMap, - in_bounds() ? *in_bounds() : ArrayAttr()))) + getInBounds() ? *getInBounds() : ArrayAttr()))) return failure(); if (auto sourceVectorElementType = sourceElementType.dyn_cast()) { @@ -2867,7 +2870,7 @@ // `op.indices()[indicesIdx] + vectorType < dim(op.source(), indicesIdx)` if (op.getShapedType().isDynamicDim(indicesIdx)) return false; - Value index = op.indices()[indicesIdx]; + Value index = op.getIndices()[indicesIdx]; auto cstOp = index.getDefiningOp(); if (!cstOp) return false; @@ -2884,7 +2887,7 @@ // TODO: Be less conservative. 
if (op.getTransferRank() == 0) return failure(); - AffineMap permutationMap = op.permutation_map(); + AffineMap permutationMap = op.getPermutationMap(); bool changed = false; SmallVector newInBounds; newInBounds.reserve(op.getTransferRank()); @@ -2926,15 +2929,15 @@ static Value foldRAW(TransferReadOp readOp) { if (!readOp.getShapedType().isa()) return {}; - auto defWrite = readOp.source().getDefiningOp(); + auto defWrite = readOp.getSource().getDefiningOp(); while (defWrite) { if (checkSameValueRAW(defWrite, readOp)) - return defWrite.vector(); + return defWrite.getVector(); if (!isDisjointTransferIndices( cast(defWrite.getOperation()), cast(readOp.getOperation()))) break; - defWrite = defWrite.source().getDefiningOp(); + defWrite = defWrite.getSource().getDefiningOp(); } return {}; } @@ -2960,7 +2963,7 @@ SmallVectorImpl> &effects) { if (getShapedType().isa()) - effects.emplace_back(MemoryEffects::Read::get(), source(), + effects.emplace_back(MemoryEffects::Read::get(), getSource(), SideEffects::DefaultResource::get()); } @@ -2992,11 +2995,11 @@ return failure(); if (xferOp.hasOutOfBoundsDim()) return failure(); - if (!xferOp.permutation_map().isIdentity()) + if (!xferOp.getPermutationMap().isIdentity()) return failure(); - if (xferOp.mask()) + if (xferOp.getMask()) return failure(); - auto extractOp = xferOp.source().getDefiningOp(); + auto extractOp = xferOp.getSource().getDefiningOp(); if (!extractOp) return failure(); if (!extractOp.hasUnitStride()) @@ -3039,7 +3042,7 @@ newIndices.push_back(getValueOrCreateConstantIndexOp( rewriter, extractOp.getLoc(), offset)); } - for (const auto &it : llvm::enumerate(xferOp.indices())) { + for (const auto &it : llvm::enumerate(xferOp.getIndices())) { OpFoldResult offset = extractOp.getMixedOffsets()[it.index() + rankReduced]; newIndices.push_back(rewriter.create( @@ -3050,7 +3053,7 @@ SmallVector inBounds(xferOp.getTransferRank(), true); rewriter.replaceOpWithNewOp( xferOp, xferOp.getVectorType(), extractOp.source(), newIndices, - xferOp.padding(), ArrayRef{inBounds}); + xferOp.getPadding(), ArrayRef{inBounds}); return success(); } @@ -3165,9 +3168,9 @@ } void TransferWriteOp::print(OpAsmPrinter &p) { - p << " " << vector() << ", " << source() << "[" << indices() << "]"; - if (mask()) - p << ", " << mask(); + p << " " << getVector() << ", " << getSource() << "[" << getIndices() << "]"; + if (getMask()) + p << ", " << getMask(); printTransferAttrs(p, *this); p << " : " << getVectorType() << ", " << getShapedType(); } @@ -3177,9 +3180,9 @@ ShapedType shapedType = getShapedType(); VectorType vectorType = getVectorType(); VectorType maskType = getMaskType(); - auto permutationMap = permutation_map(); + auto permutationMap = getPermutationMap(); - if (llvm::size(indices()) != shapedType.getRank()) + if (llvm::size(getIndices()) != shapedType.getRank()) return emitOpError("requires ") << shapedType.getRank() << " indices"; // We do not allow broadcast dimensions on TransferWriteOps for the moment, @@ -3189,7 +3192,7 @@ if (failed(verifyTransferOp(cast(getOperation()), shapedType, vectorType, maskType, permutationMap, - in_bounds() ? *in_bounds() : ArrayAttr()))) + getInBounds() ? *getInBounds() : ArrayAttr()))) return failure(); return verifyPermutationMap(permutationMap, @@ -3219,20 +3222,21 @@ // TODO: support 0-d corner case. if (write.getTransferRank() == 0) return failure(); - auto rankedTensorType = write.source().getType().dyn_cast(); + auto rankedTensorType = + write.getSource().getType().dyn_cast(); // If not operating on tensors, bail. 
if (!rankedTensorType) return failure(); // If no read, bail. - auto read = write.vector().getDefiningOp(); + auto read = write.getVector().getDefiningOp(); if (!read) return failure(); // TODO: support 0-d corner case. if (read.getTransferRank() == 0) return failure(); // For now, only accept minor identity. Future: composition is minor identity. - if (!read.permutation_map().isMinorIdentity() || - !write.permutation_map().isMinorIdentity()) + if (!read.getPermutationMap().isMinorIdentity() || + !write.getPermutationMap().isMinorIdentity()) return failure(); // Bail on mismatching ranks. if (read.getTransferRank() != write.getTransferRank()) @@ -3241,7 +3245,7 @@ if (read.hasOutOfBoundsDim() || write.hasOutOfBoundsDim()) return failure(); // Tensor types must be the same. - if (read.source().getType() != rankedTensorType) + if (read.getSource().getType() != rankedTensorType) return failure(); // Vector types must be the same. if (read.getVectorType() != write.getVectorType()) @@ -3254,20 +3258,21 @@ auto cstOp = v.getDefiningOp(); return !cstOp || cstOp.value() != 0; }; - if (llvm::any_of(read.indices(), isNotConstantZero) || - llvm::any_of(write.indices(), isNotConstantZero)) + if (llvm::any_of(read.getIndices(), isNotConstantZero) || + llvm::any_of(write.getIndices(), isNotConstantZero)) return failure(); // Success. - results.push_back(read.source()); + results.push_back(read.getSource()); return success(); } static bool checkSameValueWAR(vector::TransferReadOp read, vector::TransferWriteOp write) { - return read.source() == write.source() && read.indices() == write.indices() && - read.permutation_map() == write.permutation_map() && - read.getVectorType() == write.getVectorType() && !read.mask() && - !write.mask(); + return read.getSource() == write.getSource() && + read.getIndices() == write.getIndices() && + read.getPermutationMap() == write.getPermutationMap() && + read.getVectorType() == write.getVectorType() && !read.getMask() && + !write.getMask(); } /// Fold transfer_write write after read: /// ``` @@ -3285,15 +3290,15 @@ /// ``` static LogicalResult foldWAR(TransferWriteOp write, SmallVectorImpl &results) { - if (!write.source().getType().isa()) + if (!write.getSource().getType().isa()) return failure(); - auto read = write.vector().getDefiningOp(); + auto read = write.getVector().getDefiningOp(); if (!read) return failure(); if (!checkSameValueWAR(read, write)) return failure(); - results.push_back(read.source()); + results.push_back(read.getSource()); return success(); } @@ -3316,7 +3321,7 @@ SmallVectorImpl> &effects) { if (getShapedType().isa()) - effects.emplace_back(MemoryEffects::Write::get(), source(), + effects.emplace_back(MemoryEffects::Write::get(), getSource(), SideEffects::DefaultResource::get()); } @@ -3354,10 +3359,11 @@ return failure(); vector::TransferWriteOp writeToModify = writeOp; - auto defWrite = writeOp.source().getDefiningOp(); + auto defWrite = + writeOp.getSource().getDefiningOp(); while (defWrite) { if (checkSameValueWAW(writeOp, defWrite)) { - writeToModify.sourceMutable().assign(defWrite.source()); + writeToModify.getSourceMutable().assign(defWrite.getSource()); return success(); } if (!isDisjointTransferIndices( @@ -3369,7 +3375,7 @@ if (!defWrite->hasOneUse()) break; writeToModify = defWrite; - defWrite = defWrite.source().getDefiningOp(); + defWrite = defWrite.getSource().getDefiningOp(); } return failure(); } @@ -3410,7 +3416,7 @@ return failure(); if (xferOp.getVectorType().getRank() != xferOp.getShapedType().getRank()) return failure(); - if 
(xferOp.mask()) + if (xferOp.getMask()) return failure(); // Fold only if the TransferWriteOp completely overwrites the `source` with // a vector. I.e., the result of the TransferWriteOp is a new tensor whose @@ -3418,7 +3424,7 @@ if (!llvm::equal(xferOp.getVectorType().getShape(), xferOp.getShapedType().getShape())) return failure(); - if (!xferOp.permutation_map().isIdentity()) + if (!xferOp.getPermutationMap().isIdentity()) return failure(); // Bail on illegal rank-reduction: we need to check that the rank-reduced @@ -3453,7 +3459,7 @@ SmallVector indices = getValueOrCreateConstantIndexOp( rewriter, insertOp.getLoc(), insertOp.getMixedOffsets()); SmallVector inBounds(xferOp.getTransferRank(), true); - rewriter.replaceOpWithNewOp(insertOp, xferOp.vector(), + rewriter.replaceOpWithNewOp(insertOp, xferOp.getVector(), insertOp.dest(), indices, ArrayRef{inBounds}); return success(); @@ -3494,7 +3500,7 @@ if (resVecTy.getElementType() != memElemTy) return emitOpError("base and result element types should match"); - if (llvm::size(indices()) != memRefTy.getRank()) + if (llvm::size(getIndices()) != memRefTy.getRank()) return emitOpError("requires ") << memRefTy.getRank() << " indices"; return success(); } @@ -3527,7 +3533,7 @@ if (valueVecTy.getElementType() != memElemTy) return emitOpError("base and valueToStore element type should match"); - if (llvm::size(indices()) != memRefTy.getRank()) + if (llvm::size(getIndices()) != memRefTy.getRank()) return emitOpError("requires ") << memRefTy.getRank() << " indices"; return success(); } @@ -3549,7 +3555,7 @@ if (resVType.getElementType() != memType.getElementType()) return emitOpError("base and result element type should match"); - if (llvm::size(indices()) != memType.getRank()) + if (llvm::size(getIndices()) != memType.getRank()) return emitOpError("requires ") << memType.getRank() << " indices"; if (resVType.getDimSize(0) != maskVType.getDimSize(0)) return emitOpError("expected result dim to match mask dim"); @@ -3564,13 +3570,13 @@ using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(MaskedLoadOp load, PatternRewriter &rewriter) const override { - switch (get1DMaskFormat(load.mask())) { + switch (get1DMaskFormat(load.getMask())) { case MaskFormat::AllTrue: - rewriter.replaceOpWithNewOp(load, load.getType(), - load.base(), load.indices()); + rewriter.replaceOpWithNewOp( + load, load.getType(), load.getBase(), load.getIndices()); return success(); case MaskFormat::AllFalse: - rewriter.replaceOp(load, load.pass_thru()); + rewriter.replaceOp(load, load.getPassThru()); return success(); case MaskFormat::Unknown: return failure(); @@ -3602,7 +3608,7 @@ if (valueVType.getElementType() != memType.getElementType()) return emitOpError("base and valueToStore element type should match"); - if (llvm::size(indices()) != memType.getRank()) + if (llvm::size(getIndices()) != memType.getRank()) return emitOpError("requires ") << memType.getRank() << " indices"; if (valueVType.getDimSize(0) != maskVType.getDimSize(0)) return emitOpError("expected valueToStore dim to match mask dim"); @@ -3615,10 +3621,10 @@ using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(MaskedStoreOp store, PatternRewriter &rewriter) const override { - switch (get1DMaskFormat(store.mask())) { + switch (get1DMaskFormat(store.getMask())) { case MaskFormat::AllTrue: rewriter.replaceOpWithNewOp( - store, store.valueToStore(), store.base(), store.indices()); + store, store.getValueToStore(), store.getBase(), store.getIndices()); return success(); case 
MaskFormat::AllFalse: rewriter.eraseOp(store); @@ -3653,7 +3659,7 @@ if (resVType.getElementType() != memType.getElementType()) return emitOpError("base and result element type should match"); - if (llvm::size(indices()) != memType.getRank()) + if (llvm::size(getIndices()) != memType.getRank()) return emitOpError("requires ") << memType.getRank() << " indices"; if (resVType.getDimSize(0) != indVType.getDimSize(0)) return emitOpError("expected result dim to match indices dim"); @@ -3670,11 +3676,11 @@ using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(GatherOp gather, PatternRewriter &rewriter) const override { - switch (get1DMaskFormat(gather.mask())) { + switch (get1DMaskFormat(gather.getMask())) { case MaskFormat::AllTrue: return failure(); // no unmasked equivalent case MaskFormat::AllFalse: - rewriter.replaceOp(gather, gather.pass_thru()); + rewriter.replaceOp(gather, gather.getPassThru()); return success(); case MaskFormat::Unknown: return failure(); @@ -3701,7 +3707,7 @@ if (valueVType.getElementType() != memType.getElementType()) return emitOpError("base and valueToStore element type should match"); - if (llvm::size(indices()) != memType.getRank()) + if (llvm::size(getIndices()) != memType.getRank()) return emitOpError("requires ") << memType.getRank() << " indices"; if (valueVType.getDimSize(0) != indVType.getDimSize(0)) return emitOpError("expected valueToStore dim to match indices dim"); @@ -3716,7 +3722,7 @@ using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(ScatterOp scatter, PatternRewriter &rewriter) const override { - switch (get1DMaskFormat(scatter.mask())) { + switch (get1DMaskFormat(scatter.getMask())) { case MaskFormat::AllTrue: return failure(); // no unmasked equivalent case MaskFormat::AllFalse: @@ -3747,7 +3753,7 @@ if (resVType.getElementType() != memType.getElementType()) return emitOpError("base and result element type should match"); - if (llvm::size(indices()) != memType.getRank()) + if (llvm::size(getIndices()) != memType.getRank()) return emitOpError("requires ") << memType.getRank() << " indices"; if (resVType.getDimSize(0) != maskVType.getDimSize(0)) return emitOpError("expected result dim to match mask dim"); @@ -3762,13 +3768,13 @@ using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(ExpandLoadOp expand, PatternRewriter &rewriter) const override { - switch (get1DMaskFormat(expand.mask())) { + switch (get1DMaskFormat(expand.getMask())) { case MaskFormat::AllTrue: rewriter.replaceOpWithNewOp( - expand, expand.getType(), expand.base(), expand.indices()); + expand, expand.getType(), expand.getBase(), expand.getIndices()); return success(); case MaskFormat::AllFalse: - rewriter.replaceOp(expand, expand.pass_thru()); + rewriter.replaceOp(expand, expand.getPassThru()); return success(); case MaskFormat::Unknown: return failure(); @@ -3794,7 +3800,7 @@ if (valueVType.getElementType() != memType.getElementType()) return emitOpError("base and valueToStore element type should match"); - if (llvm::size(indices()) != memType.getRank()) + if (llvm::size(getIndices()) != memType.getRank()) return emitOpError("requires ") << memType.getRank() << " indices"; if (valueVType.getDimSize(0) != maskVType.getDimSize(0)) return emitOpError("expected valueToStore dim to match mask dim"); @@ -3807,11 +3813,11 @@ using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(CompressStoreOp compress, PatternRewriter &rewriter) const override { - switch (get1DMaskFormat(compress.mask())) { + switch 
(get1DMaskFormat(compress.getMask())) { case MaskFormat::AllTrue: rewriter.replaceOpWithNewOp( - compress, compress.valueToStore(), compress.base(), - compress.indices()); + compress, compress.getValueToStore(), compress.getBase(), + compress.getIndices()); return success(); case MaskFormat::AllFalse: rewriter.eraseOp(compress); @@ -3894,8 +3900,8 @@ } LogicalResult ShapeCastOp::verify() { - auto sourceVectorType = source().getType().dyn_cast_or_null(); - auto resultVectorType = result().getType().dyn_cast_or_null(); + auto sourceVectorType = getSource().getType().dyn_cast_or_null(); + auto resultVectorType = getResult().getType().dyn_cast_or_null(); // Check if source/result are of vector type. if (sourceVectorType && resultVectorType) @@ -3906,16 +3912,16 @@ OpFoldResult ShapeCastOp::fold(ArrayRef operands) { // Nop shape cast. - if (source().getType() == result().getType()) - return source(); + if (getSource().getType() == getResult().getType()) + return getSource(); // Canceling shape casts. - if (auto otherOp = source().getDefiningOp()) { - if (result().getType() == otherOp.source().getType()) - return otherOp.source(); + if (auto otherOp = getSource().getDefiningOp()) { + if (getResult().getType() == otherOp.getSource().getType()) + return otherOp.getSource(); // Only allows valid transitive folding. - VectorType srcType = otherOp.source().getType().cast(); + VectorType srcType = otherOp.getSource().getType().cast(); VectorType resultType = getResult().getType().cast(); if (srcType.getRank() < resultType.getRank()) { if (!isValidShapeCast(srcType.getShape(), resultType.getShape())) @@ -3927,7 +3933,7 @@ return {}; } - setOperand(otherOp.source()); + setOperand(otherOp.getSource()); return getResult(); } return {}; @@ -3941,7 +3947,8 @@ LogicalResult matchAndRewrite(ShapeCastOp shapeCastOp, PatternRewriter &rewriter) const override { - auto constantOp = shapeCastOp.source().getDefiningOp(); + auto constantOp = + shapeCastOp.getSource().getDefiningOp(); if (!constantOp) return failure(); // Only handle splat for now. @@ -3998,13 +4005,13 @@ OpFoldResult BitCastOp::fold(ArrayRef operands) { // Nop cast. - if (source().getType() == result().getType()) - return source(); + if (getSource().getType() == getResult().getType()) + return getSource(); // Canceling bitcasts. - if (auto otherOp = source().getDefiningOp()) - if (result().getType() == otherOp.source().getType()) - return otherOp.source(); + if (auto otherOp = getSource().getDefiningOp()) + if (getResult().getType() == otherOp.getSource().getType()) + return otherOp.getSource(); Attribute sourceConstant = operands.front(); if (!sourceConstant) @@ -4113,7 +4120,7 @@ return {}; } - return vector(); + return getVector(); } LogicalResult vector::TransposeOp::verify() { @@ -4123,7 +4130,7 @@ if (vectorType.getRank() != rank) return emitOpError("vector result rank mismatch: ") << rank; // Verify transposition array. - auto transpAttr = transp().getValue(); + auto transpAttr = getTransp().getValue(); int64_t size = transpAttr.size(); if (rank != size) return emitOpError("transposition length mismatch: ") << size; @@ -4168,7 +4175,7 @@ // Return if the input of 'transposeOp' is not defined by another transpose. vector::TransposeOp parentTransposeOp = - transposeOp.vector().getDefiningOp(); + transposeOp.getVector().getDefiningOp(); if (!parentTransposeOp) return failure(); @@ -4177,7 +4184,7 @@ // Replace 'transposeOp' with a new transpose operation. 
rewriter.replaceOpWithNewOp( transposeOp, transposeOp.getResult().getType(), - parentTransposeOp.vector(), + parentTransposeOp.getVector(), vector::getVectorSubscriptAttr(rewriter, permutation)); return success(); } @@ -4191,7 +4198,7 @@ } void vector::TransposeOp::getTransp(SmallVectorImpl &results) { - populateFromInt64AttrArray(transp(), results); + populateFromInt64AttrArray(getTransp(), results); } //===----------------------------------------------------------------------===// @@ -4202,23 +4209,23 @@ auto resultType = getResult().getType().cast(); // Check the corner case of 0-D vectors first. if (resultType.getRank() == 0) { - if (mask_dim_sizes().size() != 1) + if (getMaskDimSizes().size() != 1) return emitError("array attr must have length 1 for 0-D vectors"); - auto dim = mask_dim_sizes()[0].cast().getInt(); + auto dim = getMaskDimSizes()[0].cast().getInt(); if (dim != 0 && dim != 1) return emitError("mask dim size must be either 0 or 1 for 0-D vectors"); return success(); } // Verify that array attr size matches the rank of the vector result. - if (static_cast(mask_dim_sizes().size()) != resultType.getRank()) + if (static_cast(getMaskDimSizes().size()) != resultType.getRank()) return emitOpError( "must specify array attr of size equal vector result rank"); // Verify that each array attr element is in bounds of corresponding vector // result dimension size. auto resultShape = resultType.getShape(); SmallVector maskDimSizes; - for (const auto &it : llvm::enumerate(mask_dim_sizes())) { + for (const auto &it : llvm::enumerate(getMaskDimSizes())) { int64_t attrValue = it.value().cast().getInt(); if (attrValue < 0 || attrValue > resultShape[it.index()]) return emitOpError( @@ -4308,7 +4315,7 @@ VectorType initialType = getInitialValueType(); // Check reduction dimension < rank. 
  int64_t srcRank = srcType.getRank();
-  int64_t reductionDim = reduction_dim();
+  int64_t reductionDim = getReductionDim();
   if (reductionDim >= srcRank)
     return emitOpError("reduction dimension ")
            << reductionDim << " has to be less than " << srcRank;
diff --git a/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -55,9 +55,9 @@
     Value buffer =
         *state.getBuffer(rewriter, readOp->getOpOperand(0) /*source*/);
     replaceOpWithNewBufferizedOp(
-        rewriter, readOp, readOp.getVectorType(), buffer, readOp.indices(),
-        readOp.permutation_map(), readOp.padding(), readOp.mask(),
-        readOp.in_boundsAttr());
+        rewriter, readOp, readOp.getVectorType(), buffer, readOp.getIndices(),
+        readOp.getPermutationMap(), readOp.getPadding(), readOp.getMask(),
+        readOp.getInBoundsAttr());
     return success();
   }
 };
@@ -107,8 +107,9 @@
     if (failed(resultBuffer))
       return failure();
     rewriter.create(
-        writeOp.getLoc(), writeOp.vector(), *resultBuffer, writeOp.indices(),
-        writeOp.permutation_mapAttr(), writeOp.in_boundsAttr());
+        writeOp.getLoc(), writeOp.getVector(), *resultBuffer,
+        writeOp.getIndices(), writeOp.getPermutationMapAttr(),
+        writeOp.getInBoundsAttr());
     replaceOpWithBufferizedValues(rewriter, op, *resultBuffer);
     return success();
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
@@ -63,16 +63,16 @@
   Location loc = extractOp.getLoc();
   Value newSrcVector = rewriter.create(
-      loc, extractOp.vector(), splatZero(dropCount));
+      loc, extractOp.getVector(), splatZero(dropCount));
   // The offsets/sizes/strides attribute can have a less number of elements
   // than the input vector's rank: it is meant for the leading dimensions.
auto newOffsets = rewriter.getArrayAttr( - extractOp.offsets().getValue().drop_front(dropCount)); + extractOp.getOffsets().getValue().drop_front(dropCount)); auto newSizes = rewriter.getArrayAttr( - extractOp.sizes().getValue().drop_front(dropCount)); + extractOp.getSizes().getValue().drop_front(dropCount)); auto newStrides = rewriter.getArrayAttr( - extractOp.strides().getValue().drop_front(dropCount)); + extractOp.getStrides().getValue().drop_front(dropCount)); auto newExtractOp = rewriter.create( loc, newDstType, newSrcVector, newOffsets, newSizes, newStrides); @@ -106,14 +106,14 @@ Location loc = insertOp.getLoc(); Value newSrcVector = rewriter.create( - loc, insertOp.source(), splatZero(srcDropCount)); + loc, insertOp.getSource(), splatZero(srcDropCount)); Value newDstVector = rewriter.create( - loc, insertOp.dest(), splatZero(dstDropCount)); + loc, insertOp.getDest(), splatZero(dstDropCount)); auto newOffsets = rewriter.getArrayAttr( - insertOp.offsets().getValue().take_back(newDstType.getRank())); + insertOp.getOffsets().getValue().take_back(newDstType.getRank())); auto newStrides = rewriter.getArrayAttr( - insertOp.strides().getValue().take_back(newSrcType.getRank())); + insertOp.getStrides().getValue().take_back(newSrcType.getRank())); auto newInsertOp = rewriter.create( loc, newDstType, newSrcVector, newDstVector, newOffsets, newStrides); @@ -138,10 +138,10 @@ if (read.getTransferRank() == 0) return failure(); - if (read.mask()) + if (read.getMask()) return failure(); - auto shapedType = read.source().getType().cast(); + auto shapedType = read.getSource().getType().cast(); if (shapedType.getElementType() != read.getVectorType().getElementType()) return failure(); @@ -151,7 +151,7 @@ if (newType == oldType) return failure(); - AffineMap oldMap = read.permutation_map(); + AffineMap oldMap = read.getPermutationMap(); ArrayRef newResults = oldMap.getResults().take_back(newType.getRank()); AffineMap newMap = @@ -159,13 +159,13 @@ rewriter.getContext()); ArrayAttr inBoundsAttr; - if (read.in_bounds()) + if (read.getInBounds()) inBoundsAttr = rewriter.getArrayAttr( - read.in_boundsAttr().getValue().take_back(newType.getRank())); + read.getInBoundsAttr().getValue().take_back(newType.getRank())); auto newRead = rewriter.create( - read.getLoc(), newType, read.source(), read.indices(), - AffineMapAttr::get(newMap), read.padding(), /*mask=*/Value(), + read.getLoc(), newType, read.getSource(), read.getIndices(), + AffineMapAttr::get(newMap), read.getPadding(), /*mask=*/Value(), inBoundsAttr); rewriter.replaceOpWithNewOp(read, oldType, newRead); @@ -186,10 +186,10 @@ if (write.getTransferRank() == 0) return failure(); - if (write.mask()) + if (write.getMask()) return failure(); - auto shapedType = write.source().getType().dyn_cast(); + auto shapedType = write.getSource().getType().dyn_cast(); if (shapedType.getElementType() != write.getVectorType().getElementType()) return failure(); @@ -199,7 +199,7 @@ return failure(); int64_t dropDim = oldType.getRank() - newType.getRank(); - AffineMap oldMap = write.permutation_map(); + AffineMap oldMap = write.getPermutationMap(); ArrayRef newResults = oldMap.getResults().take_back(newType.getRank()); AffineMap newMap = @@ -207,14 +207,14 @@ rewriter.getContext()); ArrayAttr inBoundsAttr; - if (write.in_bounds()) + if (write.getInBounds()) inBoundsAttr = rewriter.getArrayAttr( - write.in_boundsAttr().getValue().take_back(newType.getRank())); + write.getInBoundsAttr().getValue().take_back(newType.getRank())); auto newVector = rewriter.create( - 
write.getLoc(), write.vector(), splatZero(dropDim)); + write.getLoc(), write.getVector(), splatZero(dropDim)); rewriter.replaceOpWithNewOp( - write, newVector, write.source(), write.indices(), + write, newVector, write.getSource(), write.getIndices(), AffineMapAttr::get(newMap), inBoundsAttr); return success(); @@ -237,7 +237,7 @@ if (oldAccType.getRank() < 2) return failure(); // TODO: implement masks. - if (llvm::size(contractOp.masks()) != 0) + if (llvm::size(contractOp.getMasks()) != 0) return failure(); if (oldAccType.getShape()[0] != 1) return failure(); @@ -248,7 +248,7 @@ auto oldIndexingMaps = contractOp.getIndexingMaps(); SmallVector newIndexingMaps; - auto oldIteratorTypes = contractOp.iterator_types(); + auto oldIteratorTypes = contractOp.getIteratorTypes(); SmallVector newIteratorTypes; int64_t dimToDrop = oldIndexingMaps[2].getDimPosition(0); @@ -264,8 +264,8 @@ newIteratorTypes.push_back(it.value()); } - SmallVector operands = {contractOp.lhs(), contractOp.rhs(), - contractOp.acc()}; + SmallVector operands = {contractOp.getLhs(), contractOp.getRhs(), + contractOp.getAcc()}; SmallVector newOperands; for (const auto &it : llvm::enumerate(oldIndexingMaps)) { @@ -336,7 +336,7 @@ auto newContractOp = rewriter.create( contractOp.getLoc(), newOperands[0], newOperands[1], newOperands[2], rewriter.getAffineMapArrayAttr(newIndexingMaps), - rewriter.getArrayAttr(newIteratorTypes), contractOp.kind()); + rewriter.getArrayAttr(newIteratorTypes), contractOp.getKind()); rewriter.replaceOpWithNewOp( contractOp, contractOp->getResultTypes()[0], newContractOp); return success(); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp @@ -62,7 +62,7 @@ auto srcType = op.getSourceVectorType(); auto dstType = op.getDestVectorType(); - if (op.offsets().getValue().empty()) + if (op.getOffsets().getValue().empty()) return failure(); auto loc = op.getLoc(); @@ -74,21 +74,21 @@ int64_t rankRest = dstType.getRank() - rankDiff; // Extract / insert the subvector of matching rank and InsertStridedSlice // on it. - Value extracted = - rewriter.create(loc, op.dest(), - getI64SubArray(op.offsets(), /*dropFront=*/0, - /*dropBack=*/rankRest)); + Value extracted = rewriter.create( + loc, op.getDest(), + getI64SubArray(op.getOffsets(), /*dropFront=*/0, + /*dropBack=*/rankRest)); // A different pattern will kick in for InsertStridedSlice with matching // ranks. 
auto stridedSliceInnerOp = rewriter.create( - loc, op.source(), extracted, - getI64SubArray(op.offsets(), /*dropFront=*/rankDiff), - getI64SubArray(op.strides(), /*dropFront=*/0)); + loc, op.getSource(), extracted, + getI64SubArray(op.getOffsets(), /*dropFront=*/rankDiff), + getI64SubArray(op.getStrides(), /*dropFront=*/0)); rewriter.replaceOpWithNewOp( - op, stridedSliceInnerOp.getResult(), op.dest(), - getI64SubArray(op.offsets(), /*dropFront=*/0, + op, stridedSliceInnerOp.getResult(), op.getDest(), + getI64SubArray(op.getOffsets(), /*dropFront=*/0, /*dropBack=*/rankRest)); return success(); } @@ -118,7 +118,7 @@ auto srcType = op.getSourceVectorType(); auto dstType = op.getDestVectorType(); - if (op.offsets().getValue().empty()) + if (op.getOffsets().getValue().empty()) return failure(); int64_t srcRank = srcType.getRank(); @@ -128,18 +128,18 @@ return failure(); if (srcType == dstType) { - rewriter.replaceOp(op, op.source()); + rewriter.replaceOp(op, op.getSource()); return success(); } int64_t offset = - op.offsets().getValue().front().cast().getInt(); + op.getOffsets().getValue().front().cast().getInt(); int64_t size = srcType.getShape().front(); int64_t stride = - op.strides().getValue().front().cast().getInt(); + op.getStrides().getValue().front().cast().getInt(); auto loc = op.getLoc(); - Value res = op.dest(); + Value res = op.getDest(); if (srcRank == 1) { int nSrc = srcType.getShape().front(); @@ -148,8 +148,8 @@ SmallVector offsets(nDest, 0); for (int64_t i = 0; i < nSrc; ++i) offsets[i] = i; - Value scaledSource = - rewriter.create(loc, op.source(), op.source(), offsets); + Value scaledSource = rewriter.create(loc, op.getSource(), + op.getSource(), offsets); // 2. Create a mask where we take the value from scaledSource of dest // depending on the offset. @@ -162,7 +162,7 @@ } // 3. Replace with a ShuffleOp. - rewriter.replaceOpWithNewOp(op, scaledSource, op.dest(), + rewriter.replaceOpWithNewOp(op, scaledSource, op.getDest(), offsets); return success(); @@ -172,17 +172,17 @@ for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e; off += stride, ++idx) { // 1. extract the proper subvector (or element) from source - Value extractedSource = extractOne(rewriter, loc, op.source(), idx); + Value extractedSource = extractOne(rewriter, loc, op.getSource(), idx); if (extractedSource.getType().isa()) { // 2. If we have a vector, extract the proper subvector from destination // Otherwise we are at the element level and no need to recurse. - Value extractedDest = extractOne(rewriter, loc, op.dest(), off); + Value extractedDest = extractOne(rewriter, loc, op.getDest(), off); // 3. Reduce the problem to lowering a new InsertStridedSlice op with // smaller rank. extractedSource = rewriter.create( loc, extractedSource, extractedDest, - getI64SubArray(op.offsets(), /* dropFront=*/1), - getI64SubArray(op.strides(), /* dropFront=*/1)); + getI64SubArray(op.getOffsets(), /* dropFront=*/1), + getI64SubArray(op.getStrides(), /* dropFront=*/1)); } // 4. Insert the extractedSource into the res vector. 
res = insertOne(rewriter, loc, extractedSource, res, off); @@ -212,27 +212,28 @@ PatternRewriter &rewriter) const override { auto dstType = op.getType(); - assert(!op.offsets().getValue().empty() && "Unexpected empty offsets"); + assert(!op.getOffsets().getValue().empty() && "Unexpected empty offsets"); int64_t offset = - op.offsets().getValue().front().cast().getInt(); - int64_t size = op.sizes().getValue().front().cast().getInt(); + op.getOffsets().getValue().front().cast().getInt(); + int64_t size = + op.getSizes().getValue().front().cast().getInt(); int64_t stride = - op.strides().getValue().front().cast().getInt(); + op.getStrides().getValue().front().cast().getInt(); auto loc = op.getLoc(); auto elemType = dstType.getElementType(); assert(elemType.isSignlessIntOrIndexOrFloat()); // Single offset can be more efficiently shuffled. - if (op.offsets().getValue().size() == 1) { + if (op.getOffsets().getValue().size() == 1) { SmallVector offsets; offsets.reserve(size); for (int64_t off = offset, e = offset + size * stride; off < e; off += stride) offsets.push_back(off); - rewriter.replaceOpWithNewOp(op, dstType, op.vector(), - op.vector(), + rewriter.replaceOpWithNewOp(op, dstType, op.getVector(), + op.getVector(), rewriter.getI64ArrayAttr(offsets)); return success(); } @@ -243,11 +244,11 @@ Value res = rewriter.create(loc, dstType, zero); for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e; off += stride, ++idx) { - Value one = extractOne(rewriter, loc, op.vector(), off); + Value one = extractOne(rewriter, loc, op.getVector(), off); Value extracted = rewriter.create( - loc, one, getI64SubArray(op.offsets(), /* dropFront=*/1), - getI64SubArray(op.sizes(), /* dropFront=*/1), - getI64SubArray(op.strides(), /* dropFront=*/1)); + loc, one, getI64SubArray(op.getOffsets(), /* dropFront=*/1), + getI64SubArray(op.getSizes(), /* dropFront=*/1), + getI64SubArray(op.getStrides(), /* dropFront=*/1)); res = insertOne(rewriter, loc, extracted, res, idx); } rewriter.replaceOp(op, res); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorMultiDimReductionTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorMultiDimReductionTransforms.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorMultiDimReductionTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorMultiDimReductionTransforms.cpp @@ -38,13 +38,13 @@ LogicalResult matchAndRewrite(vector::MultiDimReductionOp multiReductionOp, PatternRewriter &rewriter) const override { - auto src = multiReductionOp.source(); + auto src = multiReductionOp.getSource(); auto loc = multiReductionOp.getLoc(); auto srcRank = multiReductionOp.getSourceVectorType().getRank(); // Separate reduction and parallel dims auto reductionDimsRange = - multiReductionOp.reduction_dims().getAsValueRange(); + multiReductionOp.getReductionDims().getAsValueRange(); auto reductionDims = llvm::to_vector<4>(llvm::map_range( reductionDimsRange, [](const APInt &a) { return a.getZExtValue(); })); llvm::SmallDenseSet reductionDimsSet(reductionDims.begin(), @@ -86,8 +86,8 @@ reductionMask[i] = true; } rewriter.replaceOpWithNewOp( - multiReductionOp, transposeOp.result(), reductionMask, - multiReductionOp.kind()); + multiReductionOp, transposeOp.getResult(), reductionMask, + multiReductionOp.getKind()); return success(); } @@ -186,17 +186,17 @@ auto castedType = VectorType::get( vectorShape, multiReductionOp.getSourceVectorType().getElementType()); Value cast = rewriter.create( - loc, castedType, multiReductionOp.source()); + loc, castedType, 
multiReductionOp.getSource()); // 5. Creates the flattened form of vector.multi_reduction with inner/outer // most dim as reduction. auto newOp = rewriter.create( - loc, cast, mask, multiReductionOp.kind()); + loc, cast, mask, multiReductionOp.getKind()); // 6. If there are no parallel shapes, the result is a scalar. // TODO: support 0-d vectors when available. if (parallelShapes.empty()) { - rewriter.replaceOp(multiReductionOp, newOp.dest()); + rewriter.replaceOp(multiReductionOp, newOp.getDest()); return success(); } @@ -205,7 +205,7 @@ parallelShapes, multiReductionOp.getSourceVectorType().getElementType()); rewriter.replaceOpWithNewOp( - multiReductionOp, outputCastedType, newOp.dest()); + multiReductionOp, outputCastedType, newOp.getDest()); return success(); } @@ -238,12 +238,12 @@ return failure(); Value result = - rewriter.create(loc, multiReductionOp.source(), 0) + rewriter.create(loc, multiReductionOp.getSource(), 0) .getResult(); for (int64_t i = 1; i < srcShape[0]; i++) { - auto operand = - rewriter.create(loc, multiReductionOp.source(), i); - result = makeArithReduction(rewriter, loc, multiReductionOp.kind(), + auto operand = rewriter.create( + loc, multiReductionOp.getSource(), i); + result = makeArithReduction(rewriter, loc, multiReductionOp.getKind(), operand, result); } @@ -275,9 +275,9 @@ for (int i = 0; i < outerDim; ++i) { auto v = rewriter.create( - loc, multiReductionOp.source(), ArrayRef{i}); - auto reducedValue = - rewriter.create(loc, multiReductionOp.kind(), v); + loc, multiReductionOp.getSource(), ArrayRef{i}); + auto reducedValue = rewriter.create( + loc, multiReductionOp.getKind(), v); result = rewriter.create( loc, reducedValue, result, rewriter.create(loc, i)); @@ -317,9 +317,9 @@ /// vector.extract(vector.multi_reduce(vector.shape_cast(v, 1xk)), 0) Value cast = rewriter.create( - loc, castedType, multiReductionOp.source()); + loc, castedType, multiReductionOp.getSource()); Value reduced = rewriter.create( - loc, cast, mask, multiReductionOp.kind()); + loc, cast, mask, multiReductionOp.getKind()); rewriter.replaceOpWithNewOp(multiReductionOp, reduced, ArrayRef{0}); return success(); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp @@ -96,7 +96,7 @@ << "\n"); llvm::SmallVector reads; Operation *firstOverwriteCandidate = nullptr; - for (auto *user : write.source().getUsers()) { + for (auto *user : write.getSource().getUsers()) { if (user == write.getOperation()) continue; if (auto nextWrite = dyn_cast(user)) { @@ -163,7 +163,7 @@ << "\n"); SmallVector blockingWrites; vector::TransferWriteOp lastwrite = nullptr; - for (Operation *user : read.source().getUsers()) { + for (Operation *user : read.getSource().getUsers()) { if (isa(user)) continue; if (auto write = dyn_cast(user)) { @@ -207,7 +207,7 @@ LLVM_DEBUG(DBGS() << "Forward value from " << *lastwrite.getOperation() << " to: " << *read.getOperation() << "\n"); - read.replaceAllUsesWith(lastwrite.vector()); + read.replaceAllUsesWith(lastwrite.getVector()); opToErase.push_back(read.getOperation()); } @@ -259,9 +259,9 @@ LogicalResult matchAndRewrite(vector::TransferReadOp transferReadOp, PatternRewriter &rewriter) const override { auto loc = transferReadOp.getLoc(); - Value vector = transferReadOp.vector(); + Value vector = transferReadOp.getVector(); VectorType vectorType = 
vector.getType().cast(); - Value source = transferReadOp.source(); + Value source = transferReadOp.getSource(); MemRefType sourceType = source.getType().dyn_cast(); // TODO: support tensor types. if (!sourceType || !sourceType.hasStaticShape()) @@ -271,7 +271,7 @@ // TODO: generalize this pattern, relax the requirements here. if (transferReadOp.hasOutOfBoundsDim()) return failure(); - if (!transferReadOp.permutation_map().isMinorIdentity()) + if (!transferReadOp.getPermutationMap().isMinorIdentity()) return failure(); int reducedRank = getReducedRank(sourceType.getShape()); if (reducedRank == sourceType.getRank()) @@ -279,7 +279,7 @@ if (reducedRank != vectorType.getRank()) return failure(); // This pattern requires the vector shape to match the // reduced source shape. - if (llvm::any_of(transferReadOp.indices(), + if (llvm::any_of(transferReadOp.getIndices(), [](Value v) { return !isZero(v); })) return failure(); Value reducedShapeSource = @@ -302,9 +302,9 @@ LogicalResult matchAndRewrite(vector::TransferWriteOp transferWriteOp, PatternRewriter &rewriter) const override { auto loc = transferWriteOp.getLoc(); - Value vector = transferWriteOp.vector(); + Value vector = transferWriteOp.getVector(); VectorType vectorType = vector.getType().cast(); - Value source = transferWriteOp.source(); + Value source = transferWriteOp.getSource(); MemRefType sourceType = source.getType().dyn_cast(); // TODO: support tensor type. if (!sourceType || !sourceType.hasStaticShape()) @@ -314,7 +314,7 @@ // TODO: generalize this pattern, relax the requirements here. if (transferWriteOp.hasOutOfBoundsDim()) return failure(); - if (!transferWriteOp.permutation_map().isMinorIdentity()) + if (!transferWriteOp.getPermutationMap().isMinorIdentity()) return failure(); int reducedRank = getReducedRank(sourceType.getShape()); if (reducedRank == sourceType.getRank()) @@ -322,7 +322,7 @@ if (reducedRank != vectorType.getRank()) return failure(); // This pattern requires the vector shape to match the // reduced source shape. - if (llvm::any_of(transferWriteOp.indices(), + if (llvm::any_of(transferWriteOp.getIndices(), [](Value v) { return !isZero(v); })) return failure(); Value reducedShapeSource = @@ -366,9 +366,9 @@ LogicalResult matchAndRewrite(vector::TransferReadOp transferReadOp, PatternRewriter &rewriter) const override { auto loc = transferReadOp.getLoc(); - Value vector = transferReadOp.vector(); + Value vector = transferReadOp.getVector(); VectorType vectorType = vector.getType().cast(); - Value source = transferReadOp.source(); + Value source = transferReadOp.getSource(); MemRefType sourceType = source.getType().dyn_cast(); // Contiguity check is valid on tensors only. if (!sourceType) @@ -386,11 +386,11 @@ // TODO: generalize this pattern, relax the requirements here. 
if (transferReadOp.hasOutOfBoundsDim()) return failure(); - if (!transferReadOp.permutation_map().isMinorIdentity()) + if (!transferReadOp.getPermutationMap().isMinorIdentity()) return failure(); - if (transferReadOp.mask()) + if (transferReadOp.getMask()) return failure(); - if (llvm::any_of(transferReadOp.indices(), + if (llvm::any_of(transferReadOp.getIndices(), [](Value v) { return !isZero(v); })) return failure(); Value c0 = rewriter.create(loc, 0); @@ -418,9 +418,9 @@ LogicalResult matchAndRewrite(vector::TransferWriteOp transferWriteOp, PatternRewriter &rewriter) const override { auto loc = transferWriteOp.getLoc(); - Value vector = transferWriteOp.vector(); + Value vector = transferWriteOp.getVector(); VectorType vectorType = vector.getType().cast(); - Value source = transferWriteOp.source(); + Value source = transferWriteOp.getSource(); MemRefType sourceType = source.getType().dyn_cast(); // Contiguity check is valid on tensors only. if (!sourceType) @@ -438,11 +438,11 @@ // TODO: generalize this pattern, relax the requirements here. if (transferWriteOp.hasOutOfBoundsDim()) return failure(); - if (!transferWriteOp.permutation_map().isMinorIdentity()) + if (!transferWriteOp.getPermutationMap().isMinorIdentity()) return failure(); - if (transferWriteOp.mask()) + if (transferWriteOp.getMask()) return failure(); - if (llvm::any_of(transferWriteOp.indices(), + if (llvm::any_of(transferWriteOp.getIndices(), [](Value v) { return !isZero(v); })) return failure(); Value c0 = rewriter.create(loc, 0); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferPermutationMapRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferPermutationMapRewritePatterns.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferPermutationMapRewritePatterns.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferPermutationMapRewritePatterns.cpp @@ -62,7 +62,7 @@ return failure(); SmallVector permutation; - AffineMap map = op.permutation_map(); + AffineMap map = op.getPermutationMap(); if (map.getNumResults() == 0) return failure(); if (!map.isPermutationOfMinorIdentityWithBroadcasting(permutation)) @@ -85,7 +85,7 @@ // Transpose mask operand. Value newMask; - if (op.mask()) { + if (op.getMask()) { // Remove unused dims from the permutation map. E.g.: // E.g.: (d0, d1, d2, d3, d4, d5) -> (d5, 0, d3, 0, d2) // comp = (d0, d1, d2) -> (d2, 0, d1, 0 d0) @@ -99,22 +99,23 @@ maskTransposeIndices.push_back(expr.getPosition()); } - newMask = rewriter.create(op.getLoc(), op.mask(), + newMask = rewriter.create(op.getLoc(), op.getMask(), maskTransposeIndices); } // Transpose in_bounds attribute. ArrayAttr newInBoundsAttr = - op.in_bounds() ? transposeInBoundsAttr( - rewriter, op.in_bounds().getValue(), permutation) - : ArrayAttr(); + op.getInBounds() + ? transposeInBoundsAttr(rewriter, op.getInBounds().getValue(), + permutation) + : ArrayAttr(); // Generate new transfer_read operation. VectorType newReadType = VectorType::get(newVectorShape, op.getVectorType().getElementType()); Value newRead = rewriter.create( - op.getLoc(), newReadType, op.source(), op.indices(), - AffineMapAttr::get(newMap), op.padding(), newMask, newInBoundsAttr); + op.getLoc(), newReadType, op.getSource(), op.getIndices(), + AffineMapAttr::get(newMap), op.getPadding(), newMask, newInBoundsAttr); // Transpose result of transfer_read. 
SmallVector transposePerm(permutation.begin(), permutation.end()); @@ -151,7 +152,7 @@ return failure(); SmallVector permutation; - AffineMap map = op.permutation_map(); + AffineMap map = op.getPermutationMap(); if (map.isMinorIdentity()) return failure(); if (!map.isPermutationOfMinorIdentityWithBroadcasting(permutation)) @@ -169,23 +170,24 @@ }); // Transpose mask operand. - Value newMask = op.mask() ? rewriter.create( - op.getLoc(), op.mask(), indices) - : Value(); + Value newMask = op.getMask() ? rewriter.create( + op.getLoc(), op.getMask(), indices) + : Value(); // Transpose in_bounds attribute. ArrayAttr newInBoundsAttr = - op.in_bounds() ? transposeInBoundsAttr( - rewriter, op.in_bounds().getValue(), permutation) - : ArrayAttr(); + op.getInBounds() + ? transposeInBoundsAttr(rewriter, op.getInBounds().getValue(), + permutation) + : ArrayAttr(); // Generate new transfer_write operation. - Value newVec = - rewriter.create(op.getLoc(), op.vector(), indices); + Value newVec = rewriter.create( + op.getLoc(), op.getVector(), indices); auto newMap = AffineMap::getMinorIdentityMap( map.getNumDims(), map.getNumResults(), rewriter.getContext()); rewriter.replaceOpWithNewOp( - op, newVec, op.source(), op.indices(), AffineMapAttr::get(newMap), + op, newVec, op.getSource(), op.getIndices(), AffineMapAttr::get(newMap), newMask, newInBoundsAttr); return success(); @@ -209,7 +211,7 @@ if (op.getTransferRank() == 0) return failure(); - AffineMap map = op.permutation_map(); + AffineMap map = op.getPermutationMap(); unsigned numLeadingBroadcast = 0; for (auto expr : map.getResults()) { auto dimExpr = expr.dyn_cast(); @@ -237,12 +239,12 @@ if (reducedShapeRank == 0) { Value newRead; if (op.getShapedType().isa()) { - newRead = rewriter.create(op.getLoc(), op.source(), - op.indices()); + newRead = rewriter.create( + op.getLoc(), op.getSource(), op.getIndices()); } else { newRead = rewriter.create( - op.getLoc(), originalVecType.getElementType(), op.source(), - op.indices()); + op.getLoc(), originalVecType.getElementType(), op.getSource(), + op.getIndices()); } rewriter.replaceOpWithNewOp(op, originalVecType, newRead); @@ -256,13 +258,14 @@ VectorType newReadType = VectorType::get(newShape, originalVecType.getElementType()); ArrayAttr newInBoundsAttr = - op.in_bounds() + op.getInBounds() ? 
rewriter.getArrayAttr( - op.in_boundsAttr().getValue().take_back(reducedShapeRank)) + op.getInBoundsAttr().getValue().take_back(reducedShapeRank)) : ArrayAttr(); Value newRead = rewriter.create( - op.getLoc(), newReadType, op.source(), op.indices(), - AffineMapAttr::get(newMap), op.padding(), op.mask(), newInBoundsAttr); + op.getLoc(), newReadType, op.getSource(), op.getIndices(), + AffineMapAttr::get(newMap), op.getPadding(), op.getMask(), + newInBoundsAttr); rewriter.replaceOpWithNewOp(op, originalVecType, newRead); return success(); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp @@ -249,7 +249,7 @@ MemRefType compatibleMemRefType, Value alloc) { Location loc = xferOp.getLoc(); Value zero = b.create(loc, 0); - Value memref = xferOp.source(); + Value memref = xferOp.getSource(); return b.create( loc, returnTypes, inBoundsCond, [&](OpBuilder &b, Location loc) { @@ -257,12 +257,12 @@ if (compatibleMemRefType != xferOp.getShapedType()) res = b.create(loc, compatibleMemRefType, memref); scf::ValueVector viewAndIndices{res}; - viewAndIndices.insert(viewAndIndices.end(), xferOp.indices().begin(), - xferOp.indices().end()); + viewAndIndices.insert(viewAndIndices.end(), xferOp.getIndices().begin(), + xferOp.getIndices().end()); b.create(loc, viewAndIndices); }, [&](OpBuilder &b, Location loc) { - b.create(loc, ValueRange{xferOp.padding()}, + b.create(loc, ValueRange{xferOp.getPadding()}, ValueRange{alloc}); // Take partial subview of memref which guarantees no dimension // overflows. @@ -304,7 +304,7 @@ Location loc = xferOp.getLoc(); scf::IfOp fullPartialIfOp; Value zero = b.create(loc, 0); - Value memref = xferOp.source(); + Value memref = xferOp.getSource(); return b.create( loc, returnTypes, inBoundsCond, [&](OpBuilder &b, Location loc) { @@ -312,8 +312,8 @@ if (compatibleMemRefType != xferOp.getShapedType()) res = b.create(loc, compatibleMemRefType, memref); scf::ValueVector viewAndIndices{res}; - viewAndIndices.insert(viewAndIndices.end(), xferOp.indices().begin(), - xferOp.indices().end()); + viewAndIndices.insert(viewAndIndices.end(), xferOp.getIndices().begin(), + xferOp.getIndices().end()); b.create(loc, viewAndIndices); }, [&](OpBuilder &b, Location loc) { @@ -354,7 +354,7 @@ MemRefType compatibleMemRefType, Value alloc) { Location loc = xferOp.getLoc(); Value zero = b.create(loc, 0); - Value memref = xferOp.source(); + Value memref = xferOp.getSource(); return b .create( loc, returnTypes, inBoundsCond, @@ -364,8 +364,8 @@ res = b.create(loc, compatibleMemRefType, memref); scf::ValueVector viewAndIndices{res}; viewAndIndices.insert(viewAndIndices.end(), - xferOp.indices().begin(), - xferOp.indices().end()); + xferOp.getIndices().begin(), + xferOp.getIndices().end()); b.create(loc, viewAndIndices); }, [&](OpBuilder &b, Location loc) { @@ -430,9 +430,10 @@ b.create(loc, notInBounds, [&](OpBuilder &b, Location loc) { BlockAndValueMapping mapping; Value load = b.create( - loc, b.create( - loc, MemRefType::get({}, xferOp.vector().getType()), alloc)); - mapping.map(xferOp.vector(), load); + loc, + b.create( + loc, MemRefType::get({}, xferOp.getVector().getType()), alloc)); + mapping.map(xferOp.getVector(), load); b.clone(*xferOp.getOperation(), mapping); b.create(loc, ValueRange{}); }); @@ -530,9 +531,9 @@ if 
(!(xferReadOp || xferWriteOp)) return failure(); - if (xferWriteOp && xferWriteOp.mask()) + if (xferWriteOp && xferWriteOp.getMask()) return failure(); - if (xferReadOp && xferReadOp.mask()) + if (xferReadOp && xferReadOp.getMask()) return failure(); } @@ -601,8 +602,8 @@ // The operation is cloned to prevent deleting information needed for the // later IR creation. BlockAndValueMapping mapping; - mapping.map(xferWriteOp.source(), memrefAndIndices.front()); - mapping.map(xferWriteOp.indices(), memrefAndIndices.drop_front()); + mapping.map(xferWriteOp.getSource(), memrefAndIndices.front()); + mapping.map(xferWriteOp.getIndices(), memrefAndIndices.drop_front()); auto *clone = b.clone(*xferWriteOp, mapping); clone->setAttr(xferWriteOp.getInBoundsAttrName(), inBoundsAttr); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp @@ -167,19 +167,19 @@ PatternRewriter &rewriter) const override { // Check if 'shapeCastOp' has vector source/result type. auto sourceVectorType = - shapeCastOp.source().getType().dyn_cast_or_null(); + shapeCastOp.getSource().getType().dyn_cast_or_null(); auto resultVectorType = - shapeCastOp.result().getType().dyn_cast_or_null(); + shapeCastOp.getResult().getType().dyn_cast_or_null(); if (!sourceVectorType || !resultVectorType) return failure(); // Check if shape cast op source operand is also a shape cast op. auto sourceShapeCastOp = dyn_cast_or_null( - shapeCastOp.source().getDefiningOp()); + shapeCastOp.getSource().getDefiningOp()); if (!sourceShapeCastOp) return failure(); auto operandSourceVectorType = - sourceShapeCastOp.source().getType().cast(); + sourceShapeCastOp.getSource().getType().cast(); auto operandResultVectorType = sourceShapeCastOp.getType(); // Check if shape cast operations invert each other. @@ -187,7 +187,7 @@ operandResultVectorType != sourceVectorType) return failure(); - rewriter.replaceOp(shapeCastOp, sourceShapeCastOp.source()); + rewriter.replaceOp(shapeCastOp, sourceShapeCastOp.getSource()); return success(); } }; @@ -206,7 +206,7 @@ // Scalar to any vector can use splat. if (!srcType) { - rewriter.replaceOpWithNewOp(op, dstType, op.source()); + rewriter.replaceOpWithNewOp(op, dstType, op.getSource()); return success(); } @@ -218,9 +218,9 @@ if (srcRank <= 1 && dstRank == 1) { Value ext; if (srcRank == 0) - ext = rewriter.create(loc, op.source()); + ext = rewriter.create(loc, op.getSource()); else - ext = rewriter.create(loc, op.source(), 0); + ext = rewriter.create(loc, op.getSource(), 0); rewriter.replaceOpWithNewOp(op, dstType, ext); return success(); } @@ -239,7 +239,7 @@ VectorType resType = VectorType::get(dstType.getShape().drop_front(), eltType); Value bcst = - rewriter.create(loc, resType, op.source()); + rewriter.create(loc, resType, op.getSource()); Value result = rewriter.create( loc, dstType, rewriter.getZeroAttr(dstType)); for (int64_t d = 0, dim = dstType.getDimSize(0); d < dim; ++d) @@ -259,7 +259,7 @@ // All trailing dimensions are the same. Simply pass through. if (m == -1) { - rewriter.replaceOp(op, op.source()); + rewriter.replaceOp(op, op.getSource()); return success(); } @@ -284,14 +284,14 @@ loc, dstType, rewriter.getZeroAttr(dstType)); if (m == 0) { // Stetch at start. 
- Value ext = rewriter.create(loc, op.source(), 0); + Value ext = rewriter.create(loc, op.getSource(), 0); Value bcst = rewriter.create(loc, resType, ext); for (int64_t d = 0, dim = dstType.getDimSize(0); d < dim; ++d) result = rewriter.create(loc, bcst, result, d); } else { // Stetch not at start. for (int64_t d = 0, dim = dstType.getDimSize(0); d < dim; ++d) { - Value ext = rewriter.create(loc, op.source(), d); + Value ext = rewriter.create(loc, op.getSource(), d); Value bcst = rewriter.create(loc, resType, ext); result = rewriter.create(loc, bcst, result, d); } @@ -337,13 +337,13 @@ PatternRewriter &rewriter) const override { auto loc = op.getLoc(); - Value input = op.vector(); + Value input = op.getVector(); VectorType inputType = op.getVectorType(); VectorType resType = op.getResultType(); // Set up convenience transposition table. SmallVector transp; - for (auto attr : op.transp()) + for (auto attr : op.getTransp()) transp.push_back(attr.cast().getInt()); if (vectorTransformOptions.vectorTransposeLowering == @@ -432,7 +432,7 @@ return rewriter.notifyMatchFailure(op, "Not a 2D transpose"); SmallVector transp; - for (auto attr : op.transp()) + for (auto attr : op.getTransp()) transp.push_back(attr.cast().getInt()); if (transp[0] != 1 && transp[1] != 0) return rewriter.notifyMatchFailure(op, "Not a 2D transpose permutation"); @@ -443,7 +443,8 @@ int64_t m = srcType.getShape().front(), n = srcType.getShape().back(); Value casted = rewriter.create( - loc, VectorType::get({m * n}, srcType.getElementType()), op.vector()); + loc, VectorType::get({m * n}, srcType.getElementType()), + op.getVector()); SmallVector mask; mask.reserve(m * n); for (int64_t j = 0; j < n; ++j) @@ -489,15 +490,15 @@ VectorType resType = op.getVectorType(); Type eltType = resType.getElementType(); bool isInt = eltType.isa(); - Value acc = (op.acc().empty()) ? nullptr : op.acc()[0]; - vector::CombiningKind kind = op.kind(); + Value acc = (op.getAcc().empty()) ? nullptr : op.getAcc()[0]; + vector::CombiningKind kind = op.getKind(); if (!rhsType) { // Special case: AXPY operation. - Value b = rewriter.create(loc, lhsType, op.rhs()); + Value b = rewriter.create(loc, lhsType, op.getRhs()); Optional mult = - isInt ? genMultI(loc, op.lhs(), b, acc, kind, rewriter) - : genMultF(loc, op.lhs(), b, acc, kind, rewriter); + isInt ? genMultI(loc, op.getLhs(), b, acc, kind, rewriter) + : genMultF(loc, op.getLhs(), b, acc, kind, rewriter); if (!mult.hasValue()) return failure(); rewriter.replaceOp(op, mult.getValue()); @@ -508,13 +509,15 @@ loc, resType, rewriter.getZeroAttr(resType)); for (int64_t d = 0, e = resType.getDimSize(0); d < e; ++d) { auto pos = rewriter.getI64ArrayAttr(d); - Value x = rewriter.create(loc, eltType, op.lhs(), pos); + Value x = + rewriter.create(loc, eltType, op.getLhs(), pos); Value a = rewriter.create(loc, rhsType, x); Value r = nullptr; if (acc) r = rewriter.create(loc, rhsType, acc, pos); - Optional m = isInt ? genMultI(loc, a, op.rhs(), r, kind, rewriter) - : genMultF(loc, a, op.rhs(), r, kind, rewriter); + Optional m = + isInt ? 
genMultI(loc, a, op.getRhs(), r, kind, rewriter) + : genMultF(loc, a, op.getRhs(), r, kind, rewriter); if (!m.hasValue()) return failure(); result = rewriter.create(loc, resType, m.getValue(), @@ -587,7 +590,7 @@ auto loc = op.getLoc(); auto dstType = op.getType(); auto eltType = dstType.getElementType(); - auto dimSizes = op.mask_dim_sizes(); + auto dimSizes = op.getMaskDimSizes(); int64_t rank = dstType.getRank(); if (rank == 0) { @@ -707,7 +710,7 @@ loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); unsigned mostMinorVectorSize = sourceVectorType.getShape()[1]; for (int64_t i = 0, e = sourceVectorType.getShape().front(); i != e; ++i) { - Value vec = rewriter.create(loc, op.source(), i); + Value vec = rewriter.create(loc, op.getSource(), i); desc = rewriter.create( loc, vec, desc, /*offsets=*/i * mostMinorVectorSize, /*strides=*/1); @@ -741,7 +744,7 @@ unsigned mostMinorVectorSize = resultVectorType.getShape()[1]; for (int64_t i = 0, e = resultVectorType.getShape().front(); i != e; ++i) { Value vec = rewriter.create( - loc, op.source(), /*offsets=*/i * mostMinorVectorSize, + loc, op.getSource(), /*offsets=*/i * mostMinorVectorSize, /*sizes=*/mostMinorVectorSize, /*strides=*/1); desc = rewriter.create(loc, vec, desc, i); @@ -796,7 +799,7 @@ incIdx(srcIdx, sourceVectorType, srcRank - 1); incIdx(resIdx, resultVectorType, resRank - 1); } - Value e = rewriter.create(loc, op.source(), srcIdx); + Value e = rewriter.create(loc, op.getSource(), srcIdx); result = rewriter.create(loc, e, result, resIdx); } rewriter.replaceOp(op, result); @@ -836,9 +839,9 @@ LogicalResult matchAndRewrite(vector::MultiDimReductionOp reduceOp, PatternRewriter &rewriter) const override { - if (reduceOp.kind() != vector::CombiningKind::ADD) + if (reduceOp.getKind() != vector::CombiningKind::ADD) return failure(); - Operation *mulOp = reduceOp.source().getDefiningOp(); + Operation *mulOp = reduceOp.getSource().getDefiningOp(); if (!mulOp || !isa(mulOp)) return failure(); SmallVector reductionMask = reduceOp.getReductionMask(); @@ -897,8 +900,8 @@ PatternRewriter &rewriter) const override { SmallVector maps = llvm::to_vector<4>(contractOp.getIndexingMaps()); - Value lhs = contractOp.lhs(); - Value rhs = contractOp.rhs(); + Value lhs = contractOp.getLhs(); + Value rhs = contractOp.getRhs(); size_t index = 0; bool changed = false; for (Value *operand : {&lhs, &rhs}) { @@ -909,17 +912,17 @@ SmallVector perm; transposeOp.getTransp(perm); AffineMap permutationMap = AffineMap::getPermutationMap( - extractVector(transposeOp.transp()), + extractVector(transposeOp.getTransp()), contractOp.getContext()); map = inversePermutation(permutationMap).compose(map); - *operand = transposeOp.vector(); + *operand = transposeOp.getVector(); changed = true; } if (!changed) return failure(); rewriter.replaceOpWithNewOp( - contractOp, lhs, rhs, contractOp.acc(), - rewriter.getAffineMapArrayAttr(maps), contractOp.iterator_types()); + contractOp, lhs, rhs, contractOp.getAcc(), + rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes()); return success(); } }; @@ -954,8 +957,8 @@ PatternRewriter &rewriter) const override { SmallVector maps = llvm::to_vector<4>(contractOp.getIndexingMaps()); - Value lhs = contractOp.lhs(); - Value rhs = contractOp.rhs(); + Value lhs = contractOp.getLhs(); + Value rhs = contractOp.getRhs(); size_t index = 0; bool changed = false; for (Value *operand : {&lhs, &rhs}) { @@ -988,14 +991,14 @@ AffineMap::get(broadcast.getVectorType().getRank(), 0, originalDims, contractOp.getContext()); map = 
broadcastMap.compose(map); - *operand = broadcast.source(); + *operand = broadcast.getSource(); changed = true; } if (!changed) return failure(); rewriter.replaceOpWithNewOp( - contractOp, lhs, rhs, contractOp.acc(), - rewriter.getAffineMapArrayAttr(maps), contractOp.iterator_types()); + contractOp, lhs, rhs, contractOp.getAcc(), + rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes()); return success(); } }; @@ -1028,7 +1031,7 @@ Type castResTy = getElementTypeOrSelf(op->getResult(0)); if (auto vecTy = bcastOp.getSourceType().dyn_cast()) castResTy = VectorType::get(vecTy.getShape(), castResTy); - OperationState state(op->getLoc(), op->getName(), bcastOp.source(), + OperationState state(op->getLoc(), op->getName(), bcastOp.getSource(), castResTy, op->getAttrs()); auto castOp = rewriter.createOperation(state); rewriter.replaceOpWithNewOp( @@ -1068,7 +1071,7 @@ auto castResTy = transpOp.getVectorType(); castResTy = VectorType::get(castResTy.getShape(), getElementTypeOrSelf(op->getResult(0))); - OperationState state(op->getLoc(), op->getName(), transpOp.vector(), + OperationState state(op->getLoc(), op->getName(), transpOp.getVector(), castResTy, op->getAttrs()); auto castOp = rewriter.createOperation(state); rewriter.replaceOpWithNewOp( @@ -1121,7 +1124,7 @@ ContractionOpToMatmulOpLowering::matchAndRewrite(vector::ContractionOp op, PatternRewriter &rew) const { // TODO: implement masks - if (llvm::size(op.masks()) != 0) + if (llvm::size(op.getMasks()) != 0) return failure(); if (vectorTransformOptions.vectorContractLowering != vector::VectorContractLowering::Matmul) @@ -1129,7 +1132,7 @@ if (failed(filter(op))) return failure(); - auto iteratorTypes = op.iterator_types().getValue(); + auto iteratorTypes = op.getIteratorTypes().getValue(); if (!isParallelIterator(iteratorTypes[0]) || !isParallelIterator(iteratorTypes[1]) || !isReductionIterator(iteratorTypes[2])) @@ -1146,16 +1149,16 @@ AffineExpr m, n, k; bindDims(rew.getContext(), m, n, k); // LHS must be A(m, k) or A(k, m). - Value lhs = op.lhs(); - auto lhsMap = op.indexing_maps()[0]; + Value lhs = op.getLhs(); + auto lhsMap = op.getIndexingMaps()[0]; if (lhsMap == AffineMap::get(3, 0, {k, m}, ctx)) lhs = rew.create(loc, lhs, ArrayRef{1, 0}); else if (lhsMap != AffineMap::get(3, 0, {m, k}, ctx)) return failure(); // RHS must be B(k, n) or B(n, k). - Value rhs = op.rhs(); - auto rhsMap = op.indexing_maps()[1]; + Value rhs = op.getRhs(); + auto rhsMap = op.getIndexingMaps()[1]; if (rhsMap == AffineMap::get(3, 0, {n, k}, ctx)) rhs = rew.create(loc, rhs, ArrayRef{1, 0}); else if (rhsMap != AffineMap::get(3, 0, {k, n}, ctx)) @@ -1181,11 +1184,11 @@ mul = rew.create( loc, VectorType::get({lhsRows, rhsColumns}, - getElementTypeOrSelf(op.acc().getType())), + getElementTypeOrSelf(op.getAcc().getType())), mul); // ACC must be C(m, n) or C(n, m). - auto accMap = op.indexing_maps()[2]; + auto accMap = op.getIndexingMaps()[2]; if (accMap == AffineMap::get(3, 0, {n, m}, ctx)) mul = rew.create(loc, mul, ArrayRef{1, 0}); else if (accMap != AffineMap::get(3, 0, {m, n}, ctx)) @@ -1193,8 +1196,9 @@ Value res = elementType.isa() - ? static_cast(rew.create(loc, op.acc(), mul)) - : static_cast(rew.create(loc, op.acc(), mul)); + ? static_cast(rew.create(loc, op.getAcc(), mul)) + : static_cast( + rew.create(loc, op.getAcc(), mul)); rew.replaceOp(op, res); return success(); @@ -1220,11 +1224,10 @@ /// This unrolls outer-products along the reduction dimension. 
struct UnrolledOuterProductGenerator : public StructuredGenerator { - UnrolledOuterProductGenerator(OpBuilder &builder, vector::ContractionOp op) : StructuredGenerator(builder, op), - kind(op.kind()), lhs(op.lhs()), rhs(op.rhs()), res(op.acc()), - lhsType(op.getLhsType()) {} + kind(op.getKind()), lhs(op.getLhs()), rhs(op.getRhs()), + res(op.getAcc()), lhsType(op.getLhsType()) {} Value t(Value v) { static constexpr std::array perm = {1, 0}; @@ -1350,7 +1353,7 @@ LogicalResult ContractionOpToOuterProductOpLowering::matchAndRewrite( vector::ContractionOp op, PatternRewriter &rewriter) const { // TODO: implement masks - if (llvm::size(op.masks()) != 0) + if (llvm::size(op.getMasks()) != 0) return failure(); if (vectorTransformOptions.vectorContractLowering != @@ -1384,7 +1387,7 @@ ContractionOpToDotLowering::matchAndRewrite(vector::ContractionOp op, PatternRewriter &rewriter) const { // TODO: implement masks - if (llvm::size(op.masks()) != 0) + if (llvm::size(op.getMasks()) != 0) return failure(); if (failed(filter(op))) @@ -1394,10 +1397,10 @@ vector::VectorContractLowering::Dot) return failure(); - auto iteratorTypes = op.iterator_types().getValue(); + auto iteratorTypes = op.getIteratorTypes().getValue(); static constexpr std::array perm = {1, 0}; Location loc = op.getLoc(); - Value lhs = op.lhs(), rhs = op.rhs(); + Value lhs = op.getLhs(), rhs = op.getRhs(); using MapList = ArrayRef>; auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); }; @@ -1489,7 +1492,7 @@ res = rewriter.create(op.getLoc(), reduced, res, pos); } } - if (auto acc = op.acc()) + if (auto acc = op.getAcc()) res = createAdd(op.getLoc(), res, acc, isInt, rewriter); rewriter.replaceOp(op, res); return success(); @@ -1516,7 +1519,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op, PatternRewriter &rewriter) const { // TODO: implement masks. - if (llvm::size(op.masks()) != 0) + if (llvm::size(op.getMasks()) != 0) return failure(); if (failed(filter(op))) @@ -1621,15 +1624,15 @@ adjustMap(iMap[2], iterIndex, rewriter)}; auto lowAffine = rewriter.getAffineMapArrayAttr(lowIndexingMaps); auto lowIter = - rewriter.getArrayAttr(adjustIter(op.iterator_types(), iterIndex)); + rewriter.getArrayAttr(adjustIter(op.getIteratorTypes(), iterIndex)); // Unroll into a series of lower dimensional vector.contract ops. Location loc = op.getLoc(); Value result = rewriter.create( loc, resType, rewriter.getZeroAttr(resType)); for (int64_t d = 0; d < dimSize; ++d) { - auto lhs = reshapeLoad(loc, op.lhs(), lhsType, lhsIndex, d, rewriter); - auto rhs = reshapeLoad(loc, op.rhs(), rhsType, rhsIndex, d, rewriter); - auto acc = reshapeLoad(loc, op.acc(), resType, resIndex, d, rewriter); + auto lhs = reshapeLoad(loc, op.getLhs(), lhsType, lhsIndex, d, rewriter); + auto rhs = reshapeLoad(loc, op.getRhs(), rhsType, rhsIndex, d, rewriter); + auto acc = reshapeLoad(loc, op.getAcc(), resType, resIndex, d, rewriter); Value lowContract = rewriter.create( loc, lhs, rhs, acc, lowAffine, lowIter); result = @@ -1661,10 +1664,10 @@ // Base case. 
if (lhsType.getRank() == 1) { assert(rhsType.getRank() == 1 && "corrupt contraction"); - Value m = createMul(loc, op.lhs(), op.rhs(), isInt, rewriter); + Value m = createMul(loc, op.getLhs(), op.getRhs(), isInt, rewriter); auto kind = vector::CombiningKind::ADD; Value res = rewriter.create(loc, kind, m); - if (auto acc = op.acc()) + if (auto acc = op.getAcc()) res = createAdd(op.getLoc(), res, acc, isInt, rewriter); return res; } @@ -1675,15 +1678,15 @@ adjustMap(iMap[2], iterIndex, rewriter)}; auto lowAffine = rewriter.getAffineMapArrayAttr(lowIndexingMaps); auto lowIter = - rewriter.getArrayAttr(adjustIter(op.iterator_types(), iterIndex)); + rewriter.getArrayAttr(adjustIter(op.getIteratorTypes(), iterIndex)); // Unroll into a series of lower dimensional vector.contract ops. // By feeding the initial accumulator into the first contraction, // and the result of each contraction into the next, eventually // the sum of all reductions is computed. - Value result = op.acc(); + Value result = op.getAcc(); for (int64_t d = 0; d < dimSize; ++d) { - auto lhs = reshapeLoad(loc, op.lhs(), lhsType, lhsIndex, d, rewriter); - auto rhs = reshapeLoad(loc, op.rhs(), rhsType, rhsIndex, d, rewriter); + auto lhs = reshapeLoad(loc, op.getLhs(), lhsType, lhsIndex, d, rewriter); + auto rhs = reshapeLoad(loc, op.getRhs(), rhsType, rhsIndex, d, rewriter); result = rewriter.create(loc, lhs, rhs, result, lowAffine, lowIter); } @@ -1747,7 +1750,7 @@ // Permutations are handled by VectorToSCF or // populateVectorTransferPermutationMapLoweringPatterns. // We let the 0-d corner case pass-through as it is supported. - if (!read.permutation_map().isMinorIdentityWithBroadcasting( + if (!read.getPermutationMap().isMinorIdentityWithBroadcasting( &broadcastedDims)) return failure(); @@ -1786,16 +1789,16 @@ // Create vector load op. Operation *loadOp; - if (read.mask()) { + if (read.getMask()) { Value fill = rewriter.create( - read.getLoc(), unbroadcastedVectorType, read.padding()); + read.getLoc(), unbroadcastedVectorType, read.getPadding()); loadOp = rewriter.create( - read.getLoc(), unbroadcastedVectorType, read.source(), read.indices(), - read.mask(), fill); + read.getLoc(), unbroadcastedVectorType, read.getSource(), + read.getIndices(), read.getMask(), fill); } else { - loadOp = rewriter.create(read.getLoc(), - unbroadcastedVectorType, - read.source(), read.indices()); + loadOp = rewriter.create( + read.getLoc(), unbroadcastedVectorType, read.getSource(), + read.getIndices()); } // Insert a broadcasting op if required. @@ -1830,7 +1833,7 @@ if (vecType.getNumElements() != 1) return failure(); auto memrefLoad = rewriter.create( - loadOp.getLoc(), loadOp.base(), loadOp.indices()); + loadOp.getLoc(), loadOp.getBase(), loadOp.getIndices()); rewriter.replaceOpWithNewOp(loadOp, vecType, memrefLoad); return success(); @@ -1851,15 +1854,15 @@ if (vecType.getRank() == 0) { // TODO: Unifiy once ExtractOp supports 0-d vectors. extracted = rewriter.create( - storeOp.getLoc(), storeOp.valueToStore()); + storeOp.getLoc(), storeOp.getValueToStore()); } else { SmallVector indices(vecType.getRank(), 0); extracted = rewriter.create( - storeOp.getLoc(), storeOp.valueToStore(), indices); + storeOp.getLoc(), storeOp.getValueToStore(), indices); } rewriter.replaceOpWithNewOp( - storeOp, extracted, storeOp.base(), storeOp.indices()); + storeOp, extracted, storeOp.getBase(), storeOp.getIndices()); return success(); } }; @@ -1887,7 +1890,7 @@ // Permutations are handled by VectorToSCF or // populateVectorTransferPermutationMapLoweringPatterns. 
if ( // pass-through for the 0-d corner case. - !write.permutation_map().isMinorIdentity()) + !write.getPermutationMap().isMinorIdentity()) return failure(); auto memRefType = write.getShapedType().dyn_cast(); @@ -1912,12 +1915,13 @@ // Out-of-bounds dims are handled by MaterializeTransferMask. if (write.hasOutOfBoundsDim()) return failure(); - if (write.mask()) { + if (write.getMask()) { rewriter.replaceOpWithNewOp( - write, write.source(), write.indices(), write.mask(), write.vector()); + write, write.getSource(), write.getIndices(), write.getMask(), + write.getVector()); } else { rewriter.replaceOpWithNewOp( - write, write.vector(), write.source(), write.indices()); + write, write.getVector(), write.getSource(), write.getIndices()); } return success(); } @@ -1951,7 +1955,7 @@ if (extractOp.getVectorType().getRank() != 1) return failure(); - auto castOp = extractOp.vector().getDefiningOp(); + auto castOp = extractOp.getVector().getDefiningOp(); if (!castOp) return failure(); @@ -1977,14 +1981,14 @@ return (*attr.getAsValueRange().begin()).getZExtValue(); }; - uint64_t index = getFirstIntValue(extractOp.position()); + uint64_t index = getFirstIntValue(extractOp.getPosition()); // Get the single scalar (as a vector) in the source value that packs the // desired scalar. E.g. extract vector<1xf32> from vector<4xf32> VectorType oneScalarType = VectorType::get({1}, castSrcType.getElementType()); Value packedValue = rewriter.create( - extractOp.getLoc(), oneScalarType, castOp.source(), + extractOp.getLoc(), oneScalarType, castOp.getSource(), rewriter.getI64ArrayAttr(index / expandRatio)); // Cast it to a vector with the desired scalar's type. @@ -2021,7 +2025,7 @@ LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp, PatternRewriter &rewriter) const override { - auto castOp = extractOp.vector().getDefiningOp(); + auto castOp = extractOp.getVector().getDefiningOp(); if (!castOp) return failure(); @@ -2036,7 +2040,7 @@ return failure(); // Only accept all one strides for now. - if (llvm::any_of(extractOp.strides().getAsValueRange(), + if (llvm::any_of(extractOp.getStrides().getAsValueRange(), [](const APInt &val) { return !val.isOneValue(); })) return failure(); @@ -2048,7 +2052,7 @@ // are selecting the full range for the last bitcasted dimension; other // dimensions aren't affected. Otherwise, we need to scale down the last // dimension's offset given we are extracting from less elements now. - ArrayAttr newOffsets = extractOp.offsets(); + ArrayAttr newOffsets = extractOp.getOffsets(); if (newOffsets.size() == rank) { SmallVector offsets = getIntValueVector(newOffsets); if (offsets.back() % expandRatio != 0) @@ -2058,7 +2062,7 @@ } // Similarly for sizes. 
-    ArrayAttr newSizes = extractOp.sizes();
+    ArrayAttr newSizes = extractOp.getSizes();
     if (newSizes.size() == rank) {
       SmallVector sizes = getIntValueVector(newSizes);
       if (sizes.back() % expandRatio != 0)
@@ -2074,8 +2078,8 @@
         VectorType::get(dims, castSrcType.getElementType());
     auto newExtractOp = rewriter.create(
-        extractOp.getLoc(), newExtractType, castOp.source(), newOffsets,
-        newSizes, extractOp.strides());
+        extractOp.getLoc(), newExtractType, castOp.getSource(), newOffsets,
+        newSizes, extractOp.getStrides());
     rewriter.replaceOpWithNewOp(
         extractOp, extractOp.getType(), newExtractOp);
@@ -2114,12 +2118,12 @@
     int64_t shrinkRatio = castSrcLastDim / castDstLastDim;
     auto insertOp =
-        bitcastOp.source().getDefiningOp();
+        bitcastOp.getSource().getDefiningOp();
     if (!insertOp)
       return failure();
     // Only accept all one strides for now.
-    if (llvm::any_of(insertOp.strides().getAsValueRange(),
+    if (llvm::any_of(insertOp.getStrides().getAsValueRange(),
                      [](const APInt &val) { return !val.isOneValue(); }))
       return failure();
@@ -2129,7 +2133,7 @@
     if (rank != insertOp.getDestVectorType().getRank())
       return failure();
-    ArrayAttr newOffsets = insertOp.offsets();
+    ArrayAttr newOffsets = insertOp.getOffsets();
     assert(newOffsets.size() == rank);
     SmallVector offsets = getIntValueVector(newOffsets);
     if (offsets.back() % shrinkRatio != 0)
@@ -2144,7 +2148,7 @@
         VectorType::get(srcDims, castDstType.getElementType());
     auto newCastSrcOp = rewriter.create(
-        bitcastOp.getLoc(), newCastSrcType, insertOp.source());
+        bitcastOp.getLoc(), newCastSrcType, insertOp.getSource());
     SmallVector dstDims =
         llvm::to_vector<4>(insertOp.getDestVectorType().getShape());
@@ -2153,11 +2157,11 @@
         VectorType::get(dstDims, castDstType.getElementType());
     auto newCastDstOp = rewriter.create(
-        bitcastOp.getLoc(), newCastDstType, insertOp.dest());
+        bitcastOp.getLoc(), newCastDstType, insertOp.getDest());
     rewriter.replaceOpWithNewOp(
         bitcastOp, bitcastOp.getType(), newCastSrcOp, newCastDstOp, newOffsets,
-        insertOp.strides());
+        insertOp.getStrides());
     return success();
   }
@@ -2244,7 +2248,7 @@
       return failure();
     if (xferOp.getVectorType().getRank() > 1 ||
-        llvm::size(xferOp.indices()) == 0)
+        llvm::size(xferOp.getIndices()) == 0)
       return failure();
     Location loc = xferOp->getLoc();
@@ -2255,24 +2259,24 @@
     //
     // TODO: when the leaf transfer rank is k > 1, we need the last `k`
     // dimensions here.
-    unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
-    Value off = xferOp.indices()[lastIndex];
+    unsigned lastIndex = llvm::size(xferOp.getIndices()) - 1;
+    Value off = xferOp.getIndices()[lastIndex];
     Value dim =
-        vector::createOrFoldDimOp(rewriter, loc, xferOp.source(), lastIndex);
+        vector::createOrFoldDimOp(rewriter, loc, xferOp.getSource(), lastIndex);
     Value b = rewriter.create(loc, dim.getType(), dim, off);
     Value mask = rewriter.create(
         loc,
         VectorType::get(vtp.getShape(), rewriter.getI1Type(),
                         vtp.getNumScalableDims()),
         b);
-    if (xferOp.mask()) {
+    if (xferOp.getMask()) {
       // Intersect the in-bounds with the mask specified as an op parameter.
-      mask = rewriter.create(loc, mask, xferOp.mask());
+      mask = rewriter.create(loc, mask, xferOp.getMask());
     }
     rewriter.updateRootInPlace(xferOp, [&]() {
-      xferOp.maskMutable().assign(mask);
-      xferOp.in_boundsAttr(rewriter.getBoolArrayAttr({true}));
+      xferOp.getMaskMutable().assign(mask);
+      xferOp.setInBoundsAttr(rewriter.getBoolArrayAttr({true}));
     });

     return success();
@@ -2319,14 +2323,14 @@
       return failure();
     // TODO: support mask.
-    if (readOp.mask())
+    if (readOp.getMask())
       return failure();
-    auto srcType = readOp.source().getType().dyn_cast();
+    auto srcType = readOp.getSource().getType().dyn_cast();
     if (!srcType || !srcType.hasStaticShape())
       return failure();
-    if (!readOp.permutation_map().isMinorIdentity())
+    if (!readOp.getPermutationMap().isMinorIdentity())
       return failure();
     auto targetType = readOp.getVectorType();
@@ -2379,19 +2383,19 @@
     SmallVector strides(srcType.getRank(), 1);
     ArrayAttr inBoundsAttr =
-        readOp.in_bounds()
+        readOp.getInBounds()
             ? rewriter.getArrayAttr(
-                  readOp.in_boundsAttr().getValue().drop_back(dimsToDrop))
+                  readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop))
             : ArrayAttr();
     Value rankedReducedView = rewriter.create(
-        loc, resultMemrefType, readOp.source(), offsets, srcType.getShape(),
+        loc, resultMemrefType, readOp.getSource(), offsets, srcType.getShape(),
         strides);
     auto permMap = getTransferMinorIdentityMap(
         rankedReducedView.getType().cast(), resultTargetVecType);
     Value result = rewriter.create(
         loc, resultTargetVecType, rankedReducedView,
-        readOp.indices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
-        readOp.padding(),
+        readOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
+        readOp.getPadding(),
         // TODO: support mask.
         /*mask=*/Value(), inBoundsAttr);
     rewriter.replaceOpWithNewOp(readOp, targetType,
@@ -2527,14 +2531,14 @@
     ArrayRef destShape = destType.getShape();
     auto elType = destType.getElementType();
     bool isInt = elType.isIntOrIndex();
-    if (!isValidKind(isInt, scanOp.kind()))
+    if (!isValidKind(isInt, scanOp.getKind()))
       return failure();
     VectorType resType = VectorType::get(destShape, elType);
     Value result = rewriter.create(
         loc, resType, rewriter.getZeroAttr(resType));
-    int64_t reductionDim = scanOp.reduction_dim();
-    bool inclusive = scanOp.inclusive();
+    int64_t reductionDim = scanOp.getReductionDim();
+    bool inclusive = scanOp.getInclusive();
     int64_t destRank = destType.getRank();
     VectorType initialValueType = scanOp.getInitialValueType();
     int64_t initialValueRank = initialValueType.getRank();
@@ -2554,7 +2558,7 @@
       offsets[reductionDim] = i;
       ArrayAttr scanOffsets = rewriter.getI64ArrayAttr(offsets);
       Value input = rewriter.create(
-          loc, reductionType, scanOp.source(), scanOffsets, scanSizes,
+          loc, reductionType, scanOp.getSource(), scanOffsets, scanSizes,
           scanStrides);
       Value output;
       if (i == 0) {
@@ -2564,15 +2568,15 @@
         if (initialValueRank == 0) {
           // ShapeCastOp cannot handle 0-D vectors
           output = rewriter.create(
-              loc, input.getType(), scanOp.initial_value());
+              loc, input.getType(), scanOp.getInitialValue());
         } else {
           output = rewriter.create(
-              loc, input.getType(), scanOp.initial_value());
+              loc, input.getType(), scanOp.getInitialValue());
         }
       }
     } else {
       Value y = inclusive ? input : lastInput;
-      output = genOperator(loc, lastOutput, y, scanOp.kind(), rewriter);
+      output = genOperator(loc, lastOutput, y, scanOp.getKind(), rewriter);
       assert(output != nullptr);
     }
     result = rewriter.create(
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnrollDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnrollDistribute.cpp
--- a/mlir/lib/Dialect/Vector/Transforms/VectorUnrollDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnrollDistribute.cpp
@@ -112,7 +112,7 @@
     // TODO: support 0-d corner case.
     if (readOp.getTransferRank() == 0)
       return failure();
-    if (readOp.mask())
+    if (readOp.getMask())
      return failure();
     auto targetShape = getTargetShape(options, readOp);
     if (!targetShape)
       return failure();
@@ -129,16 +129,16 @@
         loc, sourceVectorType, rewriter.getZeroAttr(sourceVectorType));
     auto targetType =
         VectorType::get(*targetShape, sourceVectorType.getElementType());
-    SmallVector originalIndices(readOp.indices().begin(),
-                                readOp.indices().end());
+    SmallVector originalIndices(readOp.getIndices().begin(),
+                                readOp.getIndices().end());
     for (int64_t i = 0; i < sliceCount; i++) {
       SmallVector indices =
           sliceTransferIndices(i, originalSize, *targetShape, originalIndices,
-                               readOp.permutation_map(), loc, rewriter);
+                               readOp.getPermutationMap(), loc, rewriter);
       auto slicedRead = rewriter.create(
-          loc, targetType, readOp.source(), indices,
-          readOp.permutation_mapAttr(), readOp.padding(), readOp.mask(),
-          readOp.in_boundsAttr());
+          loc, targetType, readOp.getSource(), indices,
+          readOp.getPermutationMapAttr(), readOp.getPadding(), readOp.getMask(),
+          readOp.getInBoundsAttr());
       SmallVector elementOffsets =
           getVectorOffset(originalSize, *targetShape, i);
@@ -165,7 +165,7 @@
     if (writeOp.getTransferRank() == 0)
       return failure();
-    if (writeOp.mask())
+    if (writeOp.getMask())
       return failure();
     auto targetShape = getTargetShape(options, writeOp);
     if (!targetShape)
@@ -177,21 +177,21 @@
     SmallVector ratio = *shapeRatio(originalSize, *targetShape);
     // Compute shape ratio of 'shape' and 'sizes'.
     int64_t sliceCount = computeMaxLinearIndex(ratio);
-    SmallVector originalIndices(writeOp.indices().begin(),
-                                writeOp.indices().end());
+    SmallVector originalIndices(writeOp.getIndices().begin(),
+                                writeOp.getIndices().end());
     Value resultTensor;
     for (int64_t i = 0; i < sliceCount; i++) {
       SmallVector elementOffsets =
          getVectorOffset(originalSize, *targetShape, i);
       Value slicedVector = rewriter.create(
-          loc, writeOp.vector(), elementOffsets, *targetShape, strides);
+          loc, writeOp.getVector(), elementOffsets, *targetShape, strides);
       SmallVector indices =
           sliceTransferIndices(i, originalSize, *targetShape, originalIndices,
-                               writeOp.permutation_map(), loc, rewriter);
+                               writeOp.getPermutationMap(), loc, rewriter);
       Operation *slicedWrite = rewriter.create(
-          loc, slicedVector, resultTensor ? resultTensor : writeOp.source(),
-          indices, writeOp.permutation_mapAttr(), writeOp.in_boundsAttr());
+          loc, slicedVector, resultTensor ? resultTensor : writeOp.getSource(),
+          indices, writeOp.getPermutationMapAttr(), writeOp.getInBoundsAttr());
       // For the tensor case update the destination for the next transfer write.
       if (!slicedWrite->getResults().empty())
         resultTensor = slicedWrite->getResult(0);
@@ -267,19 +267,21 @@
     AffineMap lhsPermutationMap = contractOp.getIndexingMaps()[0];
     SmallVector lhsOffets =
         applyPermutationMap(lhsPermutationMap, ArrayRef(offsets));
-    extractOperand(0, contractOp.lhs(), lhsPermutationMap, lhsOffets);
+    extractOperand(0, contractOp.getLhs(), lhsPermutationMap, lhsOffets);
     // If there is a mask associated to lhs, extract it as well.
     if (slicesOperands.size() > 3)
-      extractOperand(3, contractOp.masks()[0], lhsPermutationMap, lhsOffets);
+      extractOperand(3, contractOp.getMasks()[0], lhsPermutationMap,
+                     lhsOffets);

     // Extract the new rhs operand.
     AffineMap rhsPermutationMap = contractOp.getIndexingMaps()[1];
     SmallVector rhsOffets =
         applyPermutationMap(rhsPermutationMap, ArrayRef(offsets));
-    extractOperand(1, contractOp.rhs(), rhsPermutationMap, rhsOffets);
+    extractOperand(1, contractOp.getRhs(), rhsPermutationMap, rhsOffets);
     // If there is a mask associated to rhs, extract it as well.
     if (slicesOperands.size() > 4)
-      extractOperand(4, contractOp.masks()[1], rhsPermutationMap, rhsOffets);
+      extractOperand(4, contractOp.getMasks()[1], rhsPermutationMap,
+                     rhsOffets);

     AffineMap accPermutationMap = contractOp.getIndexingMaps()[2];
     SmallVector accOffets =
@@ -290,7 +292,7 @@
     if (accIt != accCache.end())
       slicesOperands[2] = accIt->second;
     else
-      extractOperand(2, contractOp.acc(), accPermutationMap, accOffets);
+      extractOperand(2, contractOp.getAcc(), accPermutationMap, accOffets);
     SmallVector dstShape =
         applyPermutationMap(dstAffineMap, ArrayRef(*targetShape));
@@ -367,8 +369,8 @@
       // reduction loop keeps updating the accumulator.
       auto accIt = accCache.find(destOffset);
       if (accIt != accCache.end())
-        result = makeArithReduction(rewriter, loc, reductionOp.kind(), result,
-                                    accIt->second);
+        result = makeArithReduction(rewriter, loc, reductionOp.getKind(),
+                                    result, accIt->second);
       accCache[destOffset] = result;
     }
     // Assemble back the accumulator into a single vector.
@@ -451,7 +453,7 @@
   using OpRewritePattern::OpRewritePattern;
   LogicalResult matchAndRewrite(vector::ExtractMapOp extract,
                                 PatternRewriter &rewriter) const override {
-    Operation *definedOp = extract.vector().getDefiningOp();
+    Operation *definedOp = extract.getVector().getDefiningOp();
     if (!definedOp || !OpTrait::hasElementwiseMappableTraits(definedOp) ||
         definedOp->getNumResults() != 1)
       return failure();
@@ -467,7 +469,7 @@
           loc,
           VectorType::get(extract.getResultType().getShape(),
                           vecType.getElementType()),
-          operand.get(), extract.ids()));
+          operand.get(), extract.getIds()));
     }
     Operation *newOp = cloneOpWithOperandsAndTypes(
         rewriter, loc, definedOp, extractOperands, extract.getResultType());
@@ -482,7 +484,7 @@
   using OpRewritePattern::OpRewritePattern;
   LogicalResult matchAndRewrite(vector::ExtractMapOp extract,
                                 PatternRewriter &rewriter) const override {
-    Operation *definedOp = extract.vector().getDefiningOp();
+    Operation *definedOp = extract.getVector().getDefiningOp();
     auto contract = dyn_cast_or_null(definedOp);
     if (!contract)
       return failure();
@@ -514,7 +516,7 @@
       VectorType newVecType =
           VectorType::get(operandShape, vecType.getElementType());
       extractOperands.push_back(rewriter.create(
-          loc, newVecType, operand, extract.ids()));
+          loc, newVecType, operand, extract.getIds()));
     }
     Operation *newOp =
         cloneOpWithOperandsAndTypes(rewriter, loc, definedOp, extractOperands,
@@ -554,11 +556,12 @@
         dyn_cast(*read.getResult().getUsers().begin());
     if (!extract)
       return failure();
-    if (read.mask())
+    if (read.getMask())
       return failure();
-    SmallVector indices(read.indices().begin(), read.indices().end());
-    AffineMap indexMap = extract.map().compose(read.permutation_map());
+    SmallVector indices(read.getIndices().begin(),
+                        read.getIndices().end());
+    AffineMap indexMap = extract.map().compose(read.getPermutationMap());
     unsigned idCount = 0;
     ImplicitLocOpBuilder lb(read.getLoc(), rewriter);
     for (auto it :
@@ -574,14 +577,15 @@
           extract.getResultType().getDimSize(vectorPos), read.getContext());
       indices[indexPos] = makeComposedAffineApply(
           rewriter, read.getLoc(), d0 + scale * d1,
-          {indices[indexPos], extract.ids()[idCount++]});
+          {indices[indexPos], extract.getIds()[idCount++]});
     }
     Value newRead = lb.create(
-        extract.getType(), read.source(), indices, read.permutation_mapAttr(),
-        read.padding(), read.mask(), read.in_boundsAttr());
+        extract.getType(), read.getSource(), indices,
+        read.getPermutationMapAttr(), read.getPadding(), read.getMask(),
+        read.getInBoundsAttr());
     Value dest = lb.create(
         read.getType(), rewriter.getZeroAttr(read.getType()));
-    newRead = lb.create(newRead, dest, extract.ids());
+    newRead = lb.create(newRead, dest, extract.getIds());
     rewriter.replaceOp(read, newRead);
     return success();
   }
@@ -597,14 +601,14 @@
     if (write.getTransferRank() == 0)
       return failure();
-    auto insert = write.vector().getDefiningOp();
+    auto insert = write.getVector().getDefiningOp();
     if (!insert)
       return failure();
-    if (write.mask())
+    if (write.getMask())
       return failure();
-    SmallVector indices(write.indices().begin(),
-                        write.indices().end());
-    AffineMap indexMap = insert.map().compose(write.permutation_map());
+    SmallVector indices(write.getIndices().begin(),
+                        write.getIndices().end());
+    AffineMap indexMap = insert.map().compose(write.getPermutationMap());
     unsigned idCount = 0;
     Location loc = write.getLoc();
     for (auto it :
@@ -619,13 +623,13 @@
       auto scale = getAffineConstantExpr(
          insert.getSourceVectorType().getDimSize(vectorPos),
          write.getContext());
-      indices[indexPos] =
-          makeComposedAffineApply(rewriter, loc, d0 + scale * d1,
-                                  {indices[indexPos], insert.ids()[idCount++]});
+      indices[indexPos] = makeComposedAffineApply(
+          rewriter, loc, d0 + scale * d1,
+          {indices[indexPos], insert.getIds()[idCount++]});
     }
     rewriter.create(
-        loc, insert.vector(), write.source(), indices,
-        write.permutation_mapAttr(), write.in_boundsAttr());
+        loc, insert.getVector(), write.getSource(), indices,
+        write.getPermutationMapAttr(), write.getInBoundsAttr());
     rewriter.eraseOp(write);
     return success();
   }
@@ -654,7 +658,7 @@
           getVectorOffset(originalSize, *targetShape, i);
       SmallVector strides(offsets.size(), 1);
       Value slicedOperand = rewriter.create(
-          loc, reductionOp.vector(), offsets, *targetShape, strides);
+          loc, reductionOp.getVector(), offsets, *targetShape, strides);
       Operation *newOp = cloneOpWithOperandsAndTypes(
           rewriter, loc, reductionOp, slicedOperand, reductionOp.getType());
       Value result = newOp->getResult(0);
@@ -664,7 +668,7 @@
         accumulator = result;
       } else {
         // On subsequent reduction, combine with the accumulator.
-        accumulator = makeArithReduction(rewriter, loc, reductionOp.kind(),
+        accumulator = makeArithReduction(rewriter, loc, reductionOp.getKind(),
                                          accumulator, result);
       }
     }
diff --git a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
--- a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
@@ -264,7 +264,7 @@
       return rewriter.notifyMatchFailure(op, "Unsupported vector type");

     SmallVector transp;
-    for (auto attr : op.transp())
+    for (auto attr : op.getTransp())
       transp.push_back(attr.cast().getInt());

     // Check whether the two source vector dimensions that are greater than one
@@ -289,7 +289,7 @@
         VectorType::get({n * m}, op.getVectorType().getElementType());
     auto reshInputType = VectorType::get({m, n}, srcType.getElementType());
     auto reshInput =
-        ib.create(flattenedType, op.vector());
+        ib.create(flattenedType, op.getVector());
     reshInput = ib.create(reshInputType, reshInput);

     // Extract 1-D vectors from the higher-order dimension of the input
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -86,7 +86,7 @@
                           dstVec.getShape().end());
   }
   if (auto writeOp = dyn_cast(op)) {
-    auto insert = writeOp.vector().getDefiningOp();
+    auto insert = writeOp.getVector().getDefiningOp();
     if (!insert)
       return llvm::None;
     ArrayRef shape = insert.getSourceVectorType().getShape();