diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td @@ -223,7 +223,7 @@ }]; let builders = [ - OpBuilder<(ins "Value":$output, "Value":$value)> + OpBuilder<(ins "Value":$value, "Value":$output)> ]; let verifier = [{ return ::verify(*this); }]; diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -813,7 +813,7 @@ auto fillValue = rewriter.create(loc, fillValueAttr); auto filledTensor = - rewriter.create<linalg::FillOp>(loc, initTensor, fillValue).result(); + rewriter.create<linalg::FillOp>(loc, fillValue, initTensor).result(); SmallVector srcExprs; SmallVector dstExprs; @@ -1018,7 +1018,7 @@ auto initTensor = rewriter.create( loc, outputTy.getShape(), outputTy.getElementType()); Value zeroTensor = - rewriter.create<linalg::FillOp>(loc, initTensor, zero).getResult(0); + rewriter.create<linalg::FillOp>(loc, zero, initTensor).getResult(0); rewriter.replaceOpWithNewOp( op, TypeRange{op.getType()}, ValueRange{adaptor.a(), adaptor.b()}, ValueRange{zeroTensor}); @@ -1093,7 +1093,6 @@ } }; - class ReshapeConverter : public OpConversionPattern<tosa::ReshapeOp> { public: using OpConversionPattern<tosa::ReshapeOp>::OpConversionPattern; @@ -1737,7 +1736,7 @@ Value zeroVal = rewriter.create( loc, rewriter.getZeroAttr(resultType.getElementType())); Value result = - rewriter.create<linalg::FillOp>(loc, init, zeroVal).getResult(0); + rewriter.create<linalg::FillOp>(loc, zeroVal, init).getResult(0); for (auto arg : args) { sizes[axis] = rewriter.create(loc, arg, axisValue); @@ -1981,7 +1980,7 @@ auto fillValueIdx = rewriter.create( loc, rewriter.getIntegerAttr(outElementTy, 0)); auto filledTensorIdx = - rewriter.create<linalg::FillOp>(loc, initTensorIdx, fillValueIdx) + rewriter.create<linalg::FillOp>(loc, fillValueIdx, initTensorIdx) .result(); // Second fill 
the output buffer for the running max. @@ -1999,7 +1998,7 @@ auto fillValueMax = rewriter.create(loc, fillValueMaxAttr); auto filledTensorMax = - rewriter.create<linalg::FillOp>(loc, initTensorMax, fillValueMax) + rewriter.create<linalg::FillOp>(loc, fillValueMax, initTensorMax) .result(); // We need to reduce along the arg-max axis, with parallel operations along @@ -2288,7 +2287,7 @@ loc, resultTy.getShape(), resultTy.getElementType()); Value filledInitTensor = - rewriter.create<linalg::FillOp>(loc, initTensor, initialValue).result(); + rewriter.create<linalg::FillOp>(loc, initialValue, initTensor).result(); Value fakeWindowDims = rewriter.create(loc, kernel, outElementTy); diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -425,8 +425,8 @@ b.create(captures); } -void FillOp::build(OpBuilder &builder, OperationState &result, Value output, - Value value) { +void FillOp::build(OpBuilder &builder, OperationState &result, Value value, + Value output) { build(builder, result, output.getType().dyn_cast(), output, value); fillStructuredOpRegion(builder, *result.regions.front(), TypeRange{}, @@ -2089,7 +2089,7 @@ auto newInit = rewriter.create<TensorReshapeOp>( loc, reshapeOp.getResultType(), oldFill.output(), reshapeOp.reassociation()); - rewriter.replaceOpWithNewOp<FillOp>(reshapeOp, newInit, oldFill.value()); + rewriter.replaceOpWithNewOp<FillOp>(reshapeOp, oldFill.value(), newInit); return success(); } diff --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp @@ -186,7 +186,7 @@ return rewriter.notifyMatchFailure(op, "operand must be of a tensor type"); - rewriter.create<linalg::FillOp>(op.getLoc(), adaptor.output(), adaptor.value()); + rewriter.create<linalg::FillOp>(op.getLoc(), adaptor.value(), adaptor.output()); rewriter.replaceOp(op, adaptor.output()); return success(); diff --git 
a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -283,7 +283,7 @@ .Default([](auto) { return Value(); }); if (!fillVal) return {}; - b.create<linalg::FillOp>(promotionInfo->fullLocalView, fillVal); + b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView); } // Copy data into the promoted buffers. Use callback if provided. diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -676,7 +676,7 @@ // Initialize tensor with the pad value Value tmpTensor = - rewriter.create<linalg::FillOp>(loc, initTensor, padValue).result(); + rewriter.create<linalg::FillOp>(loc, padValue, initTensor).result(); // Copy original contents into new tensor // Uses linalg.generic, but could be done with std.subtensor_insert diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -755,7 +755,7 @@ // pattern.) auto padValue = padOp.getConstantPaddingValue(); if (padValue) - return rewriter.create<linalg::FillOp>(padOp.getLoc(), dest, padValue).result(); + return rewriter.create<linalg::FillOp>(padOp.getLoc(), padValue, dest).result(); // Fill could not be vectorized: Lower to tensor::GenerateOp with region. 
auto generateOp = rewriter.create<tensor::GenerateOp>( diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp --- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp @@ -2443,7 +2443,7 @@ b.create(loc, viewAndIndices); }, [&](OpBuilder &b, Location loc) { - b.create<linalg::FillOp>(loc, alloc, xferOp.padding()); + b.create<linalg::FillOp>(loc, xferOp.padding(), alloc); // Take partial subview of memref which guarantees no dimension // overflows. Value memRefSubView = createSubViewIntersection( diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp --- a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp +++ b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp @@ -298,10 +298,11 @@ auto floatType = src.getType().cast().getElementType(); if (!floatType.isa()) return failure(); - if (!isOutput) - b.create<linalg::FillOp>( - src.getLoc(), dst, - b.create(src.getLoc(), FloatAttr::get(floatType, 42.0))); + if (!isOutput) { + Value cst = + b.create(src.getLoc(), FloatAttr::get(floatType, 42.0)); + b.create<linalg::FillOp>(src.getLoc(), cst, dst); + } b.create<linalg::CopyOp>(src.getLoc(), src, dst); return success(); }