diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -167,9 +167,9 @@
   // If the op is fully static, it does not need padding.
   // TODO: there are cases where we may still want to pad to larger sizes.
-  if (llvm::all_of(opToPad.getShapedOperands(), [](Value v) {
-        return v.getType().cast<RankedTensorType>().hasStaticShape();
-      }))
+  assert(opToPad.hasTensorSemantics() &&
+         "expected operation to have tensor semantics");
+  if (!opToPad.hasDynamicShape())
     return success();
 
   OpBuilder::InsertionGuard g(rewriter);
@@ -177,16 +177,16 @@
   rewriter.setInsertionPointAfter(opToPad);
   // Make a copy of the shaped operands and update it.
   SmallVector<Value> newOperands;
-  newOperands.reserve(opToPad.getNumShapedOperands());
-  for (OpOperand &operand : opToPad.getShapedOpOperands()) {
+  newOperands.reserve(opToPad.getNumInputsAndOutputs());
+  for (OpOperand *opOperand : opToPad.getInputAndOutputOperands()) {
     Value paddedOperand;
     // If padding was requested but the shape cannot be bounded statically then
     // the pattern fails to apply.
-    if (failed(padOperandToSmallestStaticBoundingBox(rewriter, opToPad, operand,
-                                                     options, paddedOperand))) {
+    if (failed(padOperandToSmallestStaticBoundingBox(
+            rewriter, opToPad, *opOperand, options, paddedOperand))) {
       return failure();
     }
-    newOperands.push_back(paddedOperand ? paddedOperand : operand.get());
+    newOperands.push_back(paddedOperand ? paddedOperand : opOperand->get());
   }
 
   // Clone `opToPad` to operate on the statically padded shapes.
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
@@ -24,8 +24,8 @@
     return;
   TypeSwitch<Operation *, void>(op)
      .Case<linalg::LinalgOp>([&](linalg::LinalgOp linalgOp) {
-        operandSet.insert(linalgOp.getInputs().begin(),
-                          linalgOp.getInputs().end());
+        SmallVector<Value> inputOperands = linalgOp.getInputOperands();
+        operandSet.insert(inputOperands.begin(), inputOperands.end());
      })
      .Default([&](Operation *operation) {
        operandSet.insert(operation->operand_begin(), operation->operand_end());
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
@@ -147,14 +147,14 @@
   // Tile and Fuse for tensors inputs (TODO: all tensor operands).
   bool changed = false;
   for (LinalgOp linalgOp : llvm::reverse(linalgOps)) {
-    for (OpOperand &opOperand : linalgOp.getShapedOpOperands()) {
-      if (opOperand.get().getType().isa<MemRefType>()) {
+    for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
+      if (opOperand->get().getType().isa<MemRefType>()) {
        // TODO: LinalgDependenceGraph should be able to update itself.
        // The current naive and expensive reconstruction of the graph should be
        // removed.
        linalg::Aliases aliases;
        linalg::LinalgDependenceGraph graph(aliases, linalgOps);
-        if (auto info = fuseProducerOfBuffer(b, opOperand, graph)) {
+        if (auto info = fuseProducerOfBuffer(b, *opOperand, graph)) {
          auto *originalOp = info->originalProducer.getOperation();
          eraseSet.insert(originalOp);
          auto *originalOpInLinalgOpsVector =
@@ -163,11 +163,11 @@
          changed = true;
        }
      } else {
-        assert(opOperand.get().getType().isa<RankedTensorType>());
+        assert(opOperand->get().getType().isa<RankedTensorType>());
        // Tile and Fuse tensor input.
-        if (opOperand.getOperandNumber() >= linalgOp.getNumInputs())
+        if (opOperand->getOperandNumber() >= linalgOp.getNumInputs())
          continue;
-        if (auto info = fuseProducerOfTensor(b, opOperand)) {
+        if (auto info = fuseProducerOfTensor(b, *opOperand)) {
          auto *originalOp = info->originalProducer.getOperation();
          auto *originalOpInLinalgOpsVector =
              std::find(linalgOps.begin(), linalgOps.end(), originalOp);