diff --git a/flang/include/flang/Optimizer/Support/InitFIR.h b/flang/include/flang/Optimizer/Support/InitFIR.h
--- a/flang/include/flang/Optimizer/Support/InitFIR.h
+++ b/flang/include/flang/Optimizer/Support/InitFIR.h
@@ -27,7 +27,7 @@ namespace fir::support {
 #define FLANG_NONCODEGEN_DIALECT_LIST                                          \
-  mlir::AffineDialect, FIROpsDialect, hlfir::hlfirDialect,                     \
+  mlir::affine::AffineDialect, FIROpsDialect, hlfir::hlfirDialect,             \
       mlir::acc::OpenACCDialect, mlir::omp::OpenMPDialect,                     \
       mlir::scf::SCFDialect, mlir::arith::ArithDialect,                        \
       mlir::cf::ControlFlowDialect, mlir::func::FuncDialect,                   \
@@ -63,25 +63,25 @@ inline void registerMLIRPassesForFortranTools() {
   mlir::registerCanonicalizerPass();
   mlir::registerCSEPass();
-  mlir::registerAffineLoopFusionPass();
+  mlir::affine::registerAffineLoopFusionPass();
   mlir::registerLoopInvariantCodeMotionPass();
-  mlir::registerLoopCoalescingPass();
+  mlir::affine::registerLoopCoalescingPass();
   mlir::registerStripDebugInfoPass();
   mlir::registerPrintOpStatsPass();
   mlir::registerInlinerPass();
   mlir::registerSCCPPass();
-  mlir::registerAffineScalarReplacementPass();
+  mlir::affine::registerAffineScalarReplacementPass();
   mlir::registerSymbolDCEPass();
   mlir::registerLocationSnapshotPass();
-  mlir::registerAffinePipelineDataTransferPass();
+  mlir::affine::registerAffinePipelineDataTransferPass();
 
-  mlir::registerAffineVectorizePass();
-  mlir::registerAffineLoopUnrollPass();
-  mlir::registerAffineLoopUnrollAndJamPass();
-  mlir::registerSimplifyAffineStructuresPass();
-  mlir::registerAffineLoopInvariantCodeMotionPass();
-  mlir::registerAffineLoopTilingPass();
-  mlir::registerAffineDataCopyGenerationPass();
+  mlir::affine::registerAffineVectorizePass();
+  mlir::affine::registerAffineLoopUnrollPass();
+  mlir::affine::registerAffineLoopUnrollAndJamPass();
+  mlir::affine::registerSimplifyAffineStructuresPass();
+  mlir::affine::registerAffineLoopInvariantCodeMotionPass();
+  mlir::affine::registerAffineLoopTilingPass();
+  mlir::affine::registerAffineDataCopyGenerationPass();
   mlir::registerConvertAffineToStandardPass();
 }
diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td
--- a/flang/include/flang/Optimizer/Transforms/Passes.td
+++ b/flang/include/flang/Optimizer/Transforms/Passes.td
@@ -64,7 +64,8 @@
   }];
   let constructor = "::fir::createPromoteToAffinePass()";
   let dependentDialects = [
-    "fir::FIROpsDialect", "mlir::func::FuncDialect", "mlir::AffineDialect"
+    "fir::FIROpsDialect", "mlir::func::FuncDialect",
+    "mlir::affine::AffineDialect"
   ];
 }
 
@@ -78,7 +79,8 @@
   }];
   let constructor = "::fir::createAffineDemotionPass()";
   let dependentDialects = [
-    "fir::FIROpsDialect", "mlir::func::FuncDialect", "mlir::AffineDialect"
+    "fir::FIROpsDialect", "mlir::func::FuncDialect",
+    "mlir::affine::AffineDialect"
   ];
 }
diff --git a/flang/lib/Optimizer/Transforms/AffineDemotion.cpp b/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
--- a/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
@@ -46,16 +46,17 @@
 
 namespace {
 
-class AffineLoadConversion : public OpConversionPattern<mlir::AffineLoadOp> {
+class AffineLoadConversion
+    : public OpConversionPattern<mlir::affine::AffineLoadOp> {
 public:
-  using OpConversionPattern<mlir::AffineLoadOp>::OpConversionPattern;
+  using OpConversionPattern<mlir::affine::AffineLoadOp>::OpConversionPattern;
 
   LogicalResult
-  matchAndRewrite(mlir::AffineLoadOp op, OpAdaptor adaptor,
+  matchAndRewrite(mlir::affine::AffineLoadOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     SmallVector<Value> indices(adaptor.getIndices());
-    auto maybeExpandedMap =
-        expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
+    auto maybeExpandedMap = affine::expandAffineMap(rewriter, op.getLoc(),
+                                                    op.getAffineMap(), indices);
     if (!maybeExpandedMap)
       return failure();
 
@@ -68,16 +69,17 @@
   }
 };
 
-class AffineStoreConversion : public OpConversionPattern<mlir::AffineStoreOp> {
+class AffineStoreConversion
+    : public OpConversionPattern<mlir::affine::AffineStoreOp> {
 public:
-  using OpConversionPattern<mlir::AffineStoreOp>::OpConversionPattern;
+  using OpConversionPattern<mlir::affine::AffineStoreOp>::OpConversionPattern;
 
   LogicalResult
-  matchAndRewrite(mlir::AffineStoreOp op, OpAdaptor adaptor,
+  matchAndRewrite(mlir::affine::AffineStoreOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     SmallVector<Value> indices(op.getIndices());
-    auto maybeExpandedMap =
-        expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
+    auto maybeExpandedMap = affine::expandAffineMap(rewriter, op.getLoc(),
+                                                    op.getAffineMap(), indices);
     if (!maybeExpandedMap)
       return failure();
 
diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -227,7 +227,7 @@
     if (auto blockArg = value.dyn_cast<mlir::BlockArgument>()) {
       affineArgs.push_back(value);
       if (isa<fir::DoLoopOp>(blockArg.getOwner()->getParentOp()) ||
-          isa<mlir::AffineForOp>(blockArg.getOwner()->getParentOp()))
+          isa<mlir::affine::AffineForOp>(blockArg.getOwner()->getParentOp()))
         return {mlir::getAffineDimExpr(dimCount++, value.getContext())};
       return {mlir::getAffineSymbolExpr(symCount++, value.getContext())};
     }
@@ -397,7 +397,7 @@
 }
 
 /// Returns affine.apply and fir.convert from array_coor and gendims
-static std::pair<mlir::AffineApplyOp, fir::ConvertOp>
+static std::pair<mlir::affine::AffineApplyOp, fir::ConvertOp>
 createAffineOps(mlir::Value arrayRef, mlir::PatternRewriter &rewriter) {
   auto acoOp = arrayRef.getDefiningOp<ArrayCoorOp>();
   auto affineMap =
@@ -407,8 +407,8 @@
 
   populateIndexArgs(acoOp, indexArgs, rewriter);
 
-  auto affineApply = rewriter.create<mlir::AffineApplyOp>(acoOp.getLoc(),
-                                                          affineMap, indexArgs);
+  auto affineApply = rewriter.create<mlir::affine::AffineApplyOp>(
+      acoOp.getLoc(), affineMap, indexArgs);
   auto arrayElementType = coordinateArrayElement(acoOp);
   auto newType =
       mlir::MemRefType::get({mlir::ShapedType::kDynamic}, arrayElementType);
@@ -420,7 +420,7 @@
 static void rewriteLoad(fir::LoadOp loadOp, mlir::PatternRewriter &rewriter) {
   rewriter.setInsertionPoint(loadOp);
   auto affineOps = createAffineOps(loadOp.getMemref(), rewriter);
-  rewriter.replaceOpWithNewOp<mlir::AffineLoadOp>(
+  rewriter.replaceOpWithNewOp<mlir::affine::AffineLoadOp>(
       loadOp, affineOps.second.getResult(), affineOps.first.getResult());
 }
 
@@ -428,9 +428,9 @@
                          mlir::PatternRewriter &rewriter) {
   rewriter.setInsertionPoint(storeOp);
   auto affineOps = createAffineOps(storeOp.getMemref(), rewriter);
-  rewriter.replaceOpWithNewOp<mlir::AffineStoreOp>(storeOp, storeOp.getValue(),
-                                                   affineOps.second.getResult(),
-                                                   affineOps.first.getResult());
+  rewriter.replaceOpWithNewOp<mlir::affine::AffineStoreOp>(
+      storeOp, storeOp.getValue(), affineOps.second.getResult(),
+      affineOps.first.getResult());
 }
 
 static void rewriteMemoryOps(Block *block, mlir::PatternRewriter &rewriter) {
@@ -483,7 +483,7 @@
   }
 
 private:
-  std::pair<mlir::AffineForOp, mlir::Value>
+  std::pair<mlir::affine::AffineForOp, mlir::Value>
   createAffineFor(fir::DoLoopOp op, mlir::PatternRewriter &rewriter) const {
     if (auto constantStep = constantIntegerLike(op.getStep()))
       if (*constantStep > 0)
@@ -492,10 +492,10 @@
   }
 
   // when step for the loop is positive compile time constant
-  std::pair<mlir::AffineForOp, mlir::Value>
+  std::pair<mlir::affine::AffineForOp, mlir::Value>
   positiveConstantStep(fir::DoLoopOp op, int64_t step,
                        mlir::PatternRewriter &rewriter) const {
-    auto affineFor = rewriter.create<mlir::AffineForOp>(
+    auto affineFor = rewriter.create<mlir::affine::AffineForOp>(
        op.getLoc(), ValueRange(op.getLowerBound()),
        mlir::AffineMap::get(0, 1,
                             mlir::getAffineSymbolExpr(0, op.getContext())),
@@ -506,14 +506,14 @@
     return std::make_pair(affineFor, affineFor.getInductionVar());
   }
 
-  std::pair<mlir::AffineForOp, mlir::Value>
+  std::pair<mlir::affine::AffineForOp, mlir::Value>
   genericBounds(fir::DoLoopOp op, mlir::PatternRewriter &rewriter) const {
     auto lowerBound = mlir::getAffineSymbolExpr(0, op.getContext());
     auto upperBound = mlir::getAffineSymbolExpr(1, op.getContext());
     auto step = mlir::getAffineSymbolExpr(2, op.getContext());
     mlir::AffineMap upperBoundMap = mlir::AffineMap::get(
         0, 3, (upperBound - lowerBound + step).floorDiv(step));
-    auto genericUpperBound = rewriter.create<mlir::AffineApplyOp>(
+    auto genericUpperBound = rewriter.create<mlir::affine::AffineApplyOp>(
         op.getLoc(), upperBoundMap,
         ValueRange({op.getLowerBound(), op.getUpperBound(), op.getStep()}));
     auto actualIndexMap = mlir::AffineMap::get(
@@ -521,7 +521,7 @@
         (lowerBound + mlir::getAffineDimExpr(0, op.getContext())) *
             mlir::getAffineSymbolExpr(1, op.getContext()));
 
-    auto affineFor = rewriter.create<mlir::AffineForOp>(
+    auto affineFor = rewriter.create<mlir::affine::AffineForOp>(
         op.getLoc(), ValueRange(),
         AffineMap::getConstantMap(0, op.getContext()),
         genericUpperBound.getResult(),
@@ -529,7 +529,7 @@
             1 + mlir::getAffineSymbolExpr(0, op.getContext())),
         1);
     rewriter.setInsertionPointToStart(affineFor.getBody());
-    auto actualIndex = rewriter.create<mlir::AffineApplyOp>(
+    auto actualIndex = rewriter.create<mlir::affine::AffineApplyOp>(
         op.getLoc(), actualIndexMap,
         ValueRange(
             {affineFor.getInductionVar(), op.getLowerBound(), op.getStep()}));
@@ -558,7 +558,7 @@
                  << "AffineIfConversion: couldn't calculate affine condition\n";);
       return failure();
     }
-    auto affineIf = rewriter.create<mlir::AffineIfOp>(
+    auto affineIf = rewriter.create<mlir::affine::AffineIfOp>(
         op.getLoc(), affineCondition.getIntegerSet(),
         affineCondition.getAffineArgs(), !op.getElseRegion().empty());
     rewriter.startRootUpdate(affineIf);
@@ -596,7 +596,7 @@
     patterns.insert<AffineLoopConversion>(context, functionAnalysis);
     patterns.insert<AffineIfConversion>(context, functionAnalysis);
     mlir::ConversionTarget target = *context;
-    target.addLegalDialect<mlir::AffineDialect, FIROpsDialect,
+    target.addLegalDialect<mlir::affine::AffineDialect, FIROpsDialect,
                            mlir::scf::SCFDialect, mlir::arith::ArithDialect,
                            mlir::func::FuncDialect>();
     target.addDynamicallyLegalOp<fir::IfOp>([&functionAnalysis](fir::IfOp op) {
diff --git a/flang/lib/Optimizer/Transforms/CharacterConversion.cpp b/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
--- a/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
@@ -110,7 +110,7 @@
     mlir::RewritePatternSet patterns(context);
     patterns.insert<CharacterConvertConversion>(context);
     mlir::ConversionTarget target(*context);
-    target.addLegalDialect<mlir::AffineDialect, fir::FIROpsDialect,
+    target.addLegalDialect<mlir::affine::AffineDialect, fir::FIROpsDialect,
                            mlir::arith::ArithDialect,
                            mlir::func::FuncDialect>();
diff --git a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
--- a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
+++ b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
@@ -316,8 +316,9 @@
     patterns.insert<CfgLoopConv, CfgIfConv, CfgIterWhileConv>(
         context, forceLoopToExecuteOnce);
     mlir::ConversionTarget target(*context);
-    target.addLegalDialect<mlir::AffineDialect, mlir::cf::ControlFlowDialect,
-                           FIROpsDialect, mlir::func::FuncDialect>();
+    target.addLegalDialect<mlir::affine::AffineDialect,
+                           mlir::cf::ControlFlowDialect, FIROpsDialect,
+                           mlir::func::FuncDialect>();
 
     // apply the patterns
     target.addIllegalOp<ResultOp, DoLoopOp, IfOp, IterWhileOp>();
diff --git a/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp b/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
--- a/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
@@ -238,8 +238,9 @@
     patterns.insert<SelectTypeConv>(context, moduleMutex);
     patterns.insert<DispatchOpConv>(context, bindingTables);
     mlir::ConversionTarget target(*context);
-    target.addLegalDialect<mlir::AffineDialect, mlir::cf::ControlFlowDialect,
-                           FIROpsDialect, mlir::func::FuncDialect>();
+    target.addLegalDialect<mlir::affine::AffineDialect,
+                           mlir::cf::ControlFlowDialect, FIROpsDialect,
+                           mlir::func::FuncDialect>();
 
     // apply the patterns
     target.addIllegalOp<SelectTypeOp, DispatchOp>();
diff --git a/mlir/docs/DialectConversion.md b/mlir/docs/DialectConversion.md
--- a/mlir/docs/DialectConversion.md
+++ b/mlir/docs/DialectConversion.md
@@ -98,7 +98,8 @@
   /// Mark all operations within the Affine dialect as having dynamic legality
   /// constraints.
-  addDynamicallyLegalDialect<AffineDialect>([](Operation *op) { ... });
+  addDynamicallyLegalDialect<affine::AffineDialect>(
+      [](Operation *op) { ... });
 
   /// Mark `func.return` as dynamically legal, but provide a specific legality
   /// callback.
diff --git a/mlir/docs/Tutorials/Toy/Ch-5.md b/mlir/docs/Tutorials/Toy/Ch-5.md
--- a/mlir/docs/Tutorials/Toy/Ch-5.md
+++ b/mlir/docs/Tutorials/Toy/Ch-5.md
@@ -64,7 +64,7 @@
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arith`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, arith::ArithDialect,
+  target.addLegalDialect<affine::AffineDialect, arith::ArithDialect,
                          func::FuncDialect, memref::MemRefDialect>();
 
   // We also define the Toy dialect as Illegal so that the conversion will fail
diff --git a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
--- a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
@@ -75,14 +75,15 @@
   // loop induction variables.
   SmallVector<int64_t, 4> lowerBounds(tensorType.getRank(), /*Value=*/0);
   SmallVector<int64_t, 4> steps(tensorType.getRank(), /*Value=*/1);
-  buildAffineLoopNest(
+  affine::buildAffineLoopNest(
       rewriter, loc, lowerBounds, tensorType.getShape(), steps,
       [&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
         // Call the processing function with the rewriter, the memref operands,
         // and the loop induction variables. This function will return the value
        // to store at the current index.
         Value valueToStore = processIteration(nestedBuilder, operands, ivs);
-        nestedBuilder.create<AffineStoreOp>(loc, valueToStore, alloc, ivs);
+        nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
+                                                    ivs);
       });
 
   // Replace this operation with the generated alloc.
@@ -113,9 +114,9 @@
 
       // Generate loads for the element of 'lhs' and 'rhs' at the
       // inner loop.
-      auto loadedLhs = builder.create<AffineLoadOp>(
+      auto loadedLhs = builder.create<affine::AffineLoadOp>(
           loc, binaryAdaptor.getLhs(), loopIvs);
-      auto loadedRhs = builder.create<AffineLoadOp>(
+      auto loadedRhs = builder.create<affine::AffineLoadOp>(
           loc, binaryAdaptor.getRhs(), loopIvs);
 
       // Create the binary operation performed on the loaded
@@ -174,7 +175,7 @@
     // The last dimension is the base case of the recursion, at this point
     // we store the element at the given index.
     if (dimension == valueShape.size()) {
-      rewriter.create<AffineStoreOp>(
+      rewriter.create<affine::AffineStoreOp>(
           loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
           llvm::ArrayRef(indices));
       return;
@@ -291,8 +292,8 @@
         // Transpose the elements by generating a load from the
         // reverse indices.
         SmallVector<Value, 8> reverseIvs(llvm::reverse(loopIvs));
-        return builder.create<AffineLoadOp>(loc, input,
-                                            reverseIvs);
+        return builder.create<affine::AffineLoadOp>(loc, input,
+                                                    reverseIvs);
       });
   return success();
 }
@@ -313,7 +314,8 @@
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ToyToAffineLoweringPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, func::FuncDialect, memref::MemRefDialect>();
+    registry.insert<affine::AffineDialect, func::FuncDialect,
+                    memref::MemRefDialect>();
   }
   void runOnOperation() final;
 };
@@ -327,8 +329,9 @@
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arith`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, BuiltinDialect, arith::ArithDialect,
-                         func::FuncDialect, memref::MemRefDialect>();
+  target.addLegalDialect<affine::AffineDialect, BuiltinDialect,
+                         arith::ArithDialect, func::FuncDialect,
+                         memref::MemRefDialect>();
 
   // We also define the Toy dialect as Illegal so that the conversion will fail
   // if any of these operations are *not* converted. Given that we actually want
diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -148,8 +148,8 @@
 
     // Add optimizations if enabled.
    if (enableOpt) {
-      optPM.addPass(mlir::createLoopFusionPass());
-      optPM.addPass(mlir::createAffineScalarReplacementPass());
+      optPM.addPass(mlir::affine::createLoopFusionPass());
+      optPM.addPass(mlir::affine::createAffineScalarReplacementPass());
     }
   }
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
--- a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
@@ -75,14 +75,15 @@
   // loop induction variables.
   SmallVector<int64_t, 4> lowerBounds(tensorType.getRank(), /*Value=*/0);
   SmallVector<int64_t, 4> steps(tensorType.getRank(), /*Value=*/1);
-  buildAffineLoopNest(
+  affine::buildAffineLoopNest(
      rewriter, loc, lowerBounds, tensorType.getShape(), steps,
       [&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
         // Call the processing function with the rewriter, the memref operands,
         // and the loop induction variables. This function will return the value
         // to store at the current index.
         Value valueToStore = processIteration(nestedBuilder, operands, ivs);
-        nestedBuilder.create<AffineStoreOp>(loc, valueToStore, alloc, ivs);
+        nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
+                                                    ivs);
       });
 
   // Replace this operation with the generated alloc.
@@ -113,9 +114,9 @@
 
       // Generate loads for the element of 'lhs' and 'rhs' at the
       // inner loop.
-      auto loadedLhs = builder.create<AffineLoadOp>(
+      auto loadedLhs = builder.create<affine::AffineLoadOp>(
          loc, binaryAdaptor.getLhs(), loopIvs);
-      auto loadedRhs = builder.create<AffineLoadOp>(
+      auto loadedRhs = builder.create<affine::AffineLoadOp>(
           loc, binaryAdaptor.getRhs(), loopIvs);
 
       // Create the binary operation performed on the loaded
@@ -174,7 +175,7 @@
     // The last dimension is the base case of the recursion, at this point
     // we store the element at the given index.
     if (dimension == valueShape.size()) {
-      rewriter.create<AffineStoreOp>(
+      rewriter.create<affine::AffineStoreOp>(
           loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
           llvm::ArrayRef(indices));
       return;
@@ -291,8 +292,8 @@
         // Transpose the elements by generating a load from the
         // reverse indices.
         SmallVector<Value, 8> reverseIvs(llvm::reverse(loopIvs));
-        return builder.create<AffineLoadOp>(loc, input,
-                                            reverseIvs);
+        return builder.create<affine::AffineLoadOp>(loc, input,
+                                                    reverseIvs);
       });
   return success();
 }
@@ -313,7 +314,8 @@
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ToyToAffineLoweringPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, func::FuncDialect, memref::MemRefDialect>();
+    registry.insert<affine::AffineDialect, func::FuncDialect,
+                    memref::MemRefDialect>();
   }
   void runOnOperation() final;
 };
@@ -327,8 +329,9 @@
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arith`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, BuiltinDialect, arith::ArithDialect,
-                         func::FuncDialect, memref::MemRefDialect>();
+  target.addLegalDialect<affine::AffineDialect, BuiltinDialect,
+                         arith::ArithDialect, func::FuncDialect,
+                         memref::MemRefDialect>();
 
   // We also define the Toy dialect as Illegal so that the conversion will fail
   // if any of these operations are *not* converted. Given that we actually want
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -166,8 +166,8 @@
 
     // Add optimizations if enabled.
    if (enableOpt) {
-      optPM.addPass(mlir::createLoopFusionPass());
-      optPM.addPass(mlir::createAffineScalarReplacementPass());
+      optPM.addPass(mlir::affine::createLoopFusionPass());
+      optPM.addPass(mlir::affine::createAffineScalarReplacementPass());
     }
   }
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
--- a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
@@ -75,14 +75,15 @@
   // loop induction variables.
   SmallVector<int64_t, 4> lowerBounds(tensorType.getRank(), /*Value=*/0);
   SmallVector<int64_t, 4> steps(tensorType.getRank(), /*Value=*/1);
-  buildAffineLoopNest(
+  affine::buildAffineLoopNest(
       rewriter, loc, lowerBounds, tensorType.getShape(), steps,
       [&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
         // Call the processing function with the rewriter, the memref operands,
         // and the loop induction variables. This function will return the value
         // to store at the current index.
         Value valueToStore = processIteration(nestedBuilder, operands, ivs);
-        nestedBuilder.create<AffineStoreOp>(loc, valueToStore, alloc, ivs);
+        nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
+                                                    ivs);
       });
 
   // Replace this operation with the generated alloc.
@@ -113,9 +114,9 @@
 
       // Generate loads for the element of 'lhs' and 'rhs' at the
       // inner loop.
-      auto loadedLhs = builder.create<AffineLoadOp>(
+      auto loadedLhs = builder.create<affine::AffineLoadOp>(
           loc, binaryAdaptor.getLhs(), loopIvs);
-      auto loadedRhs = builder.create<AffineLoadOp>(
+      auto loadedRhs = builder.create<affine::AffineLoadOp>(
           loc, binaryAdaptor.getRhs(), loopIvs);
 
       // Create the binary operation performed on the loaded
@@ -174,7 +175,7 @@
     // The last dimension is the base case of the recursion, at this point
     // we store the element at the given index.
     if (dimension == valueShape.size()) {
-      rewriter.create<AffineStoreOp>(
+      rewriter.create<affine::AffineStoreOp>(
          loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
           llvm::ArrayRef(indices));
       return;
@@ -291,8 +292,8 @@
         // Transpose the elements by generating a load from the
         // reverse indices.
         SmallVector<Value, 8> reverseIvs(llvm::reverse(loopIvs));
-        return builder.create<AffineLoadOp>(loc, input,
-                                            reverseIvs);
+        return builder.create<affine::AffineLoadOp>(loc, input,
+                                                    reverseIvs);
       });
   return success();
 }
@@ -313,7 +314,8 @@
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ToyToAffineLoweringPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, func::FuncDialect, memref::MemRefDialect>();
+    registry.insert<affine::AffineDialect, func::FuncDialect,
+                    memref::MemRefDialect>();
   }
   void runOnOperation() final;
 };
@@ -327,8 +329,9 @@
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arith`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, BuiltinDialect, arith::ArithDialect,
-                         func::FuncDialect, memref::MemRefDialect>();
+  target.addLegalDialect<affine::AffineDialect, BuiltinDialect,
+                         arith::ArithDialect, func::FuncDialect,
+                         memref::MemRefDialect>();
 
   // We also define the Toy dialect as Illegal so that the conversion will fail
   // if any of these operations are *not* converted. Given that we actually want
diff --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -167,8 +167,8 @@
 
     // Add optimizations if enabled.
    if (enableOpt) {
-      optPM.addPass(mlir::createLoopFusionPass());
-      optPM.addPass(mlir::createAffineScalarReplacementPass());
+      optPM.addPass(mlir::affine::createLoopFusionPass());
+      optPM.addPass(mlir::affine::createAffineScalarReplacementPass());
     }
   }
diff --git a/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h b/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
--- a/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
+++ b/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
@@ -12,16 +12,18 @@
 #include "mlir/Support/LLVM.h"
 
 namespace mlir {
-class AffineForOp;
 class Location;
 struct LogicalResult;
 class OpBuilder;
 class Pass;
 class RewritePattern;
+class RewritePatternSet;
 class Value;
 class ValueRange;
-class RewritePatternSet;
+
+namespace affine {
+class AffineForOp;
+} // namespace affine
 
 #define GEN_PASS_DECL_CONVERTAFFINETOSTANDARD
 #include "mlir/Conversion/Passes.h.inc"
@@ -37,11 +39,11 @@
 
 /// Emit code that computes the lower bound of the given affine loop using
 /// standard arithmetic operations.
-Value lowerAffineLowerBound(AffineForOp op, OpBuilder &builder);
+Value lowerAffineLowerBound(affine::AffineForOp op, OpBuilder &builder);
 
 /// Emit code that computes the upper bound of the given affine loop using
 /// standard arithmetic operations.
-Value lowerAffineUpperBound(AffineForOp op, OpBuilder &builder);
+Value lowerAffineUpperBound(affine::AffineForOp op, OpBuilder &builder);
 
 /// Lowers affine control flow operations (ForStmt, IfStmt and AffineApplyOp)
 /// to equivalent lower-level constructs (flow of basic blocks and arithmetic
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -856,7 +856,7 @@
 def ConvertParallelLoopToGpu : Pass<"convert-parallel-loops-to-gpu"> {
   let summary = "Convert mapped scf.parallel ops to gpu launch operations";
   let constructor = "mlir::createParallelLoopToGpuPass()";
-  let dependentDialects = ["AffineDialect", "gpu::GPUDialect"];
+  let dependentDialects = ["affine::AffineDialect", "gpu::GPUDialect"];
 }
 
 //===----------------------------------------------------------------------===//
@@ -1033,7 +1033,7 @@
           "dialect";
   let constructor = "mlir::createConvertVectorToGPUPass()";
   let dependentDialects = [
-    "memref::MemRefDialect", "gpu::GPUDialect", "AffineDialect",
+    "memref::MemRefDialect", "gpu::GPUDialect", "affine::AffineDialect",
     "vector::VectorDialect", "nvgpu::NVGPUDialect"
   ];
 
@@ -1052,7 +1052,7 @@
           "dialect";
   let constructor = "mlir::createConvertVectorToSCFPass()";
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "memref::MemRefDialect",
     "scf::SCFDialect",
     "tensor::TensorDialect"
   ];
diff --git a/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h b/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h
--- a/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h
+++ b/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h
@@ -11,7 +11,6 @@
 #include "mlir/Support/LLVM.h"
 
 namespace mlir {
-class AffineForOp;
 class ConversionTarget;
 struct LogicalResult;
 class MLIRContext;
@@ -19,6 +18,10 @@
 class Operation;
 class RewritePatternSet;
 
+namespace affine {
+class AffineForOp;
+} // namespace affine
+
 namespace scf {
 class ForOp;
 } // namespace scf
@@ -37,7 +40,7 @@
 // TODO: Consider removing this in favor of affine.for -> affine.parallel
 // detection followed by an affine.parallel -> scf.parallel -> gpu.launch
 // conversion
-LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
+LogicalResult convertAffineLoopNestToGPULaunch(affine::AffineForOp forOp,
                                                unsigned numBlockDims,
                                                unsigned numThreadDims);
 
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h b/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
--- a/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
@@ -21,13 +21,14 @@
 #include <optional>
 
 namespace mlir {
+class Operation;
+
+namespace affine {
 class AffineApplyOp;
 class AffineForOp;
 class AffineValueMap;
 class FlatAffineRelation;
 class FlatAffineValueConstraints;
-class Operation;
 
 /// A description of a (parallelizable) reduction in an affine loop.
 struct LoopReduction {
@@ -191,6 +192,7 @@
     AffineForOp forOp, unsigned maxLoopDepth,
     std::vector<SmallVector<DependenceComponent, 2>> *depCompsVec);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_AFFINEANALYSIS_H
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h b/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
--- a/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
@@ -22,23 +22,24 @@
 #include <optional>
 
 namespace mlir {
-
-class AffineCondition;
-class AffineForOp;
-class AffineIfOp;
-class AffineParallelOp;
 class AffineMap;
-class AffineValueMap;
 class IntegerSet;
-class MLIRContext;
-class Value;
 class MemRefType;
+class MLIRContext;
 struct MutableAffineMap;
+class Value;
 
 namespace presburger {
 class MultiAffineFunction;
 } // namespace presburger
 
+namespace affine {
+class AffineCondition;
+class AffineForOp;
+class AffineIfOp;
+class AffineParallelOp;
+class AffineValueMap;
+
 /// FlatAffineValueConstraints is an extension of FlatLinearValueConstraints
 /// with helper functions for Affine dialect ops.
 class FlatAffineValueConstraints : public FlatLinearValueConstraints {
@@ -254,6 +255,7 @@
 LogicalResult getRelationFromMap(const AffineValueMap &map,
                                  FlatAffineRelation &rel);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_AFFINESTRUCTURES_H
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
--- a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
@@ -18,16 +18,17 @@
 #include <optional>
 
 namespace mlir {
-
 class AffineExpr;
-class AffineForOp;
 class AffineMap;
 class BlockArgument;
 class MemRefType;
-class NestedPattern;
 class Operation;
 class Value;
 
+namespace affine {
+class AffineForOp;
+class NestedPattern;
+
 /// Returns the trip count of the loop as an affine map with its corresponding
 /// operands if the latter is expressible as an affine expression, and nullptr
 /// otherwise. This method always succeeds as long as the lower bound is not a
@@ -83,6 +84,7 @@
 // the support.
 bool isOpwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_LOOPANALYSIS_H
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h b/mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h
--- a/mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h
@@ -14,9 +14,10 @@
 #include "llvm/Support/Allocator.h"
 
 namespace mlir {
+class Operation;
 
+namespace affine {
 class NestedPattern;
-class Operation;
 
 /// A NestedPattern captures nested patterns in the IR.
 /// It is used in conjunction with a scoped NestedPatternContext which is an
@@ -191,6 +192,7 @@
 bool isLoadOrStore(Operation &op);
 
 } // namespace matcher
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_NESTEDMATCHER_H
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h b/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h
--- a/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h
@@ -22,15 +22,16 @@
 #include <optional>
 
 namespace mlir {
-
-class AffineForOp;
-class AffineValueMap;
 class Block;
 class Location;
-struct MemRefAccess;
 class Operation;
 class Value;
 
+namespace affine {
+class AffineForOp;
+class AffineValueMap;
+struct MemRefAccess;
+
 // LoopNestStateCollector walks loop nests and collects load and store
 // operations, and whether or not a region holding op other than ForOp and IfOp
 // was encountered in the loop nest.
@@ -576,6 +577,7 @@
 simplifyConstrainedMinMaxOp(Operation *op,
                             FlatAffineValueConstraints constraints);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_UTILS_H
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h
@@ -17,8 +17,6 @@
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/OpDefinition.h"
 
-namespace mlir {
 #include "mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h.inc"
-} // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_IR_AFFINEMEMORYOPINTERFACES_H
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
@@ -20,62 +20,59 @@
     Interface to query characteristics of read-like ops with affine
     restrictions.
   }];
+  let cppNamespace = "::mlir::affine";
   let methods = [
     InterfaceMethod<
       /*desc=*/"Returns the memref operand to read from.",
-      /*retTy=*/"Value",
+      /*retTy=*/"::mlir::Value",
       /*methodName=*/"getMemRef",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getOperand(op.getMemRefOperandIndex());
+        return $_op.getOperand($_op.getMemRefOperandIndex());
      }]
    >,
     InterfaceMethod<
       /*desc=*/"Returns the type of the memref operand.",
-      /*retTy=*/"MemRefType",
+      /*retTy=*/"::mlir::MemRefType",
       /*methodName=*/"getMemRefType",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getMemRef().getType().template cast<MemRefType>();
+        return $_op.getMemRef().getType().template cast<::mlir::MemRefType>();
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns affine map operands.",
-      /*retTy=*/"Operation::operand_range",
+      /*retTy=*/"::mlir::Operation::operand_range",
       /*methodName=*/"getMapOperands",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return llvm::drop_begin(op.getOperands(), 1);
+        return llvm::drop_begin($_op.getOperands(), 1);
       }]
     >,
     InterfaceMethod<
       /*desc=*/[{
        Returns the affine map used to index the memref for this operation.
      }],
-      /*retTy=*/"AffineMap",
+      /*retTy=*/"::mlir::AffineMap",
       /*methodName=*/"getAffineMap",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getAffineMapAttr().getValue();
+        return $_op.getAffineMapAttr().getValue();
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns the value read by this operation.",
-      /*retTy=*/"Value",
+      /*retTy=*/"::mlir::Value",
       /*methodName=*/"getValue",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        return cast<ConcreteOp>(this->getOperation());
+        return $_op;
       }]
     >,
   ];
@@ -86,63 +83,59 @@
     Interface to query characteristics of write-like ops with affine
     restrictions.
   }];
+  let cppNamespace = "::mlir::affine";
   let methods = [
     InterfaceMethod<
       /*desc=*/"Returns the memref operand to write to.",
-      /*retTy=*/"Value",
+      /*retTy=*/"::mlir::Value",
       /*methodName=*/"getMemRef",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getOperand(op.getMemRefOperandIndex());
+        return $_op.getOperand($_op.getMemRefOperandIndex());
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns the type of the memref operand.",
-      /*retTy=*/"MemRefType",
+      /*retTy=*/"::mlir::MemRefType",
       /*methodName=*/"getMemRefType",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getMemRef().getType().template cast<MemRefType>();
+        return $_op.getMemRef().getType().template cast<::mlir::MemRefType>();
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns affine map operands.",
-      /*retTy=*/"Operation::operand_range",
+      /*retTy=*/"::mlir::Operation::operand_range",
       /*methodName=*/"getMapOperands",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return llvm::drop_begin(op.getOperands(), 2);
+        return llvm::drop_begin($_op.getOperands(), 2);
       }]
     >,
     InterfaceMethod<
       /*desc=*/[{
         Returns the affine map used to index the memref for this operation.
       }],
-      /*retTy=*/"AffineMap",
+      /*retTy=*/"::mlir::AffineMap",
       /*methodName=*/"getAffineMap",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getAffineMapAttr().getValue();
+        return $_op.getAffineMapAttr().getValue();
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns the value to store.",
-      /*retTy=*/"Value",
+      /*retTy=*/"::mlir::Value",
       /*methodName=*/"getValueToStore",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getOperand(op.getStoredValOperandIndex());
+        return $_op.getOperand($_op.getStoredValOperandIndex());
       }]
     >,
   ];
@@ -155,20 +148,21 @@
     memref operand. The memref argument given to this interface must match
     one of those memref operands.
   }];
+  let cppNamespace = "::mlir::affine";
   let methods = [
     InterfaceMethod<
       /*desc=*/"Returns the AffineMapAttr associated with 'memref'.",
-      /*retTy=*/"NamedAttribute",
+      /*retTy=*/"::mlir::NamedAttribute",
       /*methodName=*/"getAffineMapAttrForMemRef",
-      /*args=*/(ins "Value":$memref),
+      /*args=*/(ins "::mlir::Value":$memref),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        assert(memref == op.getMemRef() &&
+        assert(memref == $_op.getMemRef() &&
                "Expected memref argument to match memref operand");
-        return {StringAttr::get(op.getContext(), op.getMapAttrStrName()),
-                op.getAffineMapAttr()};
+        return {::mlir::StringAttr::get(
+                    $_op.getContext(), $_op.getMapAttrStrName()),
+                $_op.getAffineMapAttr()};
       }]
     >,
   ];
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
@@ -22,13 +22,12 @@
 #include "mlir/Interfaces/LoopLikeInterface.h"
 
 namespace mlir {
+namespace affine {
+
 class AffineApplyOp;
 class AffineBound;
 class AffineValueMap;
 
-/// TODO: These should be renamed if they are on the mlir namespace.
-/// Ideally, they should go in a mlir::affine:: namespace.
-
 /// A utility function to check if a value is defined at the top level of an
 /// op with trait `AffineScope` or is a region argument for such an op. A value
 /// of index type defined at the top level is always a valid symbol for all its
@@ -438,13 +437,18 @@
 /// argument.
 void fullyComposeAffineMapAndOperands(AffineMap *map,
                                       SmallVectorImpl<Value> *operands);
+
+} // namespace affine
 } // namespace mlir
 
 #include "mlir/Dialect/Affine/IR/AffineOpsDialect.h.inc"
 
 #define GET_OP_CLASSES
 #include "mlir/Dialect/Affine/IR/AffineOps.h.inc"
 
 namespace mlir {
+namespace affine {
+
 /// Returns true if the provided value is the induction variable of an
 /// AffineForOp.
 bool isAffineForInductionVar(Value val);
@@ -537,6 +541,7 @@
   friend class AffineForOp;
 };
 
+} // namespace affine
 } // namespace mlir
 
 #endif
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -21,7 +21,7 @@
 def Affine_Dialect : Dialect {
   let name = "affine";
-  let cppNamespace = "mlir";
+  let cppNamespace = "::mlir::affine";
   let hasConstantMaterializer = 1;
   let dependentDialects = ["arith::ArithDialect"];
 }
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h b/mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h
@@ -18,6 +18,7 @@
 #include "mlir/IR/Value.h"
 
 namespace mlir {
+namespace affine {
 
 /// An AffineValueMap is an affine map plus its ML value operands and
 /// results for analysis purposes. The structure is still a tree form that is
@@ -89,6 +90,7 @@
   SmallVector<Value, 4> results;
 };
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_IR_AFFINEVALUEMAP_H
diff --git a/mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h b/mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h
--- a/mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h
+++ b/mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h
@@ -21,9 +21,11 @@
 #include "llvm/ADT/SmallVector.h"
 
 namespace mlir {
+class Operation;
+
+namespace affine {
 class AffineForOp;
 struct ComputationSliceState;
-class Operation;
 
 // TODO: Extend this module to include utility functions for querying fusion
 // cost/storage reduction, and for performing the loop fusion transformation.
@@ -165,6 +167,8 @@
 void gatherProducerConsumerMemrefs(ArrayRef<Operation *> srcOps,
                                    ArrayRef<Operation *> dstOps,
                                    DenseSet<Value> &producerConsumerMemrefs);
+
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_LOOPFUSIONUTILS_H
diff --git a/mlir/include/mlir/Dialect/Affine/LoopUtils.h b/mlir/include/mlir/Dialect/Affine/LoopUtils.h
--- a/mlir/include/mlir/Dialect/Affine/LoopUtils.h
+++ b/mlir/include/mlir/Dialect/Affine/LoopUtils.h
@@ -22,10 +22,8 @@
 #include <optional>
 
 namespace mlir {
-class AffineForOp;
 class AffineMap;
 class LoopLikeOpInterface;
-struct MemRefRegion;
 class OpBuilder;
 class Value;
 class ValueRange;
@@ -39,6 +37,10 @@
 class ParallelOp;
 } // namespace scf
 
+namespace affine {
+class AffineForOp;
+struct MemRefRegion;
+
 /// Unrolls this for operation completely if the trip count is known to be
 /// constant. Returns failure otherwise.
 LogicalResult loopUnrollFull(AffineForOp forOp);
@@ -345,6 +347,7 @@
   return result;
 }
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_LOOPUTILS_H
diff --git a/mlir/include/mlir/Dialect/Affine/Passes.h b/mlir/include/mlir/Dialect/Affine/Passes.h
--- a/mlir/include/mlir/Dialect/Affine/Passes.h
+++ b/mlir/include/mlir/Dialect/Affine/Passes.h
@@ -23,6 +23,7 @@
 class FuncOp;
 } // namespace func
 
+namespace affine {
 class AffineForOp;
 
 /// Fusion mode to attempt. The default mode `Greedy` does both
@@ -123,6 +124,7 @@
 
 #define GEN_PASS_REGISTRATION
 #include "mlir/Dialect/Affine/Passes.h.inc"
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_PASSES_H
diff --git a/mlir/include/mlir/Dialect/Affine/Passes.td b/mlir/include/mlir/Dialect/Affine/Passes.td
--- a/mlir/include/mlir/Dialect/Affine/Passes.td
+++ b/mlir/include/mlir/Dialect/Affine/Passes.td
@@ -17,7 +17,7 @@
 def AffineDataCopyGeneration
     : Pass<"affine-data-copy-generate", "func::FuncOp"> {
   let summary = "Generate explicit copying for affine memory operations";
-  let constructor = "mlir::createAffineDataCopyGenerationPass()";
+  let constructor = "mlir::affine::createAffineDataCopyGenerationPass()";
   let dependentDialects = ["memref::MemRefDialect"];
   let options = [
     Option<"fastMemoryCapacity", "fast-mem-capacity", "uint64_t",
@@ -152,7 +152,7 @@
     }
     ```
   }];
-  let constructor = "mlir::createLoopFusionPass()";
+  let constructor = "mlir::affine::createLoopFusionPass()";
   let options = [
     Option<"computeToleranceThreshold", "fusion-compute-tolerance", "double",
            /*default=*/"0.30f", "Fractional increase in additional computation "
@@ -166,12 +166,12 @@
     Option<"maximalFusion", "fusion-maximal", "bool", /*default=*/"false",
            "Enables maximal loop fusion">,
     Option<"affineFusionMode", "mode", "enum FusionMode",
-           "mlir::FusionMode::Greedy", "fusion mode to attempt",
-           "llvm::cl::values(clEnumValN(mlir::FusionMode::Greedy,"
+           "mlir::affine::FusionMode::Greedy", "fusion mode to attempt",
+           "llvm::cl::values(clEnumValN(mlir::affine::FusionMode::Greedy,"
            " \"greedy\", \"Perform greedy (both producer-consumer and sibling) fusion\"), "
-           "clEnumValN( mlir::FusionMode::ProducerConsumer, "
+           "clEnumValN( mlir::affine::FusionMode::ProducerConsumer, "
            "\"producer\", \"Perform only producer-consumer fusion\"), "
-           "clEnumValN( mlir::FusionMode::Sibling, "
+           "clEnumValN( mlir::affine::FusionMode::Sibling, "
            "\"sibling\", \"Perform only sibling fusion\"))">,
   ];
   let dependentDialects = ["memref::MemRefDialect"];
@@ -180,12 +180,12 @@
 def AffineLoopInvariantCodeMotion
     : Pass<"affine-loop-invariant-code-motion", "func::FuncOp"> {
   let summary = "Hoist loop invariant instructions outside of affine loops";
-  let constructor = "mlir::createAffineLoopInvariantCodeMotionPass()";
+  let constructor = "mlir::affine::createAffineLoopInvariantCodeMotionPass()";
 }
 
 def AffineLoopTiling : Pass<"affine-loop-tile", "func::FuncOp"> {
   let summary = "Tile affine loop nests";
-  let constructor = "mlir::createLoopTilingPass()";
+  let constructor = "mlir::affine::createLoopTilingPass()";
   let options = [
     Option<"cacheSizeInKiB", "cache-size", "uint64_t", /*default=*/"512",
            "Set size of cache to tile for in KiB (default: 512)">,
@@ -201,7 +201,7 @@
 
 def AffineLoopUnroll : Pass<"affine-loop-unroll", "func::FuncOp"> {
   let summary = "Unroll affine loops";
-  let constructor = "mlir::createLoopUnrollPass()";
+  let constructor = "mlir::affine::createLoopUnrollPass()";
   let options = [
     Option<"unrollFactor", "unroll-factor", "unsigned", /*default=*/"4",
            "Use this unroll factor for all loops being unrolled">,
@@ -221,7 +221,7 @@
 
 def AffineLoopUnrollAndJam : Pass<"affine-loop-unroll-jam", "func::FuncOp"> {
   let summary = "Unroll and jam affine loops";
-  let constructor = "mlir::createLoopUnrollAndJamPass()";
+  let constructor = "mlir::affine::createLoopUnrollAndJamPass()";
   let options = [
     Option<"unrollJamFactor", "unroll-jam-factor", "unsigned",
            /*default=*/"4",
@@ -295,7 +295,7 @@
     }
     ```
   }];
-  let constructor = "mlir::createPipelineDataTransferPass()";
"mlir::createPipelineDataTransferPass()"; + let constructor = "mlir::affine::createPipelineDataTransferPass()"; } def AffineScalarReplacement : Pass<"affine-scalrep", "func::FuncOp"> { @@ -341,7 +341,7 @@ } ``` }]; - let constructor = "mlir::createAffineScalarReplacementPass()"; + let constructor = "mlir::affine::createAffineScalarReplacementPass()"; } def AffineVectorize : Pass<"affine-super-vectorize", "func::FuncOp"> { @@ -369,7 +369,7 @@ def AffineParallelize : Pass<"affine-parallelize", "func::FuncOp"> { let summary = "Convert affine.for ops into 1-D affine.parallel"; - let constructor = "mlir::createAffineParallelizePass()"; + let constructor = "mlir::affine::createAffineParallelizePass()"; let options = [ Option<"maxNested", "max-nested", "unsigned", /*default=*/"-1u", "Maximum number of nested parallel loops to produce. " @@ -382,7 +382,7 @@ def AffineLoopNormalize : Pass<"affine-loop-normalize", "func::FuncOp"> { let summary = "Apply normalization transformations to affine loop-like ops"; - let constructor = "mlir::createAffineLoopNormalizePass()"; + let constructor = "mlir::affine::createAffineLoopNormalizePass()"; let options = [ Option<"promoteSingleIter", "promote-single-iter", "bool", /*default=*/"true", "Promote single iteration loops">, @@ -392,19 +392,19 @@ def LoopCoalescing : Pass<"affine-loop-coalescing", "func::FuncOp"> { let summary = "Coalesce nested loops with independent bounds into a single " "loop"; - let constructor = "mlir::createLoopCoalescingPass()"; + let constructor = "mlir::affine::createLoopCoalescingPass()"; let dependentDialects = ["arith::ArithDialect"]; } def SimplifyAffineStructures : Pass<"affine-simplify-structures", "func::FuncOp"> { let summary = "Simplify affine expressions in maps/sets and normalize " "memrefs"; - let constructor = "mlir::createSimplifyAffineStructuresPass()"; + let constructor = "mlir::affine::createSimplifyAffineStructuresPass()"; } def AffineExpandIndexOps : Pass<"affine-expand-index-ops"> { let summary = "Lower affine operations operating on indices into more fundamental operations"; - let constructor = "mlir::createAffineExpandIndexOpsPass()"; + let constructor = "mlir::affine::createAffineExpandIndexOpsPass()"; } #endif // MLIR_DIALECT_AFFINE_PASSES diff --git a/mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h b/mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h --- a/mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h +++ b/mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h @@ -15,12 +15,11 @@ #include "mlir/IR/OpImplementation.h" namespace mlir { -class AffineForOp; namespace func { class FuncOp; } // namespace func namespace affine { -class ForOp; +class AffineForOp; } // namespace affine } // namespace mlir diff --git a/mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h --- a/mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h @@ -18,7 +18,6 @@ #include "mlir/Support/LogicalResult.h" namespace mlir { -class AffineApplyOp; class Location; class OpBuilder; class OpFoldResult; @@ -30,6 +29,9 @@ enum class BoundType; } // namespace presburger +namespace affine { +class AffineApplyOp; + /// Populate patterns that expand affine index operations into more fundamental /// operations (not necessarily restricted to Affine dialect). 
 void populateAffineExpandIndexOpsPatterns(RewritePatternSet &patterns);
@@ -83,6 +85,7 @@
     ValueBoundsConstraintSet::StopConditionFn stopCondition = nullptr,
     bool closedUB = false);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_TRANSFORMS_TRANSFORMS_H
diff --git a/mlir/include/mlir/Dialect/Affine/Utils.h b/mlir/include/mlir/Dialect/Affine/Utils.h
--- a/mlir/include/mlir/Dialect/Affine/Utils.h
+++ b/mlir/include/mlir/Dialect/Affine/Utils.h
@@ -18,10 +18,6 @@
 #include <optional>
 
 namespace mlir {
-
-class AffineForOp;
-class AffineIfOp;
-class AffineParallelOp;
 class DominanceInfo;
 class Operation;
 class PostDominanceInfo;
@@ -36,6 +32,11 @@
 
 struct LogicalResult;
 
+namespace affine {
+class AffineForOp;
+class AffineIfOp;
+class AffineParallelOp;
+
 using ReductionLoopMap = DenseMap<Operation *, SmallVector<LoopReduction, 2>>;
 
 /// Replaces a parallel affine.for op with a 1-d affine.parallel op. `forOp`'s
@@ -384,6 +385,7 @@
   Location loc;
 };
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_UTILS_H
diff --git a/mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h b/mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h
--- a/mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h
+++ b/mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h
@@ -16,6 +16,8 @@
 namespace mlir {
 class RewriterBase;
 
+namespace affine {
+
 /// Fills the `combinedOffsets`, `combinedSizes` and `combinedStrides` to use
 /// when combining a producer slice **into** a consumer slice.
 ///
@@ -99,6 +101,7 @@
     const llvm::SmallBitVector &rankReducedSourceDims,
     SmallVectorImpl<OpFoldResult> &resolvedSizes);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_VIEWLIKEINTERFACEUTILS_H
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
@@ -26,7 +26,7 @@
     deallocation](/docs/BufferDeallocationInternals/).
   }];
   let dependentDialects = [
-    "AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect"
+    "affine::AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect"
   ];
 
   let extraClassDeclaration = [{
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
@@ -37,7 +37,7 @@
   let cppNamespace = "::mlir::linalg";
   let dependentDialects = [
     "arith::ArithDialect",
-    "AffineDialect",
+    "affine::AffineDialect",
     "math::MathDialect",
     "memref::MemRefDialect",
     "tensor::TensorDialect",
diff --git a/mlir/include/mlir/Dialect/Linalg/Passes.td b/mlir/include/mlir/Dialect/Linalg/Passes.td
--- a/mlir/include/mlir/Dialect/Linalg/Passes.td
+++ b/mlir/include/mlir/Dialect/Linalg/Passes.td
@@ -37,7 +37,7 @@
     "Generate rank-reducing slices instead of reassociative reshapes">
   ];
   let dependentDialects = [
-    "linalg::LinalgDialect", "AffineDialect", "memref::MemRefDialect"
+    "linalg::LinalgDialect", "affine::AffineDialect", "memref::MemRefDialect"
   ];
 }
 
@@ -45,7 +45,7 @@
   let summary = "Fuse elementwise operations on tensors";
   let constructor = "mlir::createLinalgElementwiseOpFusionPass()";
   let dependentDialects = [
-    "AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"
+    "affine::AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"
   ];
 }
 
@@ -68,7 +68,7 @@
           "loops";
   let constructor = "mlir::createConvertLinalgToAffineLoopsPass()";
   let dependentDialects = [
-    "AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"];
+    "affine::AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"];
 }
 
 def LinalgLowerToLoops : Pass<"convert-linalg-to-loops", "func::FuncOp"> {
@@ -77,7 +77,7 @@
   let dependentDialects = [
     "linalg::LinalgDialect",
     "scf::SCFDialect",
-    "AffineDialect"
+    "affine::AffineDialect"
   ];
 }
 
@@ -87,7 +87,7 @@
           "loops";
   let constructor = "mlir::createConvertLinalgToParallelLoopsPass()";
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "linalg::LinalgDialect",
     "memref::MemRefDialect",
     "scf::SCFDialect"
@@ -98,7 +98,7 @@
   let summary = "Bufferize the linalg dialect";
   let constructor = "mlir::createLinalgBufferizePass()";
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "bufferization::BufferizationDialect",
     "linalg::LinalgDialect",
     "memref::MemRefDialect",
diff --git a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
--- a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
@@ -18,10 +18,13 @@
 
 namespace mlir {
 class AffineExpr;
-class AffineForOp;
 class AffineMap;
 class PatternRewriter;
 
+namespace affine {
+class AffineForOp;
+} // namespace affine
+
 namespace tensor {
 class ExtractSliceOp;
 } // namespace tensor
diff --git a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
@@ -24,7 +24,7 @@
   }];
   let constructor = "mlir::memref::createFoldMemRefAliasOpsPass()";
   let dependentDialects = [
-    "AffineDialect", "memref::MemRefDialect", "vector::VectorDialect"
+    "affine::AffineDialect", "memref::MemRefDialect", "vector::VectorDialect"
  ];
 }
 
@@ -156,7 +156,7 @@
   ```
   }];
   let constructor = "mlir::memref::createNormalizeMemRefsPass()";
-  let dependentDialects = ["AffineDialect"];
+  let dependentDialects = ["affine::AffineDialect"];
["affine::AffineDialect"]; } def ResolveRankedShapeTypeResultDims : @@ -184,7 +184,7 @@ }]; let constructor = "mlir::memref::createResolveShapedTypeResultDimsPass()"; let dependentDialects = [ - "AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect" + "affine::AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect" ]; } @@ -199,7 +199,7 @@ }]; let constructor = "mlir::memref::createExpandStridedMetadataPass()"; let dependentDialects = [ - "AffineDialect", "memref::MemRefDialect" + "affine::AffineDialect", "memref::MemRefDialect" ]; } #endif // MLIR_DIALECT_MEMREF_TRANSFORMS_PASSES diff --git a/mlir/include/mlir/Dialect/SCF/Transforms/Passes.td b/mlir/include/mlir/Dialect/SCF/Transforms/Passes.td --- a/mlir/include/mlir/Dialect/SCF/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/SCF/Transforms/Passes.td @@ -24,7 +24,7 @@ : Pass<"scf-for-loop-canonicalization"> { let summary = "Canonicalize operations within scf.for loop bodies"; let constructor = "mlir::createSCFForLoopCanonicalizationPass()"; - let dependentDialects = ["AffineDialect", "tensor::TensorDialect", + let dependentDialects = ["affine::AffineDialect", "tensor::TensorDialect", "memref::MemRefDialect"]; } @@ -37,7 +37,7 @@ "Do not peel loops inside of the last, partial iteration of another " "already peeled loop."> ]; - let dependentDialects = ["AffineDialect"]; + let dependentDialects = ["affine::AffineDialect"]; } def SCFForLoopSpecialization : Pass<"scf-for-loop-specialization"> { @@ -109,7 +109,7 @@ "Perform tiling with fixed upper bound with inbound check " "inside the internal loops"> ]; - let dependentDialects = ["AffineDialect"]; + let dependentDialects = ["affine::AffineDialect"]; } def SCFForLoopRangeFolding : Pass<"scf-for-loop-range-folding"> { diff --git a/mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h b/mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h --- a/mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h +++ b/mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h @@ -18,9 +18,7 @@ #include "mlir/Support/LogicalResult.h" namespace mlir { -class AffineApplyOp; class AffineMap; -class FlatAffineValueConstraints; struct LogicalResult; class Operation; class OpFoldResult; @@ -28,6 +26,10 @@ class Value; class ValueRange; +namespace affine { +class FlatAffineValueConstraints; +} // namespace affine + namespace scf { class IfOp; @@ -45,7 +47,7 @@ /// Populate the given constraint set with induction variable constraints of a /// "for" loop with the given range and step. 
-LogicalResult addLoopRangeConstraints(FlatAffineValueConstraints &cstr,
+LogicalResult addLoopRangeConstraints(affine::FlatAffineValueConstraints &cstr,
                                       Value iv, OpFoldResult lb,
                                       OpFoldResult ub, OpFoldResult step);
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
@@ -70,7 +70,7 @@
   }];
   let constructor = "mlir::createSparsificationPass()";
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "arith::ArithDialect",
     "bufferization::BufferizationDialect",
     "LLVM::LLVMDialect",
diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
@@ -48,7 +48,7 @@
   let hasCanonicalizer = 1;
   let hasConstantMaterializer = 1;
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "arith::ArithDialect",
     "complex::ComplexDialect",
   ];
diff --git a/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
@@ -23,7 +23,7 @@
   }];
   let constructor = "mlir::tensor::createFoldTensorSubsetOpsPass()";
   let dependentDialects = [
-    "AffineDialect", "tensor::TensorDialect", "vector::VectorDialect"
+    "affine::AffineDialect", "tensor::TensorDialect", "vector::VectorDialect"
   ];
 }
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -18,8 +18,6 @@
 
 namespace mlir {
 // Forward declarations.
-class AffineApplyOp;
-class AffineForOp;
 class AffineMap;
 class Block;
 class Location;
@@ -30,6 +28,11 @@
 class VectorType;
 class VectorTransferOpInterface;
 
+namespace affine {
+class AffineApplyOp;
+class AffineForOp;
+} // namespace affine
+
 namespace vector {
 /// Helper function that creates a memref::DimOp or tensor::DimOp depending on
 /// the type of `source`.
diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -86,7 +86,7 @@
 inline void registerAllDialects(DialectRegistry &registry) {
   // clang-format off
   registry.insert<acc::OpenACCDialect,
-                  AffineDialect,
+                  affine::AffineDialect,
                   amdgpu::AMDGPUDialect,
                   arith::ArithDialect,
diff --git a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
--- a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
+++ b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
@@ -128,8 +128,9 @@
-  target.addLegalDialect<AffineDialect, arith::ArithDialect,
-                         func::FuncDialect, memref::MemRefDialect,
-                         scf::SCFDialect>();
+  target.addLegalDialect<affine::AffineDialect, arith::ArithDialect,
+                         func::FuncDialect, memref::MemRefDialect,
+                         scf::SCFDialect>();
   target.addLegalOp<ModuleOp>();
   RewritePatternSet patterns(&getContext());
   populateLinalgToStandardConversionPatterns(patterns);
diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -36,6 +36,7 @@
 #define DEBUG_TYPE "loops-to-gpu"
 
 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::scf;
 
 // Name of internal attribute to mark visited operations during conversion.
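Taken together, the hunks above follow one mechanical rule: every C++ entry point of the Affine dialect -- the dialect class, op classes, pass constructors, registration functions, and helpers -- now lives in `mlir::affine`, while the IR-level `affine.*` operation names are unchanged. Downstream code can either spell the qualifier out or pull in the namespace, as SCFToGPU.cpp does above. A minimal sketch of an out-of-tree pass updated for the rename (the pass itself is hypothetical and not part of this patch; only the `affine::` qualifiers are what the patch mandates):

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Pass/Pass.h"
#include "llvm/Support/raw_ostream.h"

namespace {
/// Hypothetical out-of-tree pass that counts affine.for loops in a module.
struct CountAffineLoopsPass
    : public mlir::PassWrapper<CountAffineLoopsPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  void getDependentDialects(mlir::DialectRegistry &registry) const override {
    // Was: registry.insert<mlir::AffineDialect>();
    registry.insert<mlir::affine::AffineDialect>();
  }
  void runOnOperation() override {
    unsigned numLoops = 0;
    // Was: walk([&](mlir::AffineForOp) { ... });
    getOperation().walk([&](mlir::affine::AffineForOp) { ++numLoops; });
    llvm::outs() << "affine.for ops: " << numLoops << "\n";
  }
};
} // namespace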
diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp
@@ -42,7 +42,7 @@
   void runOnOperation() override {
     for (Operation &op : llvm::make_early_inc_range(
              getOperation().getFunctionBody().getOps())) {
-      if (auto forOp = dyn_cast<AffineForOp>(&op)) {
+      if (auto forOp = dyn_cast<affine::AffineForOp>(&op)) {
         if (failed(convertAffineLoopNestToGPULaunch(forOp, numBlockDims,
                                                     numThreadDims)))
           signalPassFailure();
diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -66,7 +66,7 @@
       SmallVector<Value, 4> dims(dimValues.begin(), dimValues.end());
       dims.push_back(prevIdx);
       AffineExpr d0 = rewriter.getAffineDimExpr(offsetMap.getNumDims());
-      indices[dim.getPosition()] = makeComposedAffineApply(
+      indices[dim.getPosition()] = affine::makeComposedAffineApply(
           rewriter, loc, d0 + offsetMap.getResult(offsetsIdx++), dims);
       continue;
     }
diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -102,7 +102,8 @@
     AffineExpr d0, d1;
     bindDims(xferOp.getContext(), d0, d1);
     Value offset = adaptor.getIndices()[*dim];
-    indices[*dim] = makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
+    indices[*dim] =
+        affine::makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
   }
 }
 
@@ -178,7 +179,8 @@
     AffineExpr d0, d1;
     bindDims(xferOp.getContext(), d0, d1);
     Value base = xferOp.getIndices()[*dim];
-    Value memrefIdx = makeComposedAffineApply(b, loc, d0 + d1, {base, iv});
+    Value memrefIdx =
+        affine::makeComposedAffineApply(b, loc, d0 + d1, {base, iv});
     cond = lb.create<arith::CmpIOp>(arith::CmpIPredicate::sgt, memrefDim,
                                     memrefIdx);
   }
@@ -1111,7 +1113,8 @@
     AffineExpr d0, d1;
     bindDims(xferOp.getContext(), d0, d1);
     Value offset = memrefIndices[dim];
-    memrefIndices[dim] = makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
+    memrefIndices[dim] =
+        affine::makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
     return dim;
   }
diff --git a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
--- a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
@@ -31,6 +31,7 @@
 #define DEBUG_TYPE "affine-analysis"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 
 /// Get the value that is being reduced by `pos`-th reduction in the loop if
@@ -78,7 +79,7 @@
 }
 
 /// Populate `supportedReductions` with descriptors of the supported reductions.
-void mlir::getSupportedReductions(
+void mlir::affine::getSupportedReductions(
     AffineForOp forOp, SmallVectorImpl<LoopReduction> &supportedReductions) {
   unsigned numIterArgs = forOp.getNumIterOperands();
   if (numIterArgs == 0)
@@ -94,8 +95,8 @@
 /// Returns true if `forOp' is a parallel loop. If `parallelReductions` is
 /// provided, populates it with descriptors of the parallelizable reductions and
 /// treats them as not preventing parallelization.
-bool mlir::isLoopParallel(AffineForOp forOp,
-                          SmallVectorImpl<LoopReduction> *parallelReductions) {
+bool mlir::affine::isLoopParallel(
+    AffineForOp forOp, SmallVectorImpl<LoopReduction> *parallelReductions) {
   unsigned numIterArgs = forOp.getNumIterOperands();
 
   // Loop is not parallel if it has SSA loop-carried dependences and reduction
@@ -132,7 +133,7 @@
   return viewOp && isLocallyDefined(viewOp.getViewSource(), enclosingOp);
 }
 
-bool mlir::isLoopMemoryParallel(AffineForOp forOp) {
+bool mlir::affine::isLoopMemoryParallel(AffineForOp forOp) {
   // Any memref-typed iteration arguments are treated as serializing.
   if (llvm::any_of(forOp.getResultTypes(),
                    [](Type type) { return type.isa<BaseMemRefType>(); }))
     return false;
@@ -186,7 +187,7 @@
 /// and ending at operands which are not defined by AffineApplyOps.
 // TODO: Add a method to AffineApplyOp which forward substitutes the
 // AffineApplyOp into any user AffineApplyOps.
-void mlir::getReachableAffineApplyOps(
+void mlir::affine::getReachableAffineApplyOps(
     ArrayRef<Value> operands, SmallVectorImpl<Operation *> &affineApplyOps) {
   struct State {
     // The ssa value for this node in the DFS traversal.
@@ -236,8 +237,8 @@
 // FlatAffineValueConstraints. (For eg., by using iv - lb % step = 0 and/or by
 // introducing a method in FlatAffineValueConstraints
 // setExprStride(ArrayRef<AffineExpr> expr, int64_t stride)
-LogicalResult mlir::getIndexSet(MutableArrayRef<Operation *> ops,
-                                FlatAffineValueConstraints *domain) {
+LogicalResult mlir::affine::getIndexSet(MutableArrayRef<Operation *> ops,
+                                        FlatAffineValueConstraints *domain) {
   SmallVector<Value, 4> indices;
   SmallVector<Operation *, 8> loopOps;
   size_t numDims = 0;
@@ -594,7 +595,7 @@
 //
 //
 // TODO: Support AffineExprs mod/floordiv/ceildiv.
-DependenceResult mlir::checkMemrefAccessDependence(
+DependenceResult mlir::affine::checkMemrefAccessDependence(
     const MemRefAccess &srcAccess, const MemRefAccess &dstAccess,
     unsigned loopDepth, FlatAffineValueConstraints *dependenceConstraints,
     SmallVector<DependenceComponent, 2> *dependenceComponents, bool allowRAR) {
@@ -671,7 +672,7 @@
 /// Gathers dependence components for dependences between all ops in loop nest
 /// rooted at 'forOp' at loop depths in range [1, maxLoopDepth].
-void mlir::getDependenceComponents(
+void mlir::affine::getDependenceComponents(
     AffineForOp forOp, unsigned maxLoopDepth,
     std::vector<SmallVector<DependenceComponent, 2>> *depCompsVec) {
   // Collect all load and store ops in loop nest rooted at 'forOp'.
diff --git a/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
--- a/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
@@ -31,6 +31,7 @@
 #define DEBUG_TYPE "affine-structures"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 
@@ -489,8 +490,8 @@
   numRangeDims -= intersectRangeLHS - intersectRangeRHS;
 }
 
-LogicalResult mlir::getRelationFromMap(AffineMap &map,
-                                       FlatAffineRelation &rel) {
+LogicalResult mlir::affine::getRelationFromMap(AffineMap &map,
+                                               FlatAffineRelation &rel) {
   // Get flattened affine expressions.
   std::vector<SmallVector<int64_t, 8>> flatExprs;
   FlatAffineValueConstraints localVarCst;
@@ -525,8 +526,8 @@
   return success();
 }
 
-LogicalResult mlir::getRelationFromMap(const AffineValueMap &map,
-                                       FlatAffineRelation &rel) {
+LogicalResult mlir::affine::getRelationFromMap(const AffineValueMap &map,
+                                               FlatAffineRelation &rel) {
   AffineMap affineMap = map.getAffineMap();
 
   if (failed(getRelationFromMap(affineMap, rel)))
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -28,13 +28,14 @@
 #include <optional>
 
 using namespace mlir;
+using namespace mlir::affine;
 
 /// Returns the trip count of the loop as an affine expression if the latter is
 /// expressible as an affine expression, and nullptr otherwise. The trip count
 /// expression is simplified before returning. This method only utilizes map
 /// composition to construct lower and upper bounds before computing the trip
 /// count expressions.
-void mlir::getTripCountMapAndOperands(
+void mlir::affine::getTripCountMapAndOperands(
     AffineForOp forOp, AffineMap *tripCountMap,
     SmallVectorImpl<Value> *tripCountOperands) {
   MLIRContext *context = forOp.getContext();
@@ -83,7 +84,7 @@
 /// otherwise. This method uses affine expression analysis (in turn using
 /// getTripCount) and is able to determine constant trip count in non-trivial
 /// cases.
-std::optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
+std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp) {
   SmallVector<Value, 4> operands;
   AffineMap map;
   getTripCountMapAndOperands(forOp, &map, &operands);
@@ -109,7 +110,7 @@
 /// Returns the greatest known integral divisor of the trip count. Affine
 /// expression analysis is used (indirectly through getTripCount), and
 /// this method is thus able to determine non-trivial divisors.
-uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
+uint64_t mlir::affine::getLargestDivisorOfTripCount(AffineForOp forOp) {
   SmallVector<Value, 4> operands;
   AffineMap map;
   getTripCountMapAndOperands(forOp, &map, &operands);
@@ -183,7 +184,8 @@
   return !composeOp.getAffineValueMap().isFunctionOf(0, iv);
 }
 
-DenseSet<Value> mlir::getInvariantAccesses(Value iv, ArrayRef<Value> indices) {
+DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
+                                                   ArrayRef<Value> indices) {
   DenseSet<Value> res;
   for (auto val : indices) {
     if (isAccessIndexInvariant(iv, val)) {
@@ -335,8 +337,8 @@
   return true;
 }
 
-bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim,
-                                  NestedPattern &vectorTransferMatcher) {
+bool mlir::affine::isVectorizableLoopBody(
+    AffineForOp loop, int *memRefDim, NestedPattern &vectorTransferMatcher) {
   *memRefDim = -1;
   VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
     auto load = dyn_cast<AffineLoadOp>(op);
@@ -358,8 +360,8 @@
   return isVectorizableLoopBodyWithOpCond(loop, fun, vectorTransferMatcher);
 }
 
-bool mlir::isVectorizableLoopBody(AffineForOp loop,
-                                  NestedPattern &vectorTransferMatcher) {
+bool mlir::affine::isVectorizableLoopBody(
+    AffineForOp loop, NestedPattern &vectorTransferMatcher) {
   return isVectorizableLoopBodyWithOpCond(loop, nullptr, vectorTransferMatcher);
 }
 
@@ -368,7 +370,8 @@
 /// 'def' and all its uses have the same shift factor.
 // TODO: extend this to check for memory-based dependence violation when we have
 // the support.
-bool mlir::isOpwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
+bool mlir::affine::isOpwiseShiftValid(AffineForOp forOp,
+                                      ArrayRef<uint64_t> shifts) {
   auto *forBody = forOp.getBody();
   assert(shifts.size() == forBody->getOperations().size());
diff --git a/mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp b/mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp
--- a/mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp
@@ -17,6 +17,7 @@
 #include "llvm/Support/raw_ostream.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 llvm::BumpPtrAllocator *&NestedMatch::allocator() {
   thread_local llvm::BumpPtrAllocator *allocator = nullptr;
@@ -130,6 +131,7 @@
 static bool isAffineIfOp(Operation &op) { return isa<AffineIfOp>(op); }
 
 namespace mlir {
+namespace affine {
 namespace matcher {
 
 NestedPattern Op(FilterFunctionType filter) {
@@ -176,4 +178,5 @@
 }
 
 } // namespace matcher
+} // namespace affine
 } // namespace mlir
diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
--- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
@@ -28,6 +28,7 @@
 #define DEBUG_TYPE "analysis-utils"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 
 using llvm::SmallDenseMap;
@@ -501,7 +502,8 @@
   }
 }
 
-void mlir::getAffineForIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops) {
+void mlir::affine::getAffineForIVs(Operation &op,
+                                   SmallVectorImpl<AffineForOp> *loops) {
   auto *currOp = op.getParentOp();
   AffineForOp currAffineForOp;
   // Traverse up the hierarchy collecting all 'affine.for' operation while
@@ -514,8 +516,8 @@
   std::reverse(loops->begin(), loops->end());
 }
 
-void mlir::getEnclosingAffineOps(Operation &op,
-                                 SmallVectorImpl<Operation *> *ops) {
+void mlir::affine::getEnclosingAffineOps(Operation &op,
+                                         SmallVectorImpl<Operation *> *ops) {
   ops->clear();
   Operation *currOp = op.getParentOp();
 
@@ -1063,7 +1065,7 @@
 }
 
 std::optional<int64_t>
-mlir::getMemRefIntOrFloatEltSizeInBytes(MemRefType memRefType) {
+mlir::affine::getMemRefIntOrFloatEltSizeInBytes(MemRefType memRefType) {
   auto elementType = memRefType.getElementType();
 
   unsigned sizeInBits;
@@ -1113,7 +1115,7 @@
 /// into account size of the vector as well.
 // TODO: improve/complete this when we have target data.
 std::optional<uint64_t>
-mlir::getIntOrFloatMemRefSizeInBytes(MemRefType memRefType) {
+mlir::affine::getIntOrFloatMemRefSizeInBytes(MemRefType memRefType) {
   if (!memRefType.hasStaticShape())
     return std::nullopt;
   auto elementType = memRefType.getElementType();
@@ -1130,8 +1132,8 @@
 }
 
 template <typename LoadOrStoreOp>
-LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOp loadOrStoreOp,
-                                            bool emitError) {
+LogicalResult mlir::affine::boundCheckLoadOrStoreOp(LoadOrStoreOp loadOrStoreOp,
+                                                    bool emitError) {
   static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
                                 AffineWriteOpInterface>::value,
                 "argument should be either a AffineReadOpInterface or a "
@@ -1186,9 +1188,11 @@
 
 // Explicitly instantiate the template so that the compiler knows we need them!
 template LogicalResult
-mlir::boundCheckLoadOrStoreOp(AffineReadOpInterface loadOp, bool emitError);
+mlir::affine::boundCheckLoadOrStoreOp(AffineReadOpInterface loadOp,
+                                      bool emitError);
 template LogicalResult
-mlir::boundCheckLoadOrStoreOp(AffineWriteOpInterface storeOp, bool emitError);
+mlir::affine::boundCheckLoadOrStoreOp(AffineWriteOpInterface storeOp,
+                                      bool emitError);
 
 // Returns in 'positions' the Block positions of 'op' in each ancestor
 // Block from the Block containing operation, stopping at 'limitBlock'.
@@ -1250,7 +1254,7 @@
 
 /// Returns the innermost common loop depth for the set of operations in 'ops'.
 // TODO: Move this to LoopUtils.
-unsigned mlir::getInnermostCommonLoopDepth(
+unsigned mlir::affine::getInnermostCommonLoopDepth(
     ArrayRef<Operation *> ops, SmallVectorImpl<AffineForOp> *surroundingLoops) {
   unsigned numOps = ops.size();
   assert(numOps > 0 && "Expected at least one operation");
@@ -1282,10 +1286,10 @@
 /// then verifies if it is valid. Returns 'SliceComputationResult::Success' if
 /// union was computed correctly, an appropriate failure otherwise.
 SliceComputationResult
-mlir::computeSliceUnion(ArrayRef<Operation *> opsA, ArrayRef<Operation *> opsB,
-                        unsigned loopDepth, unsigned numCommonLoops,
-                        bool isBackwardSlice,
-                        ComputationSliceState *sliceUnion) {
+mlir::affine::computeSliceUnion(ArrayRef<Operation *> opsA,
+                                ArrayRef<Operation *> opsB, unsigned loopDepth,
+                                unsigned numCommonLoops, bool isBackwardSlice,
+                                ComputationSliceState *sliceUnion) {
   // Compute the union of slice bounds between all pairs in 'opsA' and
   // 'opsB' in 'sliceUnionCst'.
   FlatAffineValueConstraints sliceUnionCst;
@@ -1322,8 +1326,9 @@
 
       // Compute slice bounds for 'srcAccess' and 'dstAccess'.
       ComputationSliceState tmpSliceState;
-      mlir::getComputationSliceState(i, j, &dependenceConstraints, loopDepth,
-                                     isBackwardSlice, &tmpSliceState);
+      mlir::affine::getComputationSliceState(i, j, &dependenceConstraints,
+                                             loopDepth, isBackwardSlice,
+                                             &tmpSliceState);
 
       if (sliceUnionCst.getNumDimAndSymbolVars() == 0) {
         // Initialize 'sliceUnionCst' with the bounds computed in previous step.
@@ -1465,7 +1470,7 @@
 // nest surrounding represented by slice loop bounds in 'slice'. Returns true
 // on success, false otherwise (if a non-constant trip count was encountered).
 // TODO: Make this work with non-unit step loops.
-bool mlir::buildSliceTripCountMap(
+bool mlir::affine::buildSliceTripCountMap(
     const ComputationSliceState &slice,
     llvm::SmallDenseMap<Operation *, uint64_t, 8> *tripCountMap) {
   unsigned numSrcLoopIVs = slice.ivs.size();
@@ -1503,7 +1508,7 @@
 }
 
 // Return the number of iterations in the given slice.
-uint64_t mlir::getSliceIterationCount(
+uint64_t mlir::affine::getSliceIterationCount(
     const llvm::SmallDenseMap<Operation *, uint64_t, 8> &sliceTripCountMap) {
   uint64_t iterCount = 1;
   for (const auto &count : sliceTripCountMap) {
@@ -1517,7 +1522,7 @@
 // 'dependenceConstraints' at depth greater than 'loopDepth', and computes slice
 // bounds in 'sliceState' which represent the one loop nest's IVs in terms of
 // the other loop nest's IVs, symbols and constants (using 'isBackwardsSlice').
-void mlir::getComputationSliceState(
+void mlir::affine::getComputationSliceState(
     Operation *depSourceOp, Operation *depSinkOp,
     FlatAffineValueConstraints *dependenceConstraints, unsigned loopDepth,
     bool isBackwardSlice, ComputationSliceState *sliceState) {
@@ -1631,10 +1636,9 @@
 // entire destination index set. Subtract out the dependent destination
 // iterations from destination index set and check for emptiness --- this is one
 // solution.
-AffineForOp
-mlir::insertBackwardComputationSlice(Operation *srcOpInst, Operation *dstOpInst,
-                                     unsigned dstLoopDepth,
-                                     ComputationSliceState *sliceState) {
+AffineForOp mlir::affine::insertBackwardComputationSlice(
    Operation *srcOpInst, Operation *dstOpInst, unsigned dstLoopDepth,
     ComputationSliceState *sliceState) {
   // Get loop nest surrounding src operation.
   SmallVector<AffineForOp, 4> srcLoopIVs;
   getAffineForIVs(*srcOpInst, &srcLoopIVs);
@@ -1713,7 +1717,7 @@
 
 /// Returns the nesting depth of this statement, i.e., the number of loops
 /// surrounding this statement.
-unsigned mlir::getNestingDepth(Operation *op) {
+unsigned mlir::affine::getNestingDepth(Operation *op) {
   Operation *currOp = op;
   unsigned depth = 0;
   while ((currOp = currOp->getParentOp())) {
@@ -1741,7 +1745,7 @@
                      [](AffineExpr e) { return e == 0; });
 }
 
-void mlir::getAffineIVs(Operation &op, SmallVectorImpl<Value> &ivs) {
+void mlir::affine::getAffineIVs(Operation &op, SmallVectorImpl<Value> &ivs) {
   auto *currOp = op.getParentOp();
   AffineForOp currAffineForOp;
   // Traverse up the hierarchy collecting all 'affine.for' and affine.parallel
@@ -1758,7 +1762,8 @@
 
 /// Returns the number of surrounding loops common to 'loopsA' and 'loopsB',
 /// where each lists loops from outer-most to inner-most in loop nest.
-unsigned mlir::getNumCommonSurroundingLoops(Operation &a, Operation &b) {
+unsigned mlir::affine::getNumCommonSurroundingLoops(Operation &a,
+                                                    Operation &b) {
   SmallVector<Value, 4> loopsA, loopsB;
   getAffineIVs(a, loopsA);
   getAffineIVs(b, loopsB);
@@ -1817,8 +1822,8 @@
   return totalSizeInBytes;
 }
 
-std::optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
-                                                     int memorySpace) {
+std::optional<int64_t> mlir::affine::getMemoryFootprintBytes(AffineForOp forOp,
+                                                             int memorySpace) {
   auto *forInst = forOp.getOperation();
   return ::getMemoryFootprintBytes(
       *forInst->getBlock(), Block::iterator(forInst),
@@ -1826,7 +1831,7 @@
 }
 
 /// Returns whether a loop is parallel and contains a reduction loop.
-bool mlir::isLoopParallelAndContainsReduction(AffineForOp forOp) {
+bool mlir::affine::isLoopParallelAndContainsReduction(AffineForOp forOp) {
   SmallVector<LoopReduction> reductions;
   if (!isLoopParallel(forOp, &reductions))
     return false;
@@ -1835,8 +1840,8 @@
 
 /// Returns in 'sequentialLoops' all sequential loops in loop nest rooted
 /// at 'forOp'.
-void mlir::getSequentialLoops(AffineForOp forOp,
-                              llvm::SmallDenseSet<Value, 8> *sequentialLoops) {
+void mlir::affine::getSequentialLoops(
    AffineForOp forOp, llvm::SmallDenseSet<Value, 8> *sequentialLoops) {
   forOp->walk([&](Operation *op) {
     if (auto innerFor = dyn_cast<AffineForOp>(op))
       if (!isLoopParallel(innerFor))
@@ -1844,7 +1849,7 @@
   });
 }
 
-IntegerSet mlir::simplifyIntegerSet(IntegerSet set) {
+IntegerSet mlir::affine::simplifyIntegerSet(IntegerSet set) {
   FlatAffineValueConstraints fac(set);
   if (fac.isEmpty())
     return IntegerSet::getEmptySet(set.getNumDims(), set.getNumSymbols(),
@@ -1930,9 +1935,8 @@
 //  ... | 0 | 0 | -1 | ... | ... | = 0
 //    0 | 0 | 1 | -1 |  0  | -1  | >= 0
 //
-FailureOr<AffineValueMap>
-mlir::simplifyConstrainedMinMaxOp(Operation *op,
-                                  FlatAffineValueConstraints constraints) {
+FailureOr<AffineValueMap> mlir::affine::simplifyConstrainedMinMaxOp(
     Operation *op, FlatAffineValueConstraints constraints) {
   bool isMin = isa<AffineMinOp>(op);
   assert((isMin || isa<AffineMaxOp>(op)) && "expect AffineMin/MaxOp");
   MLIRContext *ctx = op->getContext();
@@ -2032,6 +2036,6 @@
                                newMap.getNumDims(), newMap.getNumSymbols());
     }
   }
-  mlir::canonicalizeMapAndOperands(&newMap, &newOperands);
+  affine::canonicalizeMapAndOperands(&newMap, &newOperands);
   return AffineValueMap(newMap, newOperands);
 }
diff --git a/mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp b/mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp
--- a/mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp
@@ -9,6 +9,7 @@
 #include "mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 //===----------------------------------------------------------------------===//
 // Affine Memory Op Interfaces
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -26,6 +26,7 @@
 #include <optional>
 
 using namespace mlir;
+using namespace mlir::affine;
 
 #define DEBUG_TYPE "affine-ops"
 
@@ -35,7 +36,7 @@
 /// `region` or is an argument of `region`. A value of index type defined at the
 /// top level of a `AffineScope` region is always a valid symbol for all
 /// uses in that region.
-bool mlir::isTopLevelValue(Value value, Region *region) {
+bool mlir::affine::isTopLevelValue(Value value, Region *region) {
   if (auto arg = value.dyn_cast<BlockArgument>())
     return arg.getParentRegion() == region;
   return value.getDefiningOp()->getParentRegion() == region;
@@ -231,7 +232,7 @@
 /// op with trait `AffineScope`. If the value is defined in an unlinked region,
 /// conservatively assume it is not top-level. A value of index type defined at
 /// the top level is always a valid symbol.
-bool mlir::isTopLevelValue(Value value) {
+bool mlir::affine::isTopLevelValue(Value value) {
   if (auto arg = value.dyn_cast<BlockArgument>()) {
     // The block owning the argument may be unlinked, e.g. when the surrounding
     // region has not yet been attached to an Op, at which point the parent Op
@@ -246,7 +247,7 @@
 
 /// Returns the closest region enclosing `op` that is held by an operation with
 /// trait `AffineScope`; `nullptr` if there is no such region.
-Region *mlir::getAffineScope(Operation *op) {
+Region *mlir::affine::getAffineScope(Operation *op) {
   auto *curOp = op;
   while (auto *parentOp = curOp->getParentOp()) {
     if (parentOp->hasTrait<OpTrait::AffineScope>())
@@ -261,7 +262,7 @@
 // *) It is valid as a symbol.
 // *) It is an induction variable.
 // *) It is the result of affine apply operation with dimension id arguments.
-bool mlir::isValidDim(Value value) {
+bool mlir::affine::isValidDim(Value value) {
   // The value must be an index type.
   if (!value.getType().isIndex())
     return false;
@@ -281,7 +282,7 @@
 // *) It is valid as a symbol.
 // *) It is an induction variable.
 // *) It is the result of an affine apply operation with dimension id operands.
-bool mlir::isValidDim(Value value, Region *region) {
+bool mlir::affine::isValidDim(Value value, Region *region) {
   // The value must be an index type.
   if (!value.getType().isIndex())
     return false;
@@ -358,7 +359,7 @@
 // *) It is the result of an affine.apply operation with symbol operands.
 // *) It is a result of the dim op on a memref whose corresponding size is a
 //    valid symbol.
-bool mlir::isValidSymbol(Value value) {
+bool mlir::affine::isValidSymbol(Value value) {
   if (!value)
     return false;
 
@@ -387,7 +388,7 @@
 /// If `region` is null, conservatively assume the symbol definition scope does
 /// not exist and only accept the values that would be symbols regardless of
 /// the surrounding region structure, i.e. the first three cases above.
-bool mlir::isValidSymbol(Value value, Region *region) {
+bool mlir::affine::isValidSymbol(Value value, Region *region) {
   // The value must be an index type.
   if (!value.getType().isIndex())
     return false;
@@ -447,9 +448,8 @@
 }
 
 /// Parses dimension and symbol list and returns true if parsing failed.
-ParseResult mlir::parseDimAndSymbolList(OpAsmParser &parser,
-                                        SmallVectorImpl<Value> &operands,
-                                        unsigned &numDims) {
+ParseResult mlir::affine::parseDimAndSymbolList(
+    OpAsmParser &parser, SmallVectorImpl<Value> &operands, unsigned &numDims) {
   SmallVector<OpAsmParser::UnresolvedOperand, 8> opInfos;
   if (parser.parseOperandList(opInfos, OpAsmParser::Delimiter::Paren))
     return failure();
@@ -541,7 +541,7 @@
 // its operands are valid dimension ids.
 bool AffineApplyOp::isValidDim() {
   return llvm::all_of(getOperands(),
-                      [](Value op) { return mlir::isValidDim(op); });
+                      [](Value op) { return affine::isValidDim(op); });
 }
 
 // The result of the affine apply operation can be used as a dimension id if all
@@ -556,14 +556,14 @@
 // operands are symbols.
 bool AffineApplyOp::isValidSymbol() {
   return llvm::all_of(getOperands(),
-                      [](Value op) { return mlir::isValidSymbol(op); });
+                      [](Value op) { return affine::isValidSymbol(op); });
 }
 
 // The result of the affine apply operation can be used as a symbol in `region`
 // if all its operands are symbols in `region`.
 bool AffineApplyOp::isValidSymbol(Region *region) {
   return llvm::all_of(getOperands(), [&](Value operand) {
-    return mlir::isValidSymbol(operand, region);
+    return affine::isValidSymbol(operand, region);
   });
 }
 
@@ -1071,8 +1071,8 @@
   *map = simplifyAffineMap(*map);
 }
 
-void mlir::fullyComposeAffineMapAndOperands(AffineMap *map,
-                                            SmallVectorImpl<Value> *operands) {
+void mlir::affine::fullyComposeAffineMapAndOperands(
+    AffineMap *map, SmallVectorImpl<Value> *operands) {
   while (llvm::any_of(*operands, [](Value v) {
     return isa_and_nonnull<AffineApplyOp>(v.getDefiningOp());
   })) {
@@ -1168,9 +1168,9 @@
   return op->getResult(0);
 }
 
-AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
-                                            AffineMap map,
-                                            ValueRange operands) {
+AffineApplyOp mlir::affine::makeComposedAffineApply(OpBuilder &b, Location loc,
+                                                    AffineMap map,
+                                                    ValueRange operands) {
   AffineMap normalizedMap = map;
   SmallVector<Value, 8> normalizedOperands(operands.begin(), operands.end());
   composeAffineMapAndOperands(&normalizedMap, &normalizedOperands);
   return b.create<AffineApplyOp>(loc, normalizedMap, normalizedOperands);
 }
@@ -1178,8 +1178,9 @@
-AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
-                                            AffineExpr e, ValueRange values) {
+AffineApplyOp mlir::affine::makeComposedAffineApply(OpBuilder &b, Location loc,
+                                                    AffineExpr e,
+                                                    ValueRange values) {
   return makeComposedAffineApply(
       b, loc, AffineMap::inferFromExprList(ArrayRef<AffineExpr>{e}).front(),
       values);
@@ -1216,8 +1217,9 @@
 }
 
 OpFoldResult
-mlir::makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineMap map,
-                                    ArrayRef<OpFoldResult> operands) {
+mlir::affine::makeComposedFoldedAffineApply(OpBuilder &b, Location loc,
+                                            AffineMap map,
+                                            ArrayRef<OpFoldResult> operands) {
   assert(map.getNumResults() == 1 && "building affine.apply with !=1 result");
 
   SmallVector<Attribute> constants;
@@ -1234,14 +1236,16 @@
 }
 
 OpFoldResult
-mlir::makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineExpr expr,
-                                    ArrayRef<OpFoldResult> operands) {
+mlir::affine::makeComposedFoldedAffineApply(OpBuilder &b, Location loc,
+                                            AffineExpr expr,
+                                            ArrayRef<OpFoldResult> operands) {
   return makeComposedFoldedAffineApply(
       b, loc, AffineMap::inferFromExprList(ArrayRef<AffineExpr>{expr}).front(),
       operands);
 }
 
-SmallVector<OpFoldResult> mlir::makeComposedFoldedMultiResultAffineApply(
+SmallVector<OpFoldResult>
+mlir::affine::makeComposedFoldedMultiResultAffineApply(
     OpBuilder &b, Location loc, AffineMap map,
     ArrayRef<OpFoldResult> operands) {
   return llvm::to_vector(llvm::map_range(
@@ -1251,8 +1255,8 @@
       }));
 }
 
-Value mlir::makeComposedAffineMin(OpBuilder &b, Location loc, AffineMap map,
-                                  ValueRange operands) {
+Value mlir::affine::makeComposedAffineMin(OpBuilder &b, Location loc,
+                                          AffineMap map, ValueRange operands) {
   SmallVector<Value> allOperands = llvm::to_vector(operands);
   composeMultiResultAffineMap(map, allOperands);
   return b.createOrFold<AffineMinOp>(loc, b.getIndexType(), map, allOperands);
 }
@@ -1277,14 +1281,16 @@
 }
 
 OpFoldResult
-mlir::makeComposedFoldedAffineMin(OpBuilder &b, Location loc, AffineMap map,
-                                  ArrayRef<OpFoldResult> operands) {
+mlir::affine::makeComposedFoldedAffineMin(OpBuilder &b, Location loc,
+                                          AffineMap map,
+                                          ArrayRef<OpFoldResult> operands) {
   return makeComposedFoldedMinMax<AffineMinOp>(b, loc, map, operands);
 }
 
 OpFoldResult
-mlir::makeComposedFoldedAffineMax(OpBuilder &b, Location loc, AffineMap map,
-                                  ArrayRef<OpFoldResult> operands) {
+mlir::affine::makeComposedFoldedAffineMax(OpBuilder &b, Location loc,
+                                          AffineMap map,
+                                          ArrayRef<OpFoldResult> operands) {
   return makeComposedFoldedMinMax<AffineMaxOp>(b, loc, map, operands);
 }
 
@@ -1299,8 +1305,9 @@
   return b.createOrFold<AffineApplyOp>(loc, map, operands);
 }
 
-SmallVector<Value> mlir::applyMapToValues(OpBuilder &b, Location loc,
-                                          AffineMap map, ValueRange values) {
+SmallVector<Value> mlir::affine::applyMapToValues(OpBuilder &b, Location loc,
+                                                  AffineMap map,
+                                                  ValueRange values) {
   SmallVector<Value> res;
   res.reserve(map.getNumResults());
   unsigned numDims = map.getNumDims(), numSym = map.getNumSymbols();
@@ -1436,13 +1443,13 @@
   *operands = resultOperands;
 }
 
-void mlir::canonicalizeMapAndOperands(AffineMap *map,
-                                      SmallVectorImpl<Value> *operands) {
+void mlir::affine::canonicalizeMapAndOperands(
+    AffineMap *map, SmallVectorImpl<Value> *operands) {
   canonicalizeMapOrSetAndOperands<AffineMap>(map, operands);
 }
 
-void mlir::canonicalizeSetAndOperands(IntegerSet *set,
-                                      SmallVectorImpl<Value> *operands) {
+void mlir::affine::canonicalizeSetAndOperands(
+    IntegerSet *set, SmallVectorImpl<Value> *operands) {
   canonicalizeMapOrSetAndOperands<IntegerSet>(set, operands);
 }
 
@@ -2518,19 +2525,19 @@
 
 /// Returns true if the provided value is the induction variable of a
 /// AffineForOp.
-bool mlir::isAffineForInductionVar(Value val) {
+bool mlir::affine::isAffineForInductionVar(Value val) {
   return getForInductionVarOwner(val) != AffineForOp();
 }
 
-bool mlir::isAffineParallelInductionVar(Value val) {
+bool mlir::affine::isAffineParallelInductionVar(Value val) {
   return getAffineParallelInductionVarOwner(val) != nullptr;
 }
 
-bool mlir::isAffineInductionVar(Value val) {
+bool mlir::affine::isAffineInductionVar(Value val) {
   return isAffineForInductionVar(val) || isAffineParallelInductionVar(val);
 }
 
-AffineForOp mlir::getForInductionVarOwner(Value val) {
+AffineForOp mlir::affine::getForInductionVarOwner(Value val) {
   auto ivArg = val.dyn_cast<BlockArgument>();
   if (!ivArg || !ivArg.getOwner())
     return AffineForOp();
@@ -2541,7 +2548,7 @@
   return AffineForOp();
 }
 
-AffineParallelOp mlir::getAffineParallelInductionVarOwner(Value val) {
+AffineParallelOp mlir::affine::getAffineParallelInductionVarOwner(Value val) {
   auto ivArg = val.dyn_cast<BlockArgument>();
   if (!ivArg || !ivArg.getOwner())
     return nullptr;
@@ -2554,15 +2561,15 @@
 
 /// Extracts the induction variables from a list of AffineForOps and returns
 /// them.
-void mlir::extractForInductionVars(ArrayRef<AffineForOp> forInsts,
-                                   SmallVectorImpl<Value> *ivs) {
+void mlir::affine::extractForInductionVars(ArrayRef<AffineForOp> forInsts,
+                                           SmallVectorImpl<Value> *ivs) {
   ivs->reserve(forInsts.size());
   for (auto forInst : forInsts)
     ivs->push_back(forInst.getInductionVar());
 }
 
-void mlir::extractInductionVars(ArrayRef<Operation *> affineOps,
-                                SmallVectorImpl<Value> &ivs) {
+void mlir::affine::extractInductionVars(ArrayRef<Operation *> affineOps,
+                                        SmallVectorImpl<Value> &ivs) {
   ivs.reserve(affineOps.size());
   for (Operation *op : affineOps) {
     // Add constraints from forOp's bounds.
@@ -2640,7 +2647,7 @@
                       /*iterArgs=*/std::nullopt, bodyBuilderFn);
 }
 
-void mlir::buildAffineLoopNest(
+void mlir::affine::buildAffineLoopNest(
     OpBuilder &builder, Location loc, ArrayRef<int64_t> lbs,
     ArrayRef<int64_t> ubs, ArrayRef<int64_t> steps,
     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilderFn) {
@@ -2648,7 +2655,7 @@
                        buildAffineLoopFromConstants);
 }
 
-void mlir::buildAffineLoopNest(
+void mlir::affine::buildAffineLoopNest(
     OpBuilder &builder, Location loc, ValueRange lbs, ValueRange ubs,
     ArrayRef<int64_t> steps,
     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilderFn) {
@@ -2656,11 +2663,12 @@
                        buildAffineLoopFromValues);
 }
 
-AffineForOp mlir::replaceForOpWithNewYields(OpBuilder &b, AffineForOp loop,
-                                            ValueRange newIterOperands,
-                                            ValueRange newYieldedValues,
-                                            ValueRange newIterArgs,
-                                            bool replaceLoopResults) {
+AffineForOp mlir::affine::replaceForOpWithNewYields(OpBuilder &b,
+                                                    AffineForOp loop,
+                                                    ValueRange newIterOperands,
+                                                    ValueRange newYieldedValues,
+                                                    ValueRange newIterArgs,
+                                                    bool replaceLoopResults) {
   assert(newIterOperands.size() == newYieldedValues.size() &&
          "newIterOperands must be of the same size as newYieldedValues");
   // Create a new loop before the existing one, with the extra operands.
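Editorial note, not part of the patch: the AffineOps.cpp changes above are signature-only; helpers such as makeComposedAffineApply keep their behavior and merely move into mlir::affine. A hedged sketch of a call site after rebasing onto this commit (buildIncrement is a hypothetical function, shown only for illustration):

#include "mlir/Dialect/Affine/IR/AffineOps.h"

using namespace mlir;

// Hypothetical caller: builds `idx + 1` as a composed affine.apply.
// Only the affine:: prefix differs from the pre-patch spelling.
static Value buildIncrement(OpBuilder &b, Location loc, Value idx) {
  AffineExpr d0;
  bindDims(b.getContext(), d0);
  return affine::makeComposedAffineApply(b, loc, d0 + 1, {idx});
}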
diff --git a/mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp b/mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp
--- a/mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp
@@ -10,6 +10,7 @@
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 AffineValueMap::AffineValueMap(AffineMap map, ValueRange operands,
                                ValueRange results)
diff --git a/mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp b/mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp
@@ -12,6 +12,7 @@
 #include "mlir/Interfaces/ValueBoundsOpInterface.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace mlir {
 namespace {
diff --git a/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp b/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
--- a/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
+++ b/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
@@ -19,6 +19,7 @@
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::transform;
 
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
@@ -35,13 +35,16 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEDATACOPYGENERATION
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-data-copy-generate"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -57,7 +60,8 @@
 // TODO: We currently can't generate copies correctly when stores
 // are strided. Check for strided stores.
 struct AffineDataCopyGeneration
-    : public impl::AffineDataCopyGenerationBase<AffineDataCopyGeneration> {
+    : public affine::impl::AffineDataCopyGenerationBase<
+          AffineDataCopyGeneration> {
   AffineDataCopyGeneration() = default;
   explicit AffineDataCopyGeneration(unsigned slowMemorySpace,
                                     unsigned fastMemorySpace,
@@ -85,17 +89,15 @@
 /// by the latter. Only load op's handled for now.
 /// TODO: extend this to store op's.
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineDataCopyGenerationPass(unsigned slowMemorySpace,
-                                         unsigned fastMemorySpace,
-                                         unsigned tagMemorySpace,
-                                         int minDmaTransferSize,
-                                         uint64_t fastMemCapacityBytes) {
+mlir::affine::createAffineDataCopyGenerationPass(
+    unsigned slowMemorySpace, unsigned fastMemorySpace, unsigned tagMemorySpace,
+    int minDmaTransferSize, uint64_t fastMemCapacityBytes) {
   return std::make_unique<AffineDataCopyGeneration>(
       slowMemorySpace, fastMemorySpace, tagMemorySpace, minDmaTransferSize,
       fastMemCapacityBytes);
 }
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineDataCopyGenerationPass() {
+mlir::affine::createAffineDataCopyGenerationPass() {
   return std::make_unique<AffineDataCopyGeneration>();
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp
@@ -18,11 +18,14 @@
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEEXPANDINDEXOPS
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 /// Lowers `affine.delinearize_index` into a sequence of division and remainder
@@ -43,7 +46,7 @@
 };
 
 class ExpandAffineIndexOpsPass
-    : public impl::AffineExpandIndexOpsBase<ExpandAffineIndexOpsPass> {
+    : public affine::impl::AffineExpandIndexOpsBase<ExpandAffineIndexOpsPass> {
 public:
   ExpandAffineIndexOpsPass() = default;
 
@@ -59,10 +62,11 @@
 
 } // namespace
 
-void mlir::populateAffineExpandIndexOpsPatterns(RewritePatternSet &patterns) {
+void mlir::affine::populateAffineExpandIndexOpsPatterns(
+    RewritePatternSet &patterns) {
   patterns.insert<LowerDelinearizeIndexOps>(patterns.getContext());
 }
 
-std::unique_ptr<Pass> mlir::createAffineExpandIndexOpsPass() {
+std::unique_ptr<Pass> mlir::affine::createAffineExpandIndexOpsPass() {
   return std::make_unique<ExpandAffineIndexOpsPass>();
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
@@ -34,13 +34,16 @@
 #include "llvm/Support/raw_ostream.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPINVARIANTCODEMOTION
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "licm"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -50,7 +53,8 @@
 /// TODO: This code should be removed once the new LICM pass can handle its
 /// uses.
 struct LoopInvariantCodeMotion
-    : public impl::AffineLoopInvariantCodeMotionBase<LoopInvariantCodeMotion> {
+    : public affine::impl::AffineLoopInvariantCodeMotionBase<
+          LoopInvariantCodeMotion> {
   void runOnOperation() override;
   void runOnAffineForOp(AffineForOp forOp);
 };
@@ -71,9 +75,9 @@
                 SmallPtrSetImpl<Operation *> &opsToHoist);
 
 // Returns true if the individual op is loop invariant.
-bool isOpLoopInvariant(Operation &op, Value indVar, ValueRange iterArgs,
-                       SmallPtrSetImpl<Operation *> &opsWithUsers,
-                       SmallPtrSetImpl<Operation *> &opsToHoist) {
+static bool isOpLoopInvariant(Operation &op, Value indVar, ValueRange iterArgs,
+                              SmallPtrSetImpl<Operation *> &opsWithUsers,
+                              SmallPtrSetImpl<Operation *> &opsToHoist) {
   LLVM_DEBUG(llvm::dbgs() << "iterating on op: " << op;);
 
   if (auto ifOp = dyn_cast<AffineIfOp>(op)) {
@@ -167,10 +171,11 @@
 }
 
 // Checks if all ops in a region (i.e. list of blocks) are loop invariant.
-bool areAllOpsInTheBlockListInvariant(
-    Region &blockList, Value indVar, ValueRange iterArgs,
-    SmallPtrSetImpl<Operation *> &opsWithUsers,
-    SmallPtrSetImpl<Operation *> &opsToHoist) {
+static bool
+areAllOpsInTheBlockListInvariant(Region &blockList, Value indVar,
+                                 ValueRange iterArgs,
+                                 SmallPtrSetImpl<Operation *> &opsWithUsers,
+                                 SmallPtrSetImpl<Operation *> &opsToHoist) {
 
   for (auto &b : blockList) {
     for (auto &op : b) {
@@ -183,10 +188,10 @@
 }
 
 // Returns true if the affine.if op can be hoisted.
-bool checkInvarianceOfNestedIfOps(AffineIfOp ifOp, Value indVar,
-                                  ValueRange iterArgs,
-                                  SmallPtrSetImpl<Operation *> &opsWithUsers,
-                                  SmallPtrSetImpl<Operation *> &opsToHoist) {
+static bool
+checkInvarianceOfNestedIfOps(AffineIfOp ifOp, Value indVar, ValueRange iterArgs,
+                             SmallPtrSetImpl<Operation *> &opsWithUsers,
+                             SmallPtrSetImpl<Operation *> &opsToHoist) {
   if (!areAllOpsInTheBlockListInvariant(ifOp.getThenRegion(), indVar, iterArgs,
                                         opsWithUsers, opsToHoist))
     return false;
@@ -243,6 +248,6 @@
 }
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineLoopInvariantCodeMotionPass() {
+mlir::affine::createAffineLoopInvariantCodeMotionPass() {
   return std::make_unique<LoopInvariantCodeMotion>();
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
@@ -17,11 +17,14 @@
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPNORMALIZE
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -29,7 +32,7 @@
 /// As currently implemented, this pass cannot fail, but it might skip over ops
 /// that are already in a normalized form.
 struct AffineLoopNormalizePass
-    : public impl::AffineLoopNormalizeBase<AffineLoopNormalizePass> {
+    : public affine::impl::AffineLoopNormalizeBase<AffineLoopNormalizePass> {
   explicit AffineLoopNormalizePass(bool promoteSingleIter) {
     this->promoteSingleIter = promoteSingleIter;
   }
@@ -47,6 +50,6 @@
 } // namespace
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineLoopNormalizePass(bool promoteSingleIter) {
+mlir::affine::createAffineLoopNormalizePass(bool promoteSingleIter) {
   return std::make_unique<AffineLoopNormalizePass>(promoteSingleIter);
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
@@ -27,18 +27,21 @@
 #include <deque>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEPARALLELIZE
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-parallel"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 /// Convert all parallel affine.for op into 1-D affine.parallel op.
 struct AffineParallelize
-    : public impl::AffineParallelizeBase<AffineParallelize> {
+    : public affine::impl::AffineParallelizeBase<AffineParallelize> {
   void runOnOperation() override;
 };
 
@@ -89,6 +92,6 @@
 }
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineParallelizePass() {
+mlir::affine::createAffineParallelizePass() {
   return std::make_unique<AffineParallelize>();
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
@@ -23,24 +23,28 @@
 #include <deque>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINESCALARREPLACEMENT
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-scalrep"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 struct AffineScalarReplacement
-    : public impl::AffineScalarReplacementBase<AffineScalarReplacement> {
+    : public affine::impl::AffineScalarReplacementBase<
+          AffineScalarReplacement> {
   void runOnOperation() override;
 };
 
 } // namespace
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineScalarReplacementPass() {
+mlir::affine::createAffineScalarReplacementPass() {
   return std::make_unique<AffineScalarReplacement>();
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp b/mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Support/Debug.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 #define DEBUG_TYPE "decompose-affine-ops"
 #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ")
@@ -38,8 +39,8 @@
   return count;
 }
 
-void mlir::reorderOperandsByHoistability(RewriterBase &rewriter,
-                                         AffineApplyOp op) {
+void mlir::affine::reorderOperandsByHoistability(RewriterBase &rewriter,
+                                                 AffineApplyOp op) {
   SmallVector<int64_t> numInvariant = llvm::to_vector(
       llvm::map_range(op->getOpOperands(), [&](OpOperand &operand) {
         return numEnclosingInvariantLoops(operand);
@@ -92,8 +93,8 @@
                                    rhsOperands);
 }
 
-FailureOr<AffineApplyOp> mlir::decompose(RewriterBase &rewriter,
-                                         AffineApplyOp op) {
+FailureOr<AffineApplyOp> mlir::affine::decompose(RewriterBase &rewriter,
+                                                 AffineApplyOp op) {
   // 1. Preconditions: only handle dimensionless AffineApplyOp maps with a
   // top-level binary expression that we can reassociate (i.e. add or mul).
   AffineMap m = op.getAffineMap();
diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
@@ -19,18 +19,21 @@
 #include "llvm/Support/Debug.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_LOOPCOALESCING
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define PASS_NAME "loop-coalescing"
 #define DEBUG_TYPE PASS_NAME
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 struct LoopCoalescingPass
-    : public impl::LoopCoalescingBase<LoopCoalescingPass> {
+    : public affine::impl::LoopCoalescingBase<LoopCoalescingPass> {
 
   void runOnOperation() override {
     func::FuncOp func = getOperation();
@@ -45,6 +48,7 @@
 
 } // namespace
 
-std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopCoalescingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::affine::createLoopCoalescingPass() {
   return std::make_unique<LoopCoalescingPass>();
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
@@ -36,13 +36,16 @@
 #include <sstream>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPFUSION
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-loop-fusion"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 /// Loop fusion pass. This pass currently supports a greedy fusion policy,
@@ -54,7 +57,7 @@
 // TODO: Extend this pass to check for fusion preventing dependences,
 // and add support for more general loop fusion algorithms.
 
-struct LoopFusion : public impl::AffineLoopFusionBase<LoopFusion> {
+struct LoopFusion : public affine::impl::AffineLoopFusionBase<LoopFusion> {
   LoopFusion() = default;
   LoopFusion(unsigned fastMemorySpace, uint64_t localBufSizeThresholdBytes,
              bool maximalFusion, enum FusionMode affineFusionMode) {
@@ -1039,7 +1042,7 @@
     depthSliceUnions.resize(dstLoopDepthTest);
     FusionStrategy strategy(FusionStrategy::ProducerConsumer);
     for (unsigned i = 1; i <= dstLoopDepthTest; ++i) {
-      FusionResult result = mlir::canFuseLoops(
+      FusionResult result = affine::canFuseLoops(
          srcAffineForOp, dstAffineForOp,
           /*dstLoopDepth=*/i, &depthSliceUnions[i - 1], strategy);
 
@@ -1259,7 +1262,7 @@
     unsigned maxLegalFusionDepth = 0;
     FusionStrategy strategy(memref);
     for (unsigned i = 1; i <= dstLoopDepthTest; ++i) {
-      FusionResult result = mlir::canFuseLoops(
+      FusionResult result = affine::canFuseLoops(
          sibAffineForOp, dstAffineForOp,
           /*dstLoopDepth=*/i, &depthSliceUnions[i - 1], strategy);
 
@@ -1291,9 +1294,9 @@
     // further inside `fuseLoops`.
     bool isInnermostInsertion = (bestDstLoopDepth == dstLoopDepthTest);
     // Fuse computation slice of 'sibLoopNest' into 'dstLoopNest'.
-    mlir::fuseLoops(sibAffineForOp, dstAffineForOp,
-                    depthSliceUnions[bestDstLoopDepth - 1],
-                    isInnermostInsertion);
+    affine::fuseLoops(sibAffineForOp, dstAffineForOp,
+                      depthSliceUnions[bestDstLoopDepth - 1],
+                      isInnermostInsertion);
     auto dstForInst = cast<AffineForOp>(dstNode->op);
     // Update operation position of fused loop nest (if needed).
@@ -1501,10 +1504,9 @@
     runOnBlock(&block);
 }
 
-std::unique_ptr<Pass>
-mlir::createLoopFusionPass(unsigned fastMemorySpace,
-                           uint64_t localBufSizeThreshold, bool maximalFusion,
-                           enum FusionMode affineFusionMode) {
+std::unique_ptr<Pass> mlir::affine::createLoopFusionPass(
+    unsigned fastMemorySpace, uint64_t localBufSizeThreshold,
+    bool maximalFusion, enum FusionMode affineFusionMode) {
   return std::make_unique<LoopFusion>(fastMemorySpace, localBufSizeThreshold,
                                       maximalFusion, affineFusionMode);
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
@@ -28,18 +28,21 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPTILING
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 
 #define DEBUG_TYPE "affine-loop-tile"
 
 namespace {
 
 /// A pass to perform loop tiling on all suitable loop nests of a Function.
-struct LoopTiling : public impl::AffineLoopTilingBase<LoopTiling> {
+struct LoopTiling : public affine::impl::AffineLoopTilingBase<LoopTiling> {
   LoopTiling() = default;
   explicit LoopTiling(uint64_t cacheSizeBytes, bool avoidMaxMinBounds = true)
       : avoidMaxMinBounds(avoidMaxMinBounds) {
@@ -62,10 +65,11 @@
 /// Creates a pass to perform loop tiling on all suitable loop nests of a
 /// Function.
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
+mlir::affine::createLoopTilingPass(uint64_t cacheSizeBytes) {
   return std::make_unique<LoopTiling>(cacheSizeBytes);
 }
-std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopTilingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::affine::createLoopTilingPass() {
   return std::make_unique<LoopTiling>();
 }
 
@@ -97,7 +101,7 @@
 /// hyper-rectangles, which are scheduled in the lexicographically increasing
 /// order on the vector of loop indices. This function will return failure when
 /// any dependence component is negative along any of `origLoops`.
-static bool checkTilingLegality(MutableArrayRef<mlir::AffineForOp> origLoops) {
+static bool checkTilingLegality(MutableArrayRef<AffineForOp> origLoops) {
   assert(!origLoops.empty() && "no original loops provided");
 
   // We first find out all dependences we intend to check.
diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
@@ -25,13 +25,16 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPUNROLL
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-loop-unroll"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -42,7 +45,7 @@
 /// full unroll threshold was specified, in which case, fully unrolls all loops
 /// with trip count less than the specified threshold. The latter is for testing
 /// purposes, especially for testing outer loop unrolling.
-struct LoopUnroll : public impl::AffineLoopUnrollBase<LoopUnroll> {
+struct LoopUnroll : public affine::impl::AffineLoopUnrollBase<LoopUnroll> {
   // Callback to obtain unroll factors; if this has a callable target, takes
   // precedence over command-line argument or passed argument.
   const std::function<unsigned(AffineForOp)> getUnrollFactor;
@@ -142,7 +145,7 @@
                           cleanUpUnroll);
 }
 
-std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopUnrollPass(
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::affine::createLoopUnrollPass(
     int unrollFactor, bool unrollUpToFactor, bool unrollFull,
     const std::function<unsigned(AffineForOp)> &getUnrollFactor) {
   return std::make_unique<LoopUnroll>(
diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
@@ -49,19 +49,22 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPUNROLLANDJAM
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-loop-unroll-jam"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 /// Loop unroll jam pass. Currently, this just unroll jams the first
 /// outer loop in a Function.
 struct LoopUnrollAndJam
-    : public impl::AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
+    : public affine::impl::AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
   explicit LoopUnrollAndJam(
       std::optional<unsigned> unrollJamFactor = std::nullopt) {
     if (unrollJamFactor)
@@ -73,7 +76,7 @@
 } // namespace
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createLoopUnrollAndJamPass(int unrollJamFactor) {
+mlir::affine::createLoopUnrollAndJamPass(int unrollJamFactor) {
   return std::make_unique<LoopUnrollAndJam>(
       unrollJamFactor == -1 ? std::nullopt
                             : std::optional<unsigned>(unrollJamFactor));
diff --git a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
@@ -27,17 +27,21 @@
 #include "llvm/Support/Debug.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEPIPELINEDATATRANSFER
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-pipeline-data-transfer"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 struct PipelineDataTransfer
-    : public impl::AffinePipelineDataTransferBase<PipelineDataTransfer> {
+    : public affine::impl::AffinePipelineDataTransferBase<
+          PipelineDataTransfer> {
   void runOnOperation() override;
   void runOnAffineForOp(AffineForOp forOp);
 
@@ -49,7 +53,7 @@
 /// Creates a pass to pipeline explicit movement of data across levels of the
 /// memory hierarchy.
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createPipelineDataTransferPass() {
+mlir::affine::createPipelineDataTransferPass() {
   return std::make_unique<PipelineDataTransfer>();
 }
 
@@ -328,7 +332,7 @@
   instShiftMap[dmaStartOp] = 0;
   // Set shifts for DMA start op's affine operand computation slices to 0.
   SmallVector<AffineApplyOp, 4> sliceOps;
-  mlir::createAffineComputationSlice(dmaStartOp, &sliceOps);
+  affine::createAffineComputationSlice(dmaStartOp, &sliceOps);
   if (!sliceOps.empty()) {
     for (auto sliceOp : sliceOps) {
       instShiftMap[sliceOp.getOperation()] = 0;
diff --git a/mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp b/mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp
@@ -14,6 +14,7 @@
 #include "mlir/Interfaces/ValueBoundsOpInterface.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 static FailureOr<OpFoldResult>
 reifyValueBound(OpBuilder &b, Location loc, presburger::BoundType type,
@@ -54,7 +55,7 @@
   }
 
   // Simplify and return bound.
-  mlir::canonicalizeMapAndOperands(&boundMap, &operands);
+  affine::canonicalizeMapAndOperands(&boundMap, &operands);
   // Check for special cases where no affine.apply op is needed.
   if (boundMap.isSingleConstant()) {
     // Bound is a constant: return an IntegerAttr.
@@ -69,10 +70,10 @@
         operands[expr.getPosition() + boundMap.getNumDims()]);
   // General case: build affine.apply op.
   return static_cast<OpFoldResult>(
-      b.create<AffineApplyOp>(loc, boundMap, operands).getResult());
+      b.create<affine::AffineApplyOp>(loc, boundMap, operands).getResult());
 }
 
-FailureOr<OpFoldResult> mlir::reifyShapedValueDimBound(
+FailureOr<OpFoldResult> mlir::affine::reifyShapedValueDimBound(
     OpBuilder &b, Location loc, presburger::BoundType type, Value value,
     int64_t dim, ValueBoundsConstraintSet::StopConditionFn stopCondition,
     bool closedUB) {
@@ -89,7 +90,7 @@
                          closedUB);
 }
 
-FailureOr<OpFoldResult> mlir::reifyIndexValueBound(
+FailureOr<OpFoldResult> mlir::affine::reifyIndexValueBound(
     OpBuilder &b, Location loc, presburger::BoundType type, Value value,
     ValueBoundsConstraintSet::StopConditionFn stopCondition, bool closedUB) {
   auto reifyToOperands = [&](Value v, std::optional<int64_t> d) {
diff --git a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
@@ -20,13 +20,16 @@
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_SIMPLIFYAFFINESTRUCTURES
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "simplify-affine-structure"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -35,7 +38,8 @@
 /// all memrefs with non-trivial layout maps are converted to ones with trivial
 /// identity layout ones.
 struct SimplifyAffineStructures
-    : public impl::SimplifyAffineStructuresBase<SimplifyAffineStructures> {
+    : public affine::impl::SimplifyAffineStructuresBase<
+          SimplifyAffineStructures> {
   void runOnOperation() override;
 
   /// Utility to simplify an affine attribute and update its entry in the parent
@@ -78,7 +82,7 @@
 } // namespace
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createSimplifyAffineStructuresPass() {
+mlir::affine::createSimplifyAffineStructuresPass() {
   return std::make_unique<SimplifyAffineStructures>();
 }
diff --git a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
--- a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
@@ -31,11 +31,14 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEVECTORIZE
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 using namespace mlir;
+using namespace affine;
 using namespace vector;
 
 ///
@@ -585,7 +588,7 @@
 static std::optional<NestedPattern>
 makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
             ArrayRef<int64_t> fastestVaryingPattern) {
-  using matcher::For;
+  using affine::matcher::For;
   int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
   int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
   int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
@@ -606,7 +609,7 @@
 }
 
 static NestedPattern &vectorTransferPattern() {
-  static auto pattern = matcher::Op([](Operation &op) {
+  static auto pattern = affine::matcher::Op([](Operation &op) {
    return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
   });
   return pattern;
 }
 
 /// Base state for the vectorize pass.
 /// Command line arguments are preempted by non-empty pass arguments.
-struct Vectorize : public impl::AffineVectorizeBase<Vectorize> {
+struct Vectorize : public affine::impl::AffineVectorizeBase<Vectorize> {
   using Base::Base;
 
   void runOnOperation() override;
@@ -1796,7 +1799,6 @@
   return success();
 }
 
-namespace mlir {
 
 /// External utility to vectorize affine loops in 'loops' using the n-D
 /// vectorization factors in 'vectorSizes'. By default, each vectorization
@@ -1806,10 +1808,10 @@
 /// If `reductionLoops` is not empty, the given reduction loops may be
 /// vectorized along the reduction dimension.
 /// TODO: Vectorizing reductions is supported only for 1-D vectorization.
-void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
-                          ArrayRef<int64_t> vectorSizes,
-                          ArrayRef<int64_t> fastestVaryingPattern,
-                          const ReductionLoopMap &reductionLoops) {
+void mlir::affine::vectorizeAffineLoops(
+    Operation *parentOp, DenseSet<Operation *> &loops,
+    ArrayRef<int64_t> vectorSizes, ArrayRef<int64_t> fastestVaryingPattern,
+    const ReductionLoopMap &reductionLoops) {
   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
   NestedPatternContext mlContext;
   vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
@@ -1851,14 +1853,12 @@
 ///   loops = {{%i2}}, to vectorize only the first innermost loop;
 ///   loops = {{%i3}}, to vectorize only the second innermost loop;
 ///   loops = {{%i1}}, to vectorize only the middle loop.
-LogicalResult
-vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
-                        const VectorizationStrategy &strategy) {
+LogicalResult mlir::affine::vectorizeAffineLoopNest(
+    std::vector<SmallVector<AffineForOp, 2>> &loops,
+    const VectorizationStrategy &strategy) {
   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
   NestedPatternContext mlContext;
   if (failed(verifyLoopNesting(loops)))
     return failure();
   return vectorizeLoopNest(loops, strategy);
 }
-
-} // namespace mlir
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
--- a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
@@ -26,6 +26,7 @@
 #define DEBUG_TYPE "loop-fusion-utils"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 // Gathers all load and store memref accesses in 'opA' into 'values', where
 // 'values[memref] == true' for each store operation.
@@ -245,10 +246,11 @@
 // TODO: This pass performs some computation that is the same for all the depths
 // (e.g., getMaxLoopDepth). Implement a version of this utility that processes
 // all the depths at once or only the legal maximal depth for maximal fusion.
-FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
-                                unsigned dstLoopDepth,
-                                ComputationSliceState *srcSlice,
-                                FusionStrategy fusionStrategy) {
+FusionResult mlir::affine::canFuseLoops(AffineForOp srcForOp,
+                                        AffineForOp dstForOp,
+                                        unsigned dstLoopDepth,
+                                        ComputationSliceState *srcSlice,
+                                        FusionStrategy fusionStrategy) {
   // Return 'failure' if 'dstLoopDepth == 0'.
   if (dstLoopDepth == 0) {
     LLVM_DEBUG(llvm::dbgs() << "Cannot fuse loop nests at depth 0\n");
@@ -303,7 +305,7 @@
 
   // Calculate the number of common loops surrounding 'srcForOp' and 'dstForOp'.
   unsigned numCommonLoops =
-      mlir::getNumCommonSurroundingLoops(*srcForOp, *dstForOp);
+      affine::getNumCommonSurroundingLoops(*srcForOp, *dstForOp);
 
   // Filter out ops in 'opsA' to compute the slice union based on the
   // assumptions made by the fusion strategy.
@@ -335,9 +337,9 @@
 
   // Compute union of computation slices computed between all pairs of ops
   // from 'forOpA' and 'forOpB'.
-  SliceComputationResult sliceComputationResult =
-      mlir::computeSliceUnion(strategyOpsA, opsB, dstLoopDepth, numCommonLoops,
-                              isSrcForOpBeforeDstForOp, srcSlice);
+  SliceComputationResult sliceComputationResult = affine::computeSliceUnion(
+      strategyOpsA, opsB, dstLoopDepth, numCommonLoops,
+      isSrcForOpBeforeDstForOp, srcSlice);
   if (sliceComputationResult.value == SliceComputationResult::GenericFailure) {
     LLVM_DEBUG(llvm::dbgs() << "computeSliceUnion failed\n");
     return FusionResult::FailPrecondition;
@@ -353,8 +355,8 @@
 
 /// Patch the loop body of a forOp that is a single iteration reduction loop
 /// into its containing block.
-LogicalResult promoteSingleIterReductionLoop(AffineForOp forOp,
-                                             bool siblingFusionUser) {
+static LogicalResult promoteSingleIterReductionLoop(AffineForOp forOp,
+                                                    bool siblingFusionUser) {
   // Check if the reduction loop is a single iteration loop.
   std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
   if (!tripCount || *tripCount != 1)
@@ -416,9 +418,9 @@
 
 /// Fuses 'srcForOp' into 'dstForOp' with destination loop block insertion point
 /// and source slice loop bounds specified in 'srcSlice'.
-void mlir::fuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
-                     const ComputationSliceState &srcSlice,
-                     bool isInnermostSiblingInsertion) {
+void mlir::affine::fuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
+                             const ComputationSliceState &srcSlice,
+                             bool isInnermostSiblingInsertion) {
   // Clone 'srcForOp' into 'dstForOp' at 'srcSlice->insertPoint'.
   OpBuilder b(srcSlice.insertPoint->getBlock(), srcSlice.insertPoint);
   IRMapping mapper;
@@ -465,7 +467,8 @@
 /// Collect loop nest statistics (eg. loop trip count and operation count)
 /// in 'stats' for loop nest rooted at 'forOp'. Returns true on success,
 /// returns false otherwise.
-bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
+bool mlir::affine::getLoopNestStats(AffineForOp forOpRoot,
+                                    LoopNestStats *stats) {
   auto walkResult = forOpRoot.walk([&](AffineForOp forOp) {
     auto *childForOp = forOp.getOperation();
     auto *parentForOp = forOp->getParentOp();
@@ -553,7 +556,7 @@
 /// Currently, the total cost is computed by counting the total operation
 /// instance count (i.e. total number of operations in the loop body * loop
 /// trip count) for the entire loop nest.
-int64_t mlir::getComputeCost(AffineForOp forOp, LoopNestStats &stats) {
+int64_t mlir::affine::getComputeCost(AffineForOp forOp, LoopNestStats &stats) {
   return getComputeCostHelper(forOp, stats,
                               /*tripCountOverrideMap=*/nullptr,
                               /*computeCostMap=*/nullptr);
@@ -564,10 +567,12 @@
 /// the total cost is computed by counting the total operation instance count
 /// (i.e. total number of operations in the loop body * loop trip count) for
 /// the entire loop nest.
-bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
-                                AffineForOp dstForOp, LoopNestStats &dstStats,
-                                const ComputationSliceState &slice,
-                                int64_t *computeCost) {
+bool mlir::affine::getFusionComputeCost(AffineForOp srcForOp,
+                                        LoopNestStats &srcStats,
+                                        AffineForOp dstForOp,
+                                        LoopNestStats &dstStats,
+                                        const ComputationSliceState &slice,
+                                        int64_t *computeCost) {
   llvm::SmallDenseMap<Operation *, uint64_t, 8> sliceTripCountMap;
   DenseMap<Operation *, int64_t> computeCostMap;
 
@@ -634,7 +639,7 @@
 /// Returns in 'producerConsumerMemrefs' the memrefs involved in a
 /// producer-consumer dependence between write ops in 'srcOps' and read ops in
 /// 'dstOps'.
-void mlir::gatherProducerConsumerMemrefs(
+void mlir::affine::gatherProducerConsumerMemrefs(
     ArrayRef<Operation *> srcOps, ArrayRef<Operation *> dstOps,
     DenseSet<Value> &producerConsumerMemrefs) {
   // Gather memrefs from stores in 'srcOps'.
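The fusion entry points move as a group, so the usual query-then-transform sequence is unchanged apart from the qualifier. A sketch, assuming `srcLoop` and `dstLoop` are `AffineForOp` handles from surrounding analysis and that the trailing strategy parameter keeps its default:

    mlir::affine::ComputationSliceState slice;
    mlir::affine::FusionResult result =
        mlir::affine::canFuseLoops(srcLoop, dstLoop, /*dstLoopDepth=*/1, &slice);
    if (result.value == mlir::affine::FusionResult::Success)
      mlir::affine::fuseLoops(srcLoop, dstLoop, slice);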
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
--- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
@@ -34,6 +34,7 @@
 #define DEBUG_TYPE "loop-utils"

 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 using llvm::SmallMapVector;
@@ -128,7 +129,7 @@
 /// Promotes the loop body of a forOp to its containing block if the forOp
 /// was known to have a single iteration.
 // TODO: extend this for arbitrary affine bounds.
-LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
+LogicalResult mlir::affine::promoteIfSingleIteration(AffineForOp forOp) {
   std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
   if (!tripCount || *tripCount != 1)
     return failure();
@@ -232,9 +233,9 @@
 // asserts preservation of SSA dominance. A check for that as well as that for
 // memory-based dependence preservation check rests with the users of this
 // method.
-LogicalResult mlir::affineForOpBodySkew(AffineForOp forOp,
-                                        ArrayRef<uint64_t> shifts,
-                                        bool unrollPrologueEpilogue) {
+LogicalResult mlir::affine::affineForOpBodySkew(AffineForOp forOp,
+                                                ArrayRef<uint64_t> shifts,
+                                                bool unrollPrologueEpilogue) {
   assert(forOp.getBody()->getOperations().size() == shifts.size() &&
          "too few/many shifts");
   if (forOp.getBody()->begin() == std::prev(forOp.getBody()->end()))
@@ -363,7 +364,8 @@
 }

 /// Checks whether a loop nest is hyper-rectangular or not.
-LogicalResult checkIfHyperRectangular(MutableArrayRef<AffineForOp> input) {
+static LogicalResult
+checkIfHyperRectangular(MutableArrayRef<AffineForOp> input) {
   FlatAffineValueConstraints cst;
   SmallVector<Operation *, 8> ops(input.begin(), input.end());
   // 0-d or 1-d is trivially hyper-rectangular.
@@ -384,8 +386,8 @@
 /// Check if the input nest is supported for tiling and whether tiling would be
 /// legal or not.
 template <typename t>
-LogicalResult performPreTilingChecks(MutableArrayRef<AffineForOp> input,
-                                     ArrayRef<t> tileSizes) {
+static LogicalResult performPreTilingChecks(MutableArrayRef<AffineForOp> input,
+                                            ArrayRef<t> tileSizes) {
   assert(input.size() == tileSizes.size() && "Too few/many tile sizes");

   if (llvm::any_of(input,
@@ -418,15 +420,15 @@

 /// Move the loop body of AffineForOp 'src' from 'src' to the start of dest
 /// body.
-void moveLoopBody(AffineForOp src, AffineForOp dest) {
+static void moveLoopBody(AffineForOp src, AffineForOp dest) {
   moveLoopBodyImpl(src, dest, dest.getBody()->begin());
 }

 /// Constructs tiled loop nest, without setting the loop bounds and move the
 /// body of the original loop nest to the tiled loop nest.
-void constructTiledLoopNest(MutableArrayRef<AffineForOp> origLoops,
-                            AffineForOp rootAffineForOp, unsigned width,
-                            MutableArrayRef<AffineForOp> tiledLoops) {
+static void constructTiledLoopNest(MutableArrayRef<AffineForOp> origLoops,
+                                   AffineForOp rootAffineForOp, unsigned width,
+                                   MutableArrayRef<AffineForOp> tiledLoops) {
   Location loc = rootAffineForOp.getLoc();

   // The outermost among the loops as we add more..
@@ -773,9 +775,9 @@
 }

 LogicalResult
-mlir::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
-                          ArrayRef<unsigned> tileSizes,
-                          SmallVectorImpl<AffineForOp> *tiledNest) {
+mlir::affine::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
+                                  ArrayRef<unsigned> tileSizes,
+                                  SmallVectorImpl<AffineForOp> *tiledNest) {
   if (input.empty())
     return success();
@@ -816,10 +818,9 @@
 /// loops and intra-tile loops, using SSA values as tiling parameters. A band
 /// is a contiguous set of loops.
 // TODO: handle non hyper-rectangular spaces.
-LogicalResult
-mlir::tilePerfectlyNestedParametric(MutableArrayRef<AffineForOp> input,
-                                    ArrayRef<Value> tileSizes,
-                                    SmallVectorImpl<AffineForOp> *tiledNest) {
+LogicalResult mlir::affine::tilePerfectlyNestedParametric(
+    MutableArrayRef<AffineForOp> input, ArrayRef<Value> tileSizes,
+    SmallVectorImpl<AffineForOp> *tiledNest) {
   if (input.empty())
     return success();
@@ -859,8 +860,8 @@
 /// (the first op being another AffineFor, and the second op - a terminator).
 /// A loop is perfectly nested iff: the first op in the loop's body is another
 /// AffineForOp, and the second op is a terminator).
-void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
-                                   AffineForOp root) {
+void mlir::affine::getPerfectlyNestedLoops(
+    SmallVectorImpl<AffineForOp> &nestedLoops, AffineForOp root) {
   for (unsigned i = 0; i < std::numeric_limits<unsigned>::max(); ++i) {
     nestedLoops.push_back(root);
     Block &body = root.getRegion().front();
@@ -876,8 +877,8 @@
 /// Identify valid and profitable bands of loops to tile. This is currently just
 /// a temporary placeholder to test the mechanics of tiled code generation.
 /// Returns all maximal outermost perfect loop nests to tile.
-void mlir::getTileableBands(func::FuncOp f,
-                            std::vector<SmallVector<AffineForOp, 6>> *bands) {
+void mlir::affine::getTileableBands(
+    func::FuncOp f, std::vector<SmallVector<AffineForOp, 6>> *bands) {
   // Get maximal perfect nest of 'affine.for' insts starting from root
   // (inclusive).
   for (AffineForOp forOp : f.getOps<AffineForOp>()) {
@@ -888,7 +889,7 @@
 }

 /// Unrolls this loop completely.
-LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
+LogicalResult mlir::affine::loopUnrollFull(AffineForOp forOp) {
   std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
   if (mayBeConstantTripCount.has_value()) {
     uint64_t tripCount = *mayBeConstantTripCount;
@@ -903,8 +904,8 @@

 /// Unrolls this loop by the specified factor or by the trip count (if constant)
 /// whichever is lower.
-LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp,
-                                         uint64_t unrollFactor) {
+LogicalResult mlir::affine::loopUnrollUpToFactor(AffineForOp forOp,
+                                                 uint64_t unrollFactor) {
   std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
   if (mayBeConstantTripCount.has_value() &&
       *mayBeConstantTripCount < unrollFactor)
@@ -1011,7 +1012,7 @@

 /// Unrolls this loop by the specified factor. Returns success if the loop
 /// is successfully unrolled.
-LogicalResult mlir::loopUnrollByFactor(
+LogicalResult mlir::affine::loopUnrollByFactor(
     AffineForOp forOp, uint64_t unrollFactor,
     function_ref<void(unsigned, Operation *, OpBuilder)> annotateFn,
     bool cleanUpUnroll) {
@@ -1078,8 +1079,8 @@
   return success();
 }

-LogicalResult mlir::loopUnrollJamUpToFactor(AffineForOp forOp,
-                                            uint64_t unrollJamFactor) {
+LogicalResult mlir::affine::loopUnrollJamUpToFactor(AffineForOp forOp,
+                                                    uint64_t unrollJamFactor) {
   std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
   if (mayBeConstantTripCount.has_value() &&
       *mayBeConstantTripCount < unrollJamFactor)
@@ -1129,8 +1130,8 @@
 };

 /// Unrolls and jams this loop by the specified factor.
-LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
-                                          uint64_t unrollJamFactor) {
+LogicalResult mlir::affine::loopUnrollJamByFactor(AffineForOp forOp,
+                                                  uint64_t unrollJamFactor) {
   assert(unrollJamFactor > 0 && "unroll jam factor should be positive");
   std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
@@ -1212,7 +1213,7 @@
   }
   // Create a new loop with additional iterOperands, iter_args and yield
   // operands. This new loop will take the loop body of the original loop.
-  AffineForOp newForOp = mlir::replaceForOpWithNewYields(
+  AffineForOp newForOp = affine::replaceForOpWithNewYields(
       builder, oldForOp, dupIterOperands, dupYieldOperands, dupIterArgs);
   newLoopsWithIterArgs.push_back(newForOp);
   // `forOp` has been replaced with a new loop.
@@ -1323,7 +1324,7 @@

 /// Performs loop interchange on 'forOpA' and 'forOpB', where 'forOpB' is
 /// nested within 'forOpA' as the only non-terminator operation in its block.
-void mlir::interchangeLoops(AffineForOp forOpA, AffineForOp forOpB) {
+void mlir::affine::interchangeLoops(AffineForOp forOpA, AffineForOp forOpB) {
   assert(&*forOpA.getBody()->begin() == forOpB.getOperation());
   auto &forOpABody = forOpA.getBody()->getOperations();
   auto &forOpBBody = forOpB.getBody()->getOperations();
@@ -1380,8 +1381,8 @@

 /// Checks if the loop interchange permutation 'loopPermMap' of the perfectly
 /// nested sequence of loops in 'loops' would violate dependences.
-bool mlir::isValidLoopInterchangePermutation(ArrayRef<AffineForOp> loops,
-                                             ArrayRef<unsigned> loopPermMap) {
+bool mlir::affine::isValidLoopInterchangePermutation(
+    ArrayRef<AffineForOp> loops, ArrayRef<unsigned> loopPermMap) {
   // Gather dependence components for dependences between all ops in loop nest
   // rooted at 'loops[0]', at loop depths in range [1, maxLoopDepth].
   assert(loopPermMap.size() == loops.size());
@@ -1394,7 +1395,7 @@
 /// Returns true if `loops` is a perfectly nested loop nest, where loops appear
 /// in it from outermost to innermost.
 bool LLVM_ATTRIBUTE_UNUSED
-mlir::isPerfectlyNested(ArrayRef<AffineForOp> loops) {
+mlir::affine::isPerfectlyNested(ArrayRef<AffineForOp> loops) {
   assert(!loops.empty() && "no loops provided");

   // We already know that the block can't be empty.
@@ -1416,8 +1417,8 @@
 // input[i] should move from position i -> permMap[i]. Returns the position in
 // `input` that becomes the new outermost loop.
-unsigned mlir::permuteLoops(MutableArrayRef<AffineForOp> input,
-                            ArrayRef<unsigned> permMap) {
+unsigned mlir::affine::permuteLoops(MutableArrayRef<AffineForOp> input,
+                                    ArrayRef<unsigned> permMap) {
   assert(input.size() == permMap.size() && "invalid permutation map size");
   // Check whether the permutation spec is valid. This is a small vector - we'll
   // just sort and check if it's iota.
@@ -1486,7 +1487,7 @@
 // Sinks all sequential loops to the innermost levels (while preserving
 // relative order among them) and moves all parallel loops to the
 // outermost (while again preserving relative order among them).
-AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) {
+AffineForOp mlir::affine::sinkSequentialLoops(AffineForOp forOp) {
   SmallVector<AffineForOp, 4> loops;
   getPerfectlyNestedLoops(loops, forOp);
   if (loops.size() < 2)
@@ -1621,8 +1622,8 @@
 }

 SmallVector<SmallVector<AffineForOp, 8>, 8>
-mlir::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
-           ArrayRef<AffineForOp> targets) {
+mlir::affine::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
+                   ArrayRef<AffineForOp> targets) {
   SmallVector<SmallVector<AffineForOp, 8>, 8> res;
   SmallVector<AffineForOp, 8> currentTargets(targets.begin(), targets.end());
   for (auto it : llvm::zip(forOps, sizes)) {
@@ -1633,9 +1634,9 @@
   return res;
 }

-SmallVector<AffineForOp, 8> mlir::tile(ArrayRef<AffineForOp> forOps,
-                                       ArrayRef<uint64_t> sizes,
-                                       AffineForOp target) {
+SmallVector<AffineForOp, 8> mlir::affine::tile(ArrayRef<AffineForOp> forOps,
+                                               ArrayRef<uint64_t> sizes,
+                                               AffineForOp target) {
   SmallVector<AffineForOp, 8> res;
   for (auto loops : tile(forOps, sizes, ArrayRef<AffineForOp>(target))) {
     assert(loops.size() == 1);
@@ -1644,7 +1645,7 @@
   return res;
 }

-LogicalResult mlir::coalesceLoops(MutableArrayRef<AffineForOp> loops) {
+LogicalResult mlir::affine::coalesceLoops(MutableArrayRef<AffineForOp> loops) {
   if (loops.size() < 2)
     return success();
@@ -1758,8 +1759,9 @@
   return success();
 }

-void mlir::mapLoopToProcessorIds(scf::ForOp forOp, ArrayRef<Value> processorId,
-                                 ArrayRef<Value> numProcessors) {
+void mlir::affine::mapLoopToProcessorIds(scf::ForOp forOp,
+                                         ArrayRef<Value> processorId,
+                                         ArrayRef<Value> numProcessors) {
   assert(processorId.size() == numProcessors.size());
   if (processorId.empty())
     return;
@@ -2300,11 +2302,11 @@
   return true;
 }

-LogicalResult mlir::affineDataCopyGenerate(Block::iterator begin,
-                                           Block::iterator end,
-                                           const AffineCopyOptions &copyOptions,
-                                           std::optional<Value> filterMemRef,
-                                           DenseSet<Operation *> &copyNests) {
+LogicalResult
+mlir::affine::affineDataCopyGenerate(Block::iterator begin, Block::iterator end,
+                                     const AffineCopyOptions &copyOptions,
+                                     std::optional<Value> filterMemRef,
+                                     DenseSet<Operation *> &copyNests) {
   if (begin == end)
     return success();
@@ -2490,16 +2492,15 @@

 // A convenience version of affineDataCopyGenerate for all ops in the body of
 // an AffineForOp.
-LogicalResult mlir::affineDataCopyGenerate(AffineForOp forOp,
-                                           const AffineCopyOptions &copyOptions,
-                                           std::optional<Value> filterMemRef,
-                                           DenseSet<Operation *> &copyNests) {
+LogicalResult mlir::affine::affineDataCopyGenerate(
+    AffineForOp forOp, const AffineCopyOptions &copyOptions,
+    std::optional<Value> filterMemRef, DenseSet<Operation *> &copyNests) {
   return affineDataCopyGenerate(forOp.getBody()->begin(),
                                 std::prev(forOp.getBody()->end()), copyOptions,
                                 filterMemRef, copyNests);
 }

-LogicalResult mlir::generateCopyForMemRegion(
+LogicalResult mlir::affine::generateCopyForMemRegion(
     const MemRefRegion &memrefRegion, Operation *analyzedOp,
     const AffineCopyOptions &copyOptions, CopyGenerateResult &result) {
   Block *block = analyzedOp->getBlock();
@@ -2543,8 +2544,8 @@
 }

 /// Gathers all AffineForOps in 'func.func' grouped by loop depth.
-void mlir::gatherLoops(func::FuncOp func,
-                       std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
+void mlir::affine::gatherLoops(
    func::FuncOp func, std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
   for (auto &block : func)
     gatherLoopsInBlock(&block, /*currLoopDepth=*/0, depthToLoops);
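`gatherLoops` keeps its depth-bucketed output, which makes it a convenient smoke test for the renamed API. A sketch, assuming `funcOp` is a `func::FuncOp` in scope:

    std::vector<llvm::SmallVector<mlir::affine::AffineForOp, 2>> depthToLoops;
    mlir::affine::gatherLoops(funcOp, depthToLoops);
    // depthToLoops[0] now holds the outermost affine.for ops,
    // depthToLoops[1] the loops one level deeper, and so on.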
@@ -2559,7 +2560,7 @@
 // affine.applys, fold to constant if all result dimensions of the map are
 // constant (canonicalizeMapAndOperands below already does this for single
 // result bound maps), and use simplifyMap to perform algebraic simplification.
-AffineForOp mlir::createCanonicalizedAffineForOp(
+AffineForOp mlir::affine::createCanonicalizedAffineForOp(
     OpBuilder b, Location loc, ValueRange lbOperands, AffineMap lbMap,
     ValueRange ubOperands, AffineMap ubMap, int64_t step) {
   SmallVector<Value, 4> lowerOperands(lbOperands);
@@ -2716,8 +2717,8 @@
 }

 LogicalResult
-mlir::separateFullTiles(MutableArrayRef<AffineForOp> inputNest,
-                        SmallVectorImpl<AffineForOp> *fullTileNest) {
+mlir::affine::separateFullTiles(MutableArrayRef<AffineForOp> inputNest,
+                                SmallVectorImpl<AffineForOp> *fullTileNest) {
   if (inputNest.empty())
     return success();
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -30,6 +30,7 @@
 #define DEBUG_TYPE "affine-utils"

 using namespace mlir;
+using namespace affine;
 using namespace presburger;

 namespace {
@@ -209,17 +210,18 @@

 /// Create a sequence of operations that implement the `expr` applied to the
 /// given dimension and symbol values.
-mlir::Value mlir::expandAffineExpr(OpBuilder &builder, Location loc,
-                                   AffineExpr expr, ValueRange dimValues,
-                                   ValueRange symbolValues) {
+mlir::Value mlir::affine::expandAffineExpr(OpBuilder &builder, Location loc,
+                                           AffineExpr expr,
+                                           ValueRange dimValues,
+                                           ValueRange symbolValues) {
   return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
 }

 /// Create a sequence of operations that implement the `affineMap` applied to
 /// the given `operands` (as if it were an AffineApplyOp).
 std::optional<SmallVector<Value, 8>>
-mlir::expandAffineMap(OpBuilder &builder, Location loc, AffineMap affineMap,
-                      ValueRange operands) {
+mlir::affine::expandAffineMap(OpBuilder &builder, Location loc,
+                              AffineMap affineMap, ValueRange operands) {
   auto numDims = affineMap.getNumDims();
   auto expanded = llvm::to_vector<8>(
       llvm::map_range(affineMap.getResults(),
@@ -341,8 +343,8 @@
 }

 LogicalResult
-mlir::affineParallelize(AffineForOp forOp,
-                        ArrayRef<LoopReduction> parallelReductions) {
+mlir::affine::affineParallelize(AffineForOp forOp,
+                                ArrayRef<LoopReduction> parallelReductions) {
   // Fail early if there are iter arguments that are not reductions.
   unsigned numReductions = parallelReductions.size();
   if (numReductions != forOp.getNumIterOperands())
@@ -400,7 +402,7 @@
 }

 // Returns success if any hoisting happened.
-LogicalResult mlir::hoistAffineIfOp(AffineIfOp ifOp, bool *folded) {
+LogicalResult mlir::affine::hoistAffineIfOp(AffineIfOp ifOp, bool *folded) {
   // Bail out early if the ifOp returns a result. TODO: Consider how to
   // properly support this case.
   if (ifOp.getNumResults() != 0)
@@ -454,8 +456,9 @@
 }

 // Return the min expr after replacing the given dim.
-AffineExpr mlir::substWithMin(AffineExpr e, AffineExpr dim, AffineExpr min,
-                              AffineExpr max, bool positivePath) {
+AffineExpr mlir::affine::substWithMin(AffineExpr e, AffineExpr dim,
+                                      AffineExpr min, AffineExpr max,
+                                      bool positivePath) {
   if (e == dim)
     return positivePath ? min : max;
   if (auto bin = e.dyn_cast<AffineBinaryOpExpr>()) {
@@ -480,7 +483,7 @@
   return e;
 }

-void mlir::normalizeAffineParallel(AffineParallelOp op) {
+void mlir::affine::normalizeAffineParallel(AffineParallelOp op) {
   // Loops with min/max in bounds are not normalized at the moment.
   if (op.hasMinMaxBounds())
     return;
@@ -544,7 +547,8 @@
                                ubExprs, op.getContext());
   op.setUpperBounds(ranges.getOperands(), newUpperMap);
 }

-LogicalResult mlir::normalizeAffineFor(AffineForOp op, bool promoteSingleIter) {
+LogicalResult mlir::affine::normalizeAffineFor(AffineForOp op,
+                                               bool promoteSingleIter) {
   if (promoteSingleIter && succeeded(promoteIfSingleIteration(op)))
     return success();
@@ -701,7 +705,7 @@
 }

 template <typename EffectType, typename T>
-bool mlir::hasNoInterveningEffect(Operation *start, T memOp) {
+bool mlir::affine::hasNoInterveningEffect(Operation *start, T memOp) {
   auto isLocallyAllocated = [](Value memref) {
     auto *defOp = memref.getDefiningOp();
     return defOp && hasSingleEffect<MemoryEffects::Allocate>(defOp, memref);
   };
@@ -894,7 +898,7 @@

     // 4. Ensure there is no intermediate operation which could replace the
     // value in memory.
-    if (!mlir::hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp))
+    if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp))
       continue;

     // We now have a candidate for forwarding.
@@ -921,9 +925,10 @@
   return success();
 }

-template bool mlir::hasNoInterveningEffect<mlir::MemoryEffects::Read,
-                                           mlir::AffineReadOpInterface>(
-    mlir::Operation *, mlir::AffineReadOpInterface);
+template bool
+mlir::affine::hasNoInterveningEffect<mlir::MemoryEffects::Read,
+                                     affine::AffineReadOpInterface>(
+    mlir::Operation *, affine::AffineReadOpInterface);

 // This attempts to find stores which have no impact on the final result.
 // A writing op writeA will be eliminated if there exists an op writeB if
@@ -961,7 +966,7 @@

     // There cannot be an operation which reads from memory between
     // the two writes.
-    if (!mlir::hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB))
+    if (!affine::hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB))
       continue;

     opsToErase.push_back(writeA);
@@ -997,7 +1002,7 @@
       continue;

     // 3. There is no write between loadA and loadB.
-    if (!mlir::hasNoInterveningEffect<MemoryEffects::Write>(
-            loadB.getOperation(), loadA))
+    if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(
+            loadB.getOperation(), loadA))
       continue;
@@ -1055,8 +1060,8 @@
 // currently only eliminates the stores only if no other loads/uses (other
 // than dealloc) remain.
 //
-void mlir::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
-                               PostDominanceInfo &postDomInfo) {
+void mlir::affine::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
+                                       PostDominanceInfo &postDomInfo) {
   // Load op's whose results were replaced by those forwarded from stores.
   SmallVector<Operation *, 8> opsToErase;
@@ -1109,13 +1114,11 @@
 }

 // Perform the replacement in `op`.
-LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
-                                             Operation *op,
-                                             ArrayRef<Value> extraIndices,
-                                             AffineMap indexRemap,
-                                             ArrayRef<Value> extraOperands,
-                                             ArrayRef<Value> symbolOperands,
-                                             bool allowNonDereferencingOps) {
+LogicalResult mlir::affine::replaceAllMemRefUsesWith(
+    Value oldMemRef, Value newMemRef, Operation *op,
+    ArrayRef<Value> extraIndices, AffineMap indexRemap,
+    ArrayRef<Value> extraOperands, ArrayRef<Value> symbolOperands,
+    bool allowNonDereferencingOps) {
   unsigned newMemRefRank = newMemRef.getType().cast<MemRefType>().getRank();
   (void)newMemRefRank; // unused in opt mode
   unsigned oldMemRefRank = oldMemRef.getType().cast<MemRefType>().getRank();
@@ -1285,7 +1288,7 @@
   return success();
 }

-LogicalResult mlir::replaceAllMemRefUsesWith(
+LogicalResult mlir::affine::replaceAllMemRefUsesWith(
     Value oldMemRef, Value newMemRef, ArrayRef<Value> extraIndices,
     AffineMap indexRemap, ArrayRef<Value> extraOperands,
     ArrayRef<Value> symbolOperands, Operation *domOpFilter,
@@ -1401,7 +1404,7 @@
 /// all the affine.apply op's supplying operands to this opInst did not have any
 /// uses besides this opInst; otherwise returns the list of affine.apply
 /// operations created in output argument `sliceOps`.
-void mlir::createAffineComputationSlice(
+void mlir::affine::createAffineComputationSlice(
     Operation *opInst, SmallVectorImpl<AffineApplyOp> *sliceOps) {
   // Collect all operands that are results of affine apply ops.
   SmallVector<Value, 4> subOperands;
@@ -1709,7 +1712,7 @@
 }

 // TODO: Currently works for static memrefs with a single layout map.
-LogicalResult mlir::normalizeMemRef(memref::AllocOp *allocOp) {
+LogicalResult mlir::affine::normalizeMemRef(memref::AllocOp *allocOp) {
   MemRefType memrefType = allocOp->getType();
   OpBuilder b(*allocOp);
@@ -1767,8 +1770,8 @@
   return success();
 }

-MemRefType mlir::normalizeMemRefType(MemRefType memrefType,
-                                     unsigned numSymbolicOperands) {
+MemRefType mlir::affine::normalizeMemRefType(MemRefType memrefType,
+                                             unsigned numSymbolicOperands) {
   unsigned rank = memrefType.getRank();
   if (rank == 0)
     return memrefType;
@@ -1848,13 +1851,15 @@
   return newMemRefType;
 }

-DivModValue mlir::getDivMod(OpBuilder &b, Location loc, Value lhs, Value rhs) {
+DivModValue mlir::affine::getDivMod(OpBuilder &b, Location loc, Value lhs,
+                                    Value rhs) {
   DivModValue result;
   AffineExpr d0, d1;
   bindDims(b.getContext(), d0, d1);
   result.quotient =
-      makeComposedAffineApply(b, loc, d0.floorDiv(d1), {lhs, rhs});
-  result.remainder = makeComposedAffineApply(b, loc, d0 % d1, {lhs, rhs});
+      affine::makeComposedAffineApply(b, loc, d0.floorDiv(d1), {lhs, rhs});
+  result.remainder =
+      affine::makeComposedAffineApply(b, loc, d0 % d1, {lhs, rhs});
   return result;
 }
@@ -1871,9 +1876,9 @@
   return result;
 }

-FailureOr<SmallVector<Value>> mlir::delinearizeIndex(OpBuilder &b, Location loc,
-                                                     Value linearIndex,
-                                                     ArrayRef<Value> basis) {
+FailureOr<SmallVector<Value>>
+mlir::affine::delinearizeIndex(OpBuilder &b, Location loc, Value linearIndex,
+                               ArrayRef<Value> basis) {
   unsigned numDims = basis.size();

   SmallVector<Value> divisors;
diff --git a/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp b/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
--- a/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
@@ -12,8 +12,9 @@
 #include "mlir/IR/PatternMatch.h"

 using namespace mlir;
+using namespace affine;

-LogicalResult mlir::mergeOffsetsSizesAndStrides(
+LogicalResult mlir::affine::mergeOffsetsSizesAndStrides(
     OpBuilder &builder, Location loc, ArrayRef<OpFoldResult> producerOffsets,
     ArrayRef<OpFoldResult> producerSizes,
     ArrayRef<OpFoldResult> producerStrides,
@@ -58,7 +59,7 @@
   return success();
 }

-LogicalResult mlir::mergeOffsetsSizesAndStrides(
+LogicalResult mlir::affine::mergeOffsetsSizesAndStrides(
     OpBuilder &builder, Location loc, OffsetSizeAndStrideOpInterface producer,
     OffsetSizeAndStrideOpInterface consumer,
     const llvm::SmallBitVector &droppedProducerDims,
@@ -77,7 +78,7 @@
                                     combinedOffsets, combinedSizes,
                                     combinedStrides);
 }

-void mlir::resolveIndicesIntoOpWithOffsetsAndStrides(
+void mlir::affine::resolveIndicesIntoOpWithOffsetsAndStrides(
     RewriterBase &rewriter, Location loc,
     ArrayRef<OpFoldResult> mixedSourceOffsets,
     ArrayRef<OpFoldResult> mixedSourceStrides,
@@ -109,7 +110,7 @@
   }
 }

-void mlir::resolveSizesIntoOpWithSizes(
+void mlir::affine::resolveSizesIntoOpWithSizes(
     ArrayRef<OpFoldResult> sourceSizes, ArrayRef<OpFoldResult> destSizes,
     const llvm::SmallBitVector &rankReducedSourceDims,
     SmallVectorImpl<OpFoldResult> &resolvedSizes) {
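`getDivMod` above is a thin wrapper over two composed affine.apply ops. A sketch of using it with the new qualification, assuming `b`, `loc`, `idx`, and `c32` (an index-typed constant 32) exist in scope:

    // idx -> (idx floordiv 32, idx mod 32), each as one affine.apply.
    mlir::affine::DivModValue dm = mlir::affine::getDivMod(b, loc, idx, c32);
    mlir::Value outer = dm.quotient;
    mlir::Value inner = dm.remainder;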
diff --git a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
--- a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
+++ b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
@@ -61,7 +61,7 @@
       rewriter.create<ThreadIdOp>(loc, indexType, Dimension::z).getResult()};
   threadsAndWorkGroups.push_back(blockDimsOfr[0]);
   threadsAndWorkGroups.push_back(blockDimsOfr[1]);
-  OpFoldResult ofr = makeComposedFoldedAffineApply(
+  OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
       rewriter, loc, tx + ty * BDX + tz * BDX * BDY, threadsAndWorkGroups);
   return getValueOrCreateConstantIndexOp(rewriter, loc, ofr);
 }
@@ -137,7 +137,7 @@
   // `forallMappingSizes`.
   Value linearId = buildLinearThreadId(rewriter, loc, this->blockDimsOfr);
   AffineExpr d0 = getAffineDimExpr(0, rewriter.getContext());
-  OpFoldResult warpIdOfr = makeComposedFoldedAffineApply(
+  OpFoldResult warpIdOfr = affine::makeComposedFoldedAffineApply(
       rewriter, loc, d0.floorDiv(kWarpSize), {linearId});
   Value warpId = getValueOrCreateConstantIndexOp(rewriter, loc, warpIdOfr);
   // Sizes in [x, y, z] -> [z, y x] order to properly compute strides in
@@ -149,7 +149,8 @@
   SmallVector<Value> ids;
   // Reverse back to be in [x, y, z] order.
   for (AffineExpr e : llvm::reverse(delinearizingExprs))
-    ids.push_back(makeComposedAffineApply(rewriter, loc, e, warpId));
+    ids.push_back(
+        affine::makeComposedAffineApply(rewriter, loc, e, warpId));

   // clang-format off
     LDBG("----linearId: " << linearId);
@@ -204,7 +205,8 @@
   SmallVector<Value> ids;
   // Reverse back to be in [x, y, z] order.
   for (AffineExpr e : llvm::reverse(delinearizingExprs))
-    ids.push_back(makeComposedAffineApply(rewriter, loc, e, linearId));
+    ids.push_back(
+        affine::makeComposedAffineApply(rewriter, loc, e, linearId));

   // clang-format off
   LLVM_DEBUG(llvm::interleaveComma(reverseBasisSizes,
diff --git a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
--- a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
@@ -81,8 +81,8 @@
            GPUDialect::getNumWorkgroupDimensions())))) {
     Value v = en.value();
     auto loop = cast<scf::ForOp>(v.getParentRegion()->getParentOp());
-    mapLoopToProcessorIds(loop, {threadIds[en.index()]},
-                          {blockDims[en.index()]});
+    affine::mapLoopToProcessorIds(loop, {threadIds[en.index()]},
+                                  {blockDims[en.index()]});
   }
 }
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -636,7 +636,7 @@
   Location loc = getOperation()->getLoc();
   IRRewriter rewriter(b);
   SmallVector<OpFoldResult> allResultDimValues =
-      makeComposedFoldedMultiResultAffineApply(
+      affine::makeComposedFoldedMultiResultAffineApply(
           rewriter, loc, resultShapesFromInputShapesMap,
           createFlatListOfOperandDims(b, loc));
   int64_t pos = 0;
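The folded multi-result form used in `reifyResultShapes` returns one `OpFoldResult` per map result and constant-folds where possible, so a single call covers the whole shape map. In isolation, assuming `rewriter`, `loc`, and two dimension operands exist:

    mlir::AffineExpr d0, d1;
    mlir::bindDims(rewriter.getContext(), d0, d1);
    mlir::AffineMap map =
        mlir::AffineMap::get(2, 0, {d0 + d1, d0 * d1}, rewriter.getContext());
    llvm::SmallVector<mlir::OpFoldResult> shapes =
        mlir::affine::makeComposedFoldedMultiResultAffineApply(
            rewriter, loc, map, {dimA, dimB});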
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -580,7 +580,7 @@
     // plus low padding sizes.
     SmallVector<OpFoldResult> newOffsets;
     for (const auto &p : llvm::zip(lowPads, oldOffsets)) {
-      newOffsets.push_back(makeComposedFoldedAffineApply(
+      newOffsets.push_back(affine::makeComposedFoldedAffineApply(
           rewriter, loc, addMap, {std::get<0>(p), std::get<1>(p)}));
     }
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -1169,8 +1169,8 @@
   AffineExpr s0 = builder.getAffineSymbolExpr(0);
   AffineExpr s1 = builder.getAffineSymbolExpr(1);
   Operation *splitPoint =
-      makeComposedAffineApply(builder, target.getLoc(), s0 * s1,
-                              {spec->lowTileSize, spec->lowTripCount});
+      affine::makeComposedAffineApply(builder, target.getLoc(), s0 * s1,
+                                      {spec->lowTileSize, spec->lowTripCount});
   Operation *lowTileSize = spec->lowTileSize.getDefiningOp();
   Operation *highTileSize = spec->highTileSize.getDefiningOp();
   assert(lowTileSize && highTileSize && splitPoint &&
@@ -1420,7 +1420,7 @@
     AffineExpr d0, s0;
     bindDims(rewriter.getContext(), d0);
     bindSymbols(rewriter.getContext(), s0);
-    adjustedPackedSizes.push_back(makeComposedFoldedAffineApply(
+    adjustedPackedSizes.push_back(affine::makeComposedFoldedAffineApply(
        rewriter, genericOp->getLoc(), d0.ceilDiv(s0) * s0,
        {loopRanges[adjustedPackedSizes.size()].size,
         rewriter.getIndexAttr(paddedSizesNextMultipleOf[i])}));
@@ -1983,8 +1983,8 @@
   TrackingListener listener(state, *this);
   IRRewriter rewriter(getContext(), &listener);
   SmallVector<OpFoldResult> shapeSizes =
-      makeComposedFoldedMultiResultAffineApply(rewriter, loc, map,
-                                               allShapeSizes);
+      affine::makeComposedFoldedMultiResultAffineApply(rewriter, loc, map,
+                                                       allShapeSizes);
   // If the shape size is dynamic, tile by 1.
   // Otherwise, do not tile (i.e. tile size 0).
   for (OpFoldResult shapeSize : shapeSizes) {
@@ -3351,7 +3351,7 @@
   void init() {
     declareDependentDialect();
     declareDependentDialect();
-    declareGeneratedDialect<AffineDialect>();
+    declareGeneratedDialect<affine::AffineDialect>();
     declareGeneratedDialect();
     declareGeneratedDialect();
     declareGeneratedDialect();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp b/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
@@ -96,7 +96,7 @@
           linalgOp, "failed to get loops map from shape sizes");
     }
     SmallVector<OpFoldResult> sizeBounds =
-        makeComposedFoldedMultiResultAffineApply(
+        affine::makeComposedFoldedMultiResultAffineApply(
            rewriter, linalgLoc, shapeSizesToLoopsMap, allShapeSizes);

     // The offsets and sizes from the slice operation only give you the tile
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
@@ -55,7 +55,7 @@
   for (int64_t f : factors)
     basis.push_back(b.create<arith::ConstantOp>(loc, b.getIndexAttr(f)));
   FailureOr<SmallVector<Value>> multiIndex =
-      delinearizeIndex(b, loc, index, basis);
+      affine::delinearizeIndex(b, loc, index, basis);
   assert(!failed(multiIndex) && "Failed to linearize img2col index");
   return *multiIndex;
 }
@@ -68,7 +68,8 @@
   AffineExpr oExpr, fExpr;
   bindSymbols(b.getContext(), oExpr, fExpr);
   AffineMap convMap = AffineMap::get(0, 2, stride * oExpr + fExpr);
-  return makeComposedAffineApply(b, loc, convMap, ValueRange{oIndex, fIndex});
+  return affine::makeComposedAffineApply(b, loc, convMap,
+                                         ValueRange{oIndex, fIndex});
 }

 FailureOr<std::pair<Operation *, Operation *>>
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
@@ -111,8 +111,8 @@
       cast<LinalgOp>(op.getOperation()).createFlatListOfOperandDims(b, loc);
   AffineMap map = op.getShapesToLoopsMap();
   IRRewriter rewriter(b);
-  return makeComposedFoldedMultiResultAffineApply(rewriter, loc, map,
-                                                  allShapesSizes);
+  return affine::makeComposedFoldedMultiResultAffineApply(rewriter, loc, map,
+                                                          allShapesSizes);
 }

 /// Helper method to permute the list of `values` based on the `map`.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -179,7 +179,7 @@
   });
   for (IndexOp indexOp :
        llvm::make_early_inc_range(producerBlock.getOps<IndexOp>())) {
-    Value newIndex = rewriter.create<AffineApplyOp>(
+    Value newIndex = rewriter.create<affine::AffineApplyOp>(
         producer.getLoc(),
         consumerToProducerLoopsMap.getSubMap(indexOp.getDim()), fusedIndices);
     mapper.map(indexOp.getResult(), newIndex);
@@ -719,7 +719,7 @@
       assert(!ShapedType::isDynamic(std::get<0>(it)));
       AffineExpr idx, acc;
       bindDims(rewriter.getContext(), idx, acc);
-      newIndex = rewriter.create<AffineApplyOp>(
+      newIndex = rewriter.create<affine::AffineApplyOp>(
          indexOp.getLoc(), idx + acc * std::get<0>(it),
          ValueRange{std::get<1>(it), newIndex});
     }
@@ -1871,7 +1871,7 @@
   populateFoldReshapeOpsByExpansionPatterns(patterns, defaultControlFn);

   // General canonicalization patterns.
-  AffineApplyOp::getCanonicalizationPatterns(patterns, context);
+  affine::AffineApplyOp::getCanonicalizationPatterns(patterns, context);
   GenericOp::getCanonicalizationPatterns(patterns, context);
   tensor::ExpandShapeOp::getCanonicalizationPatterns(patterns, context);
   tensor::CollapseShapeOp::getCanonicalizationPatterns(patterns, context);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
@@ -462,7 +462,7 @@
   // of the enclosing loops.
   for (auto forOp : packingLoops) {
     // Compute an upper bound `ubVal` for the upper bound of `forOp`.
-    FailureOr<Value> loopUb = reifyIndexValueBound(
+    FailureOr<Value> loopUb = affine::reifyIndexValueBound(
         rewriter, loc, presburger::BoundType::UB, forOp.getUpperBound(),
         /*stopCondition=*/
         [&](Value v, std::optional<int64_t> d) {
@@ -472,7 +472,8 @@
           Operation *op = v.getDefiningOp();
           if (!op)
             return true;
-          return !isa<AffineMinOp, AffineMaxOp, AffineApplyOp>(op);
+          return !isa<affine::AffineMinOp, affine::AffineMaxOp,
+                      affine::AffineApplyOp>(op);
         },
         /*closedUB=*/true);
     assert(succeeded(loopUb) && "could not get upper bound");
@@ -485,7 +486,7 @@
     AffineExpr lb, ub, step;
     bindDims(rewriter.getContext(), lb, ub);
     bindSymbols(rewriter.getContext(), step);
-    Value res = rewriter.createOrFold<AffineApplyOp>(
+    Value res = rewriter.createOrFold<affine::AffineApplyOp>(
         loc, (ub - lb).ceilDiv(step),
         ValueRange{forOp.getLowerBound(), ubVal,
                    cast<scf::ForOp>(forOp).getStep()});
@@ -519,7 +520,7 @@
   Value ivVal = forOp.getInductionVar(), lbVal = forOp.getLowerBound(),
         stepVal = forOp.getStep();
   auto loc = forOp->getLoc();
-  return rewriter.createOrFold<AffineApplyOp>(
+  return rewriter.createOrFold<affine::AffineApplyOp>(
      loc, (iv - lb).ceilDiv(step), ValueRange{ivVal, lbVal, stepVal});
 }
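The `(ub - lb).ceilDiv(step)` pattern above is the usual way to materialize a trip count; dims and symbols are bound first so the expression composes into a single affine.apply. The same computation in isolation, assuming the three index values exist:

    mlir::AffineExpr lb, ub, step;
    mlir::bindDims(rewriter.getContext(), lb, ub);
    mlir::bindSymbols(rewriter.getContext(), step);
    mlir::Value tripCount = rewriter.createOrFold<mlir::affine::AffineApplyOp>(
        loc, (ub - lb).ceilDiv(step), mlir::ValueRange{lbVal, ubVal, stepVal});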
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
@@ -94,7 +94,7 @@
     auto loop = dyn_cast<LoopLikeOpInterface>(transferRead->getParentOp());
     LLVM_DEBUG(DBGS() << "Parent op: " << *transferRead->getParentOp()
                       << "\n");
-    if (!isa_and_nonnull<scf::ForOp, AffineForOp>(loop))
+    if (!isa_and_nonnull<scf::ForOp, affine::AffineForOp>(loop))
       return WalkResult::advance();

     LLVM_DEBUG(DBGS() << "Candidate read: " << *transferRead.getOperation()
@@ -200,7 +200,7 @@
             // the walk.
             return WalkResult::interrupt();
           })
-          .Case([&](AffineForOp affineForOp) {
+          .Case([&](affine::AffineForOp affineForOp) {
             auto newForOp = replaceForOpWithNewYields(
                 b, affineForOp, transferRead.getVector(),
                 SmallVector<Value>{transferWrite.getVector()},
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
@@ -96,7 +96,7 @@
                    std::back_inserter(allIndices), [&](uint64_t dim) {
                      return rewriter.create<IndexOp>(indexOp->getLoc(), dim);
                    });
-    rewriter.replaceOpWithNewOp<AffineApplyOp>(
+    rewriter.replaceOpWithNewOp<affine::AffineApplyOp>(
         indexOp, permutationMap.getSubMap(indexOp.getDim()), allIndices);
   }
 }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -49,8 +49,8 @@
   for (auto e : map.getResults()) {
     auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
     SmallVector<Value> operands(vals.begin(), vals.end());
-    canonicalizeMapAndOperands(&exprMap, &operands);
-    res.push_back(b.create<AffineApplyOp>(loc, exprMap, operands));
+    affine::canonicalizeMapAndOperands(&exprMap, &operands);
+    res.push_back(b.create<affine::AffineApplyOp>(loc, exprMap, operands));
   }
   return res;
 }
@@ -189,7 +189,7 @@
         .Case([&](scf::ForOp forOp) {
           allIvs.push_back(forOp.getInductionVar());
         })
-        .Case([&](AffineForOp affineForOp) {
+        .Case([&](affine::AffineForOp affineForOp) {
          allIvs.push_back(affineForOp.getInductionVar());
        })
        .Default([&](Operation *op) { assert(false && "unexpected op"); });
@@ -208,10 +208,12 @@
 template <typename LoopTy>
 static FailureOr<LinalgLoops> linalgOpToLoopsImpl(RewriterBase &rewriter,
                                                   LinalgOp linalgOp) {
-  using LoadOpTy = std::conditional_t<std::is_same<LoopTy, AffineForOp>::value,
-                                      AffineLoadOp, memref::LoadOp>;
-  using StoreOpTy = std::conditional_t<std::is_same<LoopTy, AffineForOp>::value,
-                                       AffineStoreOp, memref::StoreOp>;
+  using LoadOpTy =
+      std::conditional_t<std::is_same<LoopTy, affine::AffineForOp>::value,
+                         affine::AffineLoadOp, memref::LoadOp>;
+  using StoreOpTy =
+      std::conditional_t<std::is_same<LoopTy, affine::AffineForOp>::value,
+                         affine::AffineStoreOp, memref::StoreOp>;

   // The flattened loopToOperandRangesMaps is expected to be an invertible
   // permutation map (which is asserted in the inverse calculation).
@@ -284,11 +286,11 @@
 /// other cases, it is replaced by its unique operand.
 struct FoldAffineOp : public RewritePattern {
   FoldAffineOp(MLIRContext *context)
-      : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}
+      : RewritePattern(affine::AffineApplyOp::getOperationName(), 0, context) {}

   LogicalResult matchAndRewrite(Operation *op,
                                 PatternRewriter &rewriter) const override {
-    AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
+    auto affineApplyOp = cast<affine::AffineApplyOp>(op);
     auto map = affineApplyOp.getAffineMap();
     if (map.getNumResults() != 1 || map.getNumInputs() > 1)
       return failure();
@@ -316,7 +318,7 @@
   patterns.add<LinalgRewritePattern<LoopType>>(context);
   memref::DimOp::getCanonicalizationPatterns(patterns, context);
   tensor::DimOp::getCanonicalizationPatterns(patterns, context);
-  AffineApplyOp::getCanonicalizationPatterns(patterns, context);
+  affine::AffineApplyOp::getCanonicalizationPatterns(patterns, context);
   patterns.add<FoldAffineOp>(context);
   // Just apply the patterns greedily.
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
@@ -328,7 +330,7 @@
     registry.insert<memref::MemRefDialect>();
   }
   void runOnOperation() override {
-    lowerLinalgToLoopsImpl<AffineForOp>(getOperation());
+    lowerLinalgToLoopsImpl<affine::AffineForOp>(getOperation());
   }
 };

@@ -368,7 +370,7 @@
 /// Emits a loop nest of `affine.for` with the proper body for `linalgOp`.
 FailureOr<LinalgLoops>
 mlir::linalg::linalgOpToAffineLoops(RewriterBase &rewriter, LinalgOp linalgOp) {
-  return linalgOpToLoopsImpl<AffineForOp>(rewriter, linalgOp);
+  return linalgOpToLoopsImpl<affine::AffineForOp>(rewriter, linalgOp);
 }

 /// Emits a loop nest of `scf.for` with the proper body for `linalgOp`.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
@@ -81,14 +81,14 @@
   // Adjust the split point so that it doesn't overflow the size.
   AffineExpr d0, d1, d2;
   bindDims(rewriter.getContext(), d0, d1, d2);
-  OpFoldResult minSplitPoint = makeComposedFoldedAffineMin(
+  OpFoldResult minSplitPoint = affine::makeComposedFoldedAffineMin(
       rewriter, op.getLoc(),
       AffineMap::inferFromExprList(ArrayRef<AffineExpr>{d0, d1 + d2}).front(),
       {splitPoint, offsets[dimension], sizes[dimension]});

   // Compute the size of the second part. Return early if the second part would
   // have an empty iteration space.
-  OpFoldResult remainingSize = makeComposedFoldedAffineApply(
+  OpFoldResult remainingSize = affine::makeComposedFoldedAffineApply(
       rewriter, op.getLoc(), d0 + d1 - d2,
       {iterationSpace[dimension].offset, iterationSpace[dimension].size,
        minSplitPoint});
@@ -121,7 +121,7 @@
   });

   // Create the second part.
-  OpFoldResult totalOffset = makeComposedFoldedAffineApply(
+  OpFoldResult totalOffset = affine::makeComposedFoldedAffineApply(
       rewriter, op.getLoc(), d0 + d1, {offsets[dimension], minSplitPoint});
   SmallVector<Value> secondResults;
   TilingInterface secondPart =
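`makeComposedFoldedAffineMin` builds an `affine.min` over every result of the given map (here `d0` and `d1 + d2`) and folds it when the operands are constant, which is what clamps the split point above. The clamp in isolation, assuming `rewriter`, `loc`, and the three operands exist:

    mlir::AffineExpr d0, d1, d2;
    mlir::bindDims(rewriter.getContext(), d0, d1, d2);
    mlir::AffineMap minMap =
        mlir::AffineMap::get(3, 0, {d0, d1 + d2}, rewriter.getContext());
    mlir::OpFoldResult clamped = mlir::affine::makeComposedFoldedAffineMin(
        rewriter, loc, minMap, {splitPoint, offset, size});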
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -39,6 +39,7 @@
 } // namespace mlir

 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::linalg;
 using namespace mlir::scf;
@@ -178,7 +179,7 @@
   AffineExpr s1 = b.getAffineSymbolExpr(1);
   AffineExpr s2 = b.getAffineSymbolExpr(2);
   auto apply = [&](AffineExpr expr, ValueRange values) -> Value {
-    return makeComposedAffineApply(b, b.getLoc(), expr, values);
+    return affine::makeComposedAffineApply(b, b.getLoc(), expr, values);
   };
   Value a = apply(s0.floorDiv(s1), {tripCount, divisorValue});
   Value t = apply((s0 + s1 - 1).floorDiv(s1), {targetSizeValue, divisorValue});
@@ -228,7 +229,7 @@
 /// Build an `affine_max` of all the `vals`.
 static OpFoldResult buildMax(OpBuilder &b, Location loc,
                              ArrayRef<OpFoldResult> vals) {
-  return makeComposedFoldedAffineMax(
+  return affine::makeComposedFoldedAffineMax(
       b, loc, AffineMap::getMultiDimIdentityMap(vals.size(), loc.getContext()),
       vals);
 }
@@ -236,7 +237,7 @@
 /// Build an `affine_min` of all the `vals`.
 static OpFoldResult buildMin(OpBuilder &b, Location loc,
                              ArrayRef<OpFoldResult> vals) {
-  return makeComposedFoldedAffineMin(
+  return affine::makeComposedFoldedAffineMin(
       b, loc, AffineMap::getMultiDimIdentityMap(vals.size(), loc.getContext()),
       vals);
 }
@@ -968,10 +969,10 @@
 void mlir::linalg::populateLinalgTilingCanonicalizationPatterns(
     RewritePatternSet &patterns) {
   auto *ctx = patterns.getContext();
-  AffineApplyOp::getCanonicalizationPatterns(patterns, ctx);
-  AffineForOp::getCanonicalizationPatterns(patterns, ctx);
-  AffineMinOp::getCanonicalizationPatterns(patterns, ctx);
-  AffineMaxOp::getCanonicalizationPatterns(patterns, ctx);
+  affine::AffineApplyOp::getCanonicalizationPatterns(patterns, ctx);
+  affine::AffineForOp::getCanonicalizationPatterns(patterns, ctx);
+  affine::AffineMinOp::getCanonicalizationPatterns(patterns, ctx);
+  affine::AffineMaxOp::getCanonicalizationPatterns(patterns, ctx);
   arith::ConstantIndexOp::getCanonicalizationPatterns(patterns, ctx);
   memref::SubViewOp::getCanonicalizationPatterns(patterns, ctx);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -37,7 +37,7 @@
   for (auto result : indexingMap.getResults()) {
     AffineMap m = AffineMap::get(indexingMap.getNumDims(),
                                  indexingMap.getNumSymbols(), result);
-    Value v = b.create<AffineApplyOp>(loc, m, ivs);
+    Value v = b.create<affine::AffineApplyOp>(loc, m, ivs);
     indices.push_back(v);
   }
   return indices;
@@ -104,8 +104,8 @@
   return llvm::to_vector(
       llvm::map_range(map.getResults(), [&](AffineExpr loopExpr) {
-        OpFoldResult ofr =
-            makeComposedFoldedAffineApply(b, loc, loopExpr, allShapesSizes);
+        OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
+            b, loc, loopExpr, allShapesSizes);
         return Range{b.getIndexAttr(0), ofr, b.getIndexAttr(1)};
       }));
 }
@@ -147,7 +147,7 @@
   bindDims(b.getContext(), d0);
   SmallVector<OpFoldResult> subShapeSizes =
       llvm::to_vector(llvm::map_range(sizes, [&](OpFoldResult ofr) {
-        return makeComposedFoldedAffineApply(b, loc, d0 - 1, ofr);
+        return affine::makeComposedFoldedAffineApply(b, loc, d0 - 1, ofr);
       }));

   OpOperand *outOperand = linalgOp.getDpsInitOperand(resultNumber);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1374,11 +1374,11 @@
 /// Converts affine.apply Ops to arithmetic operations.
 static void convertAffineApply(RewriterBase &rewriter, LinalgOp linalgOp) {
   OpBuilder::InsertionGuard g(rewriter);
-  auto toReplace = linalgOp.getBlock()->getOps<AffineApplyOp>();
+  auto toReplace = linalgOp.getBlock()->getOps<affine::AffineApplyOp>();

   for (auto op : make_early_inc_range(toReplace)) {
     rewriter.setInsertionPoint(op);
-    auto expanded = expandAffineExpr(
+    auto expanded = affine::expandAffineExpr(
         rewriter, op->getLoc(), op.getAffineMap().getResult(0),
         op.getOperands().take_front(op.getAffineMap().getNumDims()),
         op.getOperands().take_back(op.getAffineMap().getNumSymbols()));
@@ -1868,8 +1868,8 @@
   // Case 2: Both values are identical AffineMinOps. (Should not happen if
   // CSE is run.)
-  auto minOp1 = v1.getDefiningOp<AffineMinOp>();
-  auto minOp2 = v2.getDefiningOp<AffineMinOp>();
+  auto minOp1 = v1.getDefiningOp<affine::AffineMinOp>();
+  auto minOp2 = v2.getDefiningOp<affine::AffineMinOp>();
   if (minOp1 && minOp2 && minOp1.getAffineMap() == minOp2.getAffineMap() &&
       minOp1.getOperands() == minOp2.getOperands())
     continue;
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -42,6 +42,7 @@

 using namespace mlir;
 using namespace presburger;
+using namespace mlir::affine;
 using namespace mlir::linalg;
 using namespace mlir::scf;
@@ -456,11 +457,11 @@
     constantSteps.push_back(op.value());
   }

-  mlir::buildAffineLoopNest(b, loc, lbs, ubs, constantSteps,
-                            [&](OpBuilder &b, Location loc, ValueRange ivs) {
-                              bodyBuilderFn(b, loc, ivs,
-                                            linalgOp->getOperands());
-                            });
+  affine::buildAffineLoopNest(b, loc, lbs, ubs, constantSteps,
+                              [&](OpBuilder &b, Location loc, ValueRange ivs) {
+                                bodyBuilderFn(b, loc, ivs,
+                                              linalgOp->getOperands());
+                              });
 }

 /// Update the `lb`, `ub` and `step` to get per processor `lb`, `ub` and `step`.
@@ -470,8 +471,9 @@
   AffineExpr d0, d1;
   bindDims(b.getContext(), d0, d1);
   AffineExpr s0 = getAffineSymbolExpr(0, b.getContext());
-  lb = makeComposedAffineApply(b, loc, d0 + d1 * s0, {lb, procId, step});
-  step = makeComposedAffineApply(b, loc, d0 * s0, {nprocs, step});
+  lb =
+      affine::makeComposedAffineApply(b, loc, d0 + d1 * s0, {lb, procId, step});
+  step = affine::makeComposedAffineApply(b, loc, d0 * s0, {nprocs, step});
 }

 /// Generates a loop nest consisting of scf.parallel and scf.for, depending
diff --git a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
--- a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
+++ b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
@@ -110,7 +110,7 @@
   void init() {
     declareDependentDialect();

-    declareGeneratedDialect<AffineDialect>();
+    declareGeneratedDialect<affine::AffineDialect>();
     declareGeneratedDialect();
     declareGeneratedDialect();
     declareGeneratedDialect();
diff --git a/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp b/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
--- a/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
@@ -111,8 +111,8 @@
       }
       AffineMap map = AffineMap::get(0, affineApplyOperands.size(), expr);
-      Value result = rewriter.create<AffineApplyOp>(op.getLoc(), map,
-                                                    affineApplyOperands);
+      Value result = rewriter.create<affine::AffineApplyOp>(
          op.getLoc(), map, affineApplyOperands);
       offsets.push_back(result);
     }
   }
diff --git a/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
--- a/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
@@ -32,7 +32,9 @@
 #include "mlir/Dialect/MemRef/Transforms/Passes.h.inc"
 } // namespace memref
 } // namespace mlir
+
 using namespace mlir;
+using namespace mlir::affine;

 namespace {
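`buildAffineLoopNest` takes matching bound ranges plus constant steps and hands the induction variables to the body callback, so call sites only re-qualify the name. A sketch, assuming the bound values exist:

    mlir::affine::buildAffineLoopNest(
        b, loc, /*lbs=*/mlir::ValueRange{lb0, lb1},
        /*ubs=*/mlir::ValueRange{ub0, ub1}, /*steps=*/{1, 1},
        [&](mlir::OpBuilder &nested, mlir::Location nestedLoc,
            mlir::ValueRange ivs) {
          // ivs[0] / ivs[1] are the induction variables of the two
          // generated affine.for ops; the loop body is built here.
        });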
diff --git a/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
--- a/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
@@ -195,8 +195,8 @@
   AffineExpr s1 = rewriter.getAffineSymbolExpr(1);
   for (auto [srcSize, indice] : llvm::zip(srcSizes, indices)) {
-    finalSizes.push_back(makeComposedFoldedAffineApply(rewriter, loc, s0 - s1,
-                                                       {srcSize, indice}));
+    finalSizes.push_back(affine::makeComposedFoldedAffineApply(
+        rewriter, loc, s0 - s1, {srcSize, indice}));
   }
   return finalSizes;
 }
diff --git a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
--- a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
@@ -85,7 +85,7 @@

   // Creating maximally folded and composed affine.apply composes better with
   // other transformations without interleaving canonicalization passes.
-  OpFoldResult ofr = makeComposedFoldedAffineApply(
+  OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
       rewriter, loc,
       AffineMap::get(/*numDims=*/groupSize,
                      /*numSymbols=*/0, srcIndexExpr),
@@ -135,7 +135,7 @@

   // Construct the AffineApplyOp for each delinearizingExpr.
   for (int64_t i = 0; i < groupSize; i++) {
-    OpFoldResult ofr = makeComposedFoldedAffineApply(
+    OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
         rewriter, loc,
         AffineMap::get(/*numDims=*/1, /*numSymbols=*/0,
                        delinearizingExprs[i]),
@@ -150,7 +150,7 @@
   int64_t srcRank =
       collapseShapeOp.getViewSource().getType().cast<MemRefType>().getRank();
   for (int64_t i = 0; i < srcRank; i++) {
-    OpFoldResult ofr = makeComposedFoldedAffineApply(
+    OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
         rewriter, loc, zeroAffineMap, dynamicIndices);
     sourceIndices.push_back(
         getValueOrCreateConstantIndexOp(rewriter, loc, ofr));
@@ -268,13 +268,13 @@
   // Resolve sizes according to dropped dims.
   SmallVector<OpFoldResult> resolvedSizes;
   llvm::SmallBitVector srcDroppedDims = srcSubView.getDroppedDims();
-  resolveSizesIntoOpWithSizes(srcSubView.getMixedSizes(),
-                              subView.getMixedSizes(), srcDroppedDims,
-                              resolvedSizes);
+  affine::resolveSizesIntoOpWithSizes(srcSubView.getMixedSizes(),
+                                      subView.getMixedSizes(), srcDroppedDims,
+                                      resolvedSizes);

   // Resolve offsets according to source offsets and strides.
   SmallVector<Value> resolvedOffsets;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, subView.getLoc(), srcSubView.getMixedOffsets(),
       srcSubView.getMixedStrides(), srcDroppedDims, subView.getMixedOffsets(),
       resolvedOffsets);
@@ -309,7 +309,7 @@
       llvm::map_range(indices, [](Value v) -> OpFoldResult { return v; })));
   SmallVector<Value> expandedIndices;
   for (unsigned i = 0, e = affineMap.getNumResults(); i < e; i++) {
-    OpFoldResult ofr = makeComposedFoldedAffineApply(
+    OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
         rewriter, loc, affineMap.getSubMap({i}), indicesOfr);
     expandedIndices.push_back(
         getValueOrCreateConstantIndexOp(rewriter, loc, ofr));
@@ -371,22 +371,23 @@
   SmallVector<Value> indices(loadOp.getIndices().begin(),
                              loadOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineLoadOp = dyn_cast<AffineLoadOp>(loadOp.getOperation())) {
+  if (auto affineLoadOp =
+          dyn_cast<affine::AffineLoadOp>(loadOp.getOperation())) {
     AffineMap affineMap = affineLoadOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, loadOp.getLoc(), rewriter);
     indices.assign(expandedIndices.begin(), expandedIndices.end());
   }
   SmallVector<Value> sourceIndices;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, loadOp.getLoc(), subViewOp.getMixedOffsets(),
       subViewOp.getMixedStrides(), subViewOp.getDroppedDims(), indices,
       sourceIndices);

   llvm::TypeSwitch<Operation *, void>(loadOp)
-      .Case([&](AffineLoadOp op) {
-        rewriter.replaceOpWithNewOp<AffineLoadOp>(loadOp, subViewOp.getSource(),
-                                                  sourceIndices);
+      .Case([&](affine::AffineLoadOp op) {
+        rewriter.replaceOpWithNewOp<affine::AffineLoadOp>(
+            loadOp, subViewOp.getSource(), sourceIndices);
       })
       .Case([&](memref::LoadOp op) {
         rewriter.replaceOpWithNewOp<memref::LoadOp>(
@@ -422,7 +423,8 @@
   SmallVector<Value> indices(loadOp.getIndices().begin(),
                              loadOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineLoadOp = dyn_cast<AffineLoadOp>(loadOp.getOperation())) {
+  if (auto affineLoadOp =
+          dyn_cast<affine::AffineLoadOp>(loadOp.getOperation())) {
     AffineMap affineMap = affineLoadOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, loadOp.getLoc(), rewriter);
@@ -433,7 +435,7 @@
           loadOp.getLoc(), rewriter, expandShapeOp, indices, sourceIndices)))
     return failure();
   llvm::TypeSwitch<Operation *, void>(loadOp)
-      .Case<AffineLoadOp, memref::LoadOp>([&](auto op) {
+      .Case<affine::AffineLoadOp, memref::LoadOp>([&](auto op) {
         rewriter.replaceOpWithNewOp<decltype(op)>(
             loadOp, expandShapeOp.getViewSource(), sourceIndices);
       })
@@ -454,7 +456,8 @@
   SmallVector<Value> indices(loadOp.getIndices().begin(),
                              loadOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineLoadOp = dyn_cast<AffineLoadOp>(loadOp.getOperation())) {
+  if (auto affineLoadOp =
+          dyn_cast<affine::AffineLoadOp>(loadOp.getOperation())) {
     AffineMap affineMap = affineLoadOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, loadOp.getLoc(), rewriter);
@@ -465,7 +468,7 @@
           loadOp.getLoc(), rewriter, collapseShapeOp, indices, sourceIndices)))
     return failure();
   llvm::TypeSwitch<Operation *, void>(loadOp)
-      .Case<AffineLoadOp, memref::LoadOp>([&](auto op) {
+      .Case<affine::AffineLoadOp, memref::LoadOp>([&](auto op) {
         rewriter.replaceOpWithNewOp<decltype(op)>(
             loadOp, collapseShapeOp.getViewSource(), sourceIndices);
       })
@@ -491,21 +494,22 @@
   SmallVector<Value> indices(storeOp.getIndices().begin(),
                              storeOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineStoreOp = dyn_cast<AffineStoreOp>(storeOp.getOperation())) {
+  if (auto affineStoreOp =
+          dyn_cast<affine::AffineStoreOp>(storeOp.getOperation())) {
     AffineMap affineMap = affineStoreOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, storeOp.getLoc(), rewriter);
     indices.assign(expandedIndices.begin(), expandedIndices.end());
   }
   SmallVector<Value> sourceIndices;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, storeOp.getLoc(), subViewOp.getMixedOffsets(),
       subViewOp.getMixedStrides(), subViewOp.getDroppedDims(), indices,
       sourceIndices);

   llvm::TypeSwitch<Operation *, void>(storeOp)
-      .Case([&](AffineStoreOp op) {
-        rewriter.replaceOpWithNewOp<AffineStoreOp>(
+      .Case([&](affine::AffineStoreOp op) {
+        rewriter.replaceOpWithNewOp<affine::AffineStoreOp>(
             op, op.getValue(), subViewOp.getSource(), sourceIndices);
       })
       .Case([&](memref::StoreOp op) {
@@ -543,7 +547,8 @@
   SmallVector<Value> indices(storeOp.getIndices().begin(),
                              storeOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineStoreOp = dyn_cast<AffineStoreOp>(storeOp.getOperation())) {
+  if (auto affineStoreOp =
+          dyn_cast<affine::AffineStoreOp>(storeOp.getOperation())) {
     AffineMap affineMap = affineStoreOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, storeOp.getLoc(), rewriter);
     indices.assign(expandedIndices.begin(), expandedIndices.end());
   }
@@ -554,7 +559,7 @@
           storeOp.getLoc(), rewriter, expandShapeOp, indices, sourceIndices)))
     return failure();
   llvm::TypeSwitch<Operation *, void>(storeOp)
-      .Case<AffineStoreOp, memref::StoreOp>([&](auto op) {
+      .Case<affine::AffineStoreOp, memref::StoreOp>([&](auto op) {
         rewriter.replaceOpWithNewOp<decltype(op)>(storeOp, storeOp.getValue(),
                                                   expandShapeOp.getViewSource(),
                                                   sourceIndices);
@@ -576,7 +581,8 @@
   SmallVector<Value> indices(storeOp.getIndices().begin(),
                              storeOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineStoreOp = dyn_cast<AffineStoreOp>(storeOp.getOperation())) {
+  if (auto affineStoreOp =
+          dyn_cast<affine::AffineStoreOp>(storeOp.getOperation())) {
     AffineMap affineMap = affineStoreOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, storeOp.getLoc(), rewriter);
@@ -587,7 +593,7 @@
           storeOp.getLoc(), rewriter, collapseShapeOp, indices, sourceIndices)))
     return failure();
   llvm::TypeSwitch<Operation *, void>(storeOp)
-      .Case<AffineStoreOp, memref::StoreOp>([&](auto op) {
+      .Case<affine::AffineStoreOp, memref::StoreOp>([&](auto op) {
         rewriter.replaceOpWithNewOp<decltype(op)>(
             storeOp, storeOp.getValue(), collapseShapeOp.getViewSource(),
             sourceIndices);
@@ -617,7 +623,7 @@

   if (srcSubViewOp) {
     LLVM_DEBUG(DBGS() << "srcSubViewOp : " << srcSubViewOp << "\n");
-    resolveIndicesIntoOpWithOffsetsAndStrides(
+    affine::resolveIndicesIntoOpWithOffsetsAndStrides(
         rewriter, copyOp.getLoc(), srcSubViewOp.getMixedOffsets(),
         srcSubViewOp.getMixedStrides(), srcSubViewOp.getDroppedDims(),
         srcindices, foldedSrcIndices);
@@ -630,7 +636,7 @@

   if (dstSubViewOp) {
     LLVM_DEBUG(DBGS() << "dstSubViewOp : " << dstSubViewOp << "\n");
-    resolveIndicesIntoOpWithOffsetsAndStrides(
+    affine::resolveIndicesIntoOpWithOffsetsAndStrides(
        rewriter, copyOp.getLoc(), dstSubViewOp.getMixedOffsets(),
        dstSubViewOp.getMixedStrides(), dstSubViewOp.getDroppedDims(),
        dstindices, foldedDstIndices);
@@ -650,21 +656,21 @@
 }

 void memref::populateFoldMemRefAliasOpPatterns(RewritePatternSet &patterns) {
-  patterns.add<LoadOpOfSubViewOpFolder<AffineLoadOp>,
+  patterns.add<LoadOpOfSubViewOpFolder<affine::AffineLoadOp>,
               LoadOpOfSubViewOpFolder<memref::LoadOp>,
               LoadOpOfSubViewOpFolder<nvgpu::LdMatrixOp>,
               LoadOpOfSubViewOpFolder<vector::TransferReadOp>,
-               StoreOpOfSubViewOpFolder<AffineStoreOp>,
+               StoreOpOfSubViewOpFolder<affine::AffineStoreOp>,
               StoreOpOfSubViewOpFolder<memref::StoreOp>,
               StoreOpOfSubViewOpFolder<vector::TransferWriteOp>,
               StoreOpOfSubViewOpFolder<gpu::SubgroupMmaStoreMatrixOp>,
-               LoadOpOfExpandShapeOpFolder<AffineLoadOp>,
+               LoadOpOfExpandShapeOpFolder<affine::AffineLoadOp>,
               LoadOpOfExpandShapeOpFolder<memref::LoadOp>,
-               StoreOpOfExpandShapeOpFolder<AffineStoreOp>,
+               StoreOpOfExpandShapeOpFolder<affine::AffineStoreOp>,
               StoreOpOfExpandShapeOpFolder<memref::StoreOp>,
-               LoadOpOfCollapseShapeOpFolder<AffineLoadOp>,
+               LoadOpOfCollapseShapeOpFolder<affine::AffineLoadOp>,
               LoadOpOfCollapseShapeOpFolder<memref::LoadOp>,
-               StoreOpOfCollapseShapeOpFolder<AffineStoreOp>,
+               StoreOpOfCollapseShapeOpFolder<affine::AffineStoreOp>,
               StoreOpOfCollapseShapeOpFolder<memref::StoreOp>,
               SubViewOfSubViewFolder, NvgpuAsyncCopyOpSubViewOpFolder>(
      patterns.getContext());
diff --git a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
--- a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
@@ -190,7 +190,7 @@
   Value stepVal = getValueOrCreateConstantIndexOp(rewriter, loc, *singleStep);
   AffineExpr iv, lb, step;
   bindDims(rewriter.getContext(), iv, lb, step);
-  Value bufferIndex = makeComposedAffineApply(
+  Value bufferIndex = affine::makeComposedAffineApply(
       rewriter, loc, ((iv - lb).floorDiv(step)) % multiBufferingFactor,
       {ivVal, lbVal, stepVal});
   LLVM_DEBUG(DBGS() << "--multi-buffered indexing: " << bufferIndex << "\n");
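The `((iv - lb) floordiv step) mod factor` expression above is the rotating-buffer index; composing it keeps everything in a single affine.apply. The same computation with a factor of 3 for illustration, assuming the loop values exist:

    mlir::AffineExpr iv, lb, step;
    mlir::bindDims(rewriter.getContext(), iv, lb, step);
    mlir::Value bufferIndex = mlir::affine::makeComposedAffineApply(
        rewriter, loc, ((iv - lb).floorDiv(step)) % 3, {ivVal, lbVal, stepVal});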
diff --git a/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp b/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
--- a/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
@@ -29,6 +29,7 @@
 #define DEBUG_TYPE "normalize-memrefs"

 using namespace mlir;
+using namespace mlir::affine;

 namespace {
diff --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
--- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
+++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
@@ -20,6 +20,7 @@
 #include "mlir/Dialect/Vector/IR/VectorOps.h"

 using namespace mlir;
+using namespace mlir::affine;

 //===----------------------------------------------------------------------===//
 // GetParentForOp
@@ -298,7 +299,7 @@
   using Base::Base;

   void init() {
-    declareGeneratedDialect<AffineDialect>();
+    declareGeneratedDialect<affine::AffineDialect>();
     declareGeneratedDialect();

     registerTransformOps<
diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
--- a/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
@@ -179,8 +179,8 @@
     RewritePatternSet &patterns) {
   MLIRContext *ctx = patterns.getContext();
   patterns
-      .add<AffineOpSCFCanonicalizationPattern<AffineMinOp>,
-           AffineOpSCFCanonicalizationPattern<AffineMaxOp>,
+      .add<AffineOpSCFCanonicalizationPattern<affine::AffineMinOp>,
+           AffineOpSCFCanonicalizationPattern<affine::AffineMaxOp>,
           DimOfIterArgFolder<tensor::DimOp>, DimOfIterArgFolder<memref::DimOp>,
           DimOfLoopResultFolder<tensor::DimOp>,
           DimOfLoopResultFolder<memref::DimOp>>(ctx);
diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
--- a/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
@@ -34,6 +34,7 @@
 } // namespace mlir

 using namespace mlir;
+using namespace mlir::affine;
 using scf::ForOp;
 using scf::ParallelOp;
diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
@@ -130,8 +130,8 @@
       // Otherwise, we dynamically compute the bound for
       // each iteration of the outer loop.
       newBounds.push_back(
-          b.create<AffineMinOp>(op.getLoc(), b.getIndexType(), minMap,
-                                ValueRange{newStep, upperBound, iv}));
+          b.create<affine::AffineMinOp>(op.getLoc(), b.getIndexType(), minMap,
+                                        ValueRange{newStep, upperBound, iv}));
   }
   auto innerLoop = b.create<ParallelOp>(
       op.getLoc(), SmallVector<Value, 2>(newBounds.size(), zero), newBounds,
diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -102,7 +102,7 @@
   bindSymbols(b.getContext(), s0, s1);
   AffineMap minMap = AffineMap::get(1, 2, {s0, s1 - d0}, b.getContext());
   Value size = getValueOrCreateConstantIndexOp(b, loc, loopRange.size);
-  return makeComposedFoldedAffineMin(
+  return affine::makeComposedFoldedAffineMin(
       b, loc, minMap, SmallVector<OpFoldResult>{iv, tileSize, size});
 }
diff --git a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
--- a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
@@ -27,6 +27,7 @@
 #define DEBUG_TYPE "mlir-scf-affine-utils"

 using namespace mlir;
+using namespace affine;
 using namespace presburger;

 LogicalResult scf::matchForLikeLoop(Value iv, OpFoldResult &lb,
@@ -68,7 +69,7 @@
   RewriterBase::InsertionGuard guard(rewriter);
   rewriter.setInsertionPoint(op);
   FailureOr<AffineValueMap> simplified =
-      mlir::simplifyConstrainedMinMaxOp(op, std::move(constraints));
+      affine::simplifyConstrainedMinMaxOp(op, std::move(constraints));
   if (failed(simplified))
     return failure();
   return rewriter.replaceOpWithNewOp<AffineApplyOp>(
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
@@ -91,8 +91,8 @@
       {rewriter.getAffineSymbolExpr(0),
        rewriter.getAffineDimExpr(0) - rewriter.getAffineDimExpr(1)},
       rewriter.getContext());
-  Value end =
-      rewriter.createOrFold<AffineMinOp>(loc, min, ValueRange{hi, iv, step});
+  Value end = rewriter.createOrFold<affine::AffineMinOp>(
+      loc, min, ValueRange{hi, iv, step});
   return rewriter.create<vector::CreateMaskOp>(loc, mtp, end);
 }
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -55,9 +55,9 @@
     AffineExpr currExpr = builder.getAffineSymbolExpr(dim - startPos);
     expr = (expr ? expr * currExpr : currExpr);
   }
-  return applyMapToValues(builder, loc,
-                          AffineMap::get(0, endPos - startPos + 1, expr),
-                          dynamicDims)[0];
+  return affine::applyMapToValues(
+      builder, loc, AffineMap::get(0, endPos - startPos + 1, expr),
+      dynamicDims)[0];
 }

 /// Given the `src` of a collapsing reshape op and its reassociation maps,
@@ -103,7 +103,7 @@
     linearizedStaticDim *= d.value();
   }
   Value sourceDim = builder.create<tensor::DimOp>(loc, src, sourceDimPos);
-  return applyMapToValues(
+  return affine::applyMapToValues(
       builder, loc,
       AffineMap::get(
           0, 1, builder.getAffineSymbolExpr(0).floorDiv(linearizedStaticDim)),
@@ -190,7 +190,7 @@
     };
     addOpFoldResult(lowPad[dim]);
     addOpFoldResult(highPad[dim]);
-    shapes.push_back(applyMapToValues(
+    shapes.push_back(affine::applyMapToValues(
         b, loc, AffineMap::get(1, numSymbols, expr), mapOperands)[0]);
   }
   reifiedReturnShapes.emplace_back(std::move(shapes));
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1528,8 +1528,8 @@
         rewriter.create<DimOp>(dimOp.getLoc(), expandShapeOp.getSrc(), srcDim);
     AffineExpr expr;
     bindSymbols(dimOp.getContext(), expr);
-    rewriter.replaceOpWithNewOp<AffineApplyOp>(dimOp, expr.floorDiv(product),
-                                               srcDimSz);
+    rewriter.replaceOpWithNewOp<affine::AffineApplyOp>(
+        dimOp, expr.floorDiv(product), srcDimSz);
     return success();
   }
 };
@@ -1567,7 +1567,8 @@
       syms.push_back(rewriter.getAffineSymbolExpr(it.index()));
       product = product ? product * syms.back() : syms.back();
     }
-    rewriter.replaceOpWithNewOp<AffineApplyOp>(dimOp, product, srcDimSizes);
+    rewriter.replaceOpWithNewOp<affine::AffineApplyOp>(dimOp, product,
+                                                       srcDimSizes);
     return success();
   }
 };
@@ -3565,7 +3566,7 @@
   bindSymbols(builder.getContext(), s0, s1);
   AffineExpr ceilDivExpr = s0.ceilDiv(s1);
   for (auto tiledDim : llvm::enumerate(innerDimsPos)) {
-    resultDims[tiledDim.value()] = makeComposedFoldedAffineApply(
+    resultDims[tiledDim.value()] = affine::makeComposedFoldedAffineApply(
         builder, loc, ceilDivExpr,
         {resultDims[tiledDim.value()], innerTileSizes[tiledDim.index()]});
   }
@@ -3610,7 +3611,8 @@
   AffineExpr dim0, dim1;
   bindDims(b.getContext(), dim0, dim1);
   auto ceilDiv = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
-    return makeComposedFoldedAffineApply(b, loc, dim0.ceilDiv(dim1), {v1, v2});
+    return affine::makeComposedFoldedAffineApply(b, loc, dim0.ceilDiv(dim1),
+                                                 {v1, v2});
   };

   SmallVector<OpFoldResult> mixedSizes;
@@ -3816,7 +3818,7 @@
   AffineExpr sym0, sym1;
   bindSymbols(b.getContext(), sym0, sym1);
   auto dimMul = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
-    return makeComposedFoldedAffineApply(b, loc, sym0 * sym1, {v1, v2});
+    return affine::makeComposedFoldedAffineApply(b, loc, sym0 * sym1, {v1, v2});
   };

   SmallVector<OpFoldResult> mixedSizes;
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -139,8 +139,8 @@
       tensor::createDimValues(b, loc, packOp.getSource());
   SmallVector<OpFoldResult> inputIndices, inputSizes;
   for (auto dim : llvm::seq<int64_t>(0, inputRank)) {
-    using AV = AffineValueExpr;
-    AffineBuilder ab(b, loc);
+    using AV = affine::AffineValueExpr;
+    affine::AffineBuilder ab(b, loc);
     AffineExpr dim0, dim1, sym;
     bindDims(b.getContext(), dim0, dim1);
     bindSymbols(b.getContext(), sym);
@@ -255,8 +255,8 @@
   }

   Location loc = unpackOp.getLoc();
-  using AV = AffineValueExpr;
-  AffineBuilder ab(b, loc);
+  using AV = affine::AffineValueExpr;
+  affine::AffineBuilder ab(b, loc);
   AffineExpr dim0, dim1, sym0;
   bindDims(b.getContext(), dim0, dim1);
   bindSymbols(b.getContext(), sym0);
@@ -303,12 +303,12 @@
       return info;
     }

-    DivModValue firstCoord =
-        getDivMod(b, loc, getValueOrCreateConstantIndexOp(b, loc, tileOffset),
-                  getValueOrCreateConstantIndexOp(b, loc, innerTileSize));
+    affine::DivModValue firstCoord = affine::getDivMod(
+        b, loc, getValueOrCreateConstantIndexOp(b, loc, tileOffset),
+        getValueOrCreateConstantIndexOp(b, loc, innerTileSize));
     OpFoldResult tileExclusiveBound =
         ab.add(AV(dim0).bind(tileOffset), AV(dim1).bind(tileSize));
-    DivModValue lastCoord = getDivMod(
+    affine::DivModValue lastCoord = affine::getDivMod(
         b, loc,
         getValueOrCreateConstantIndexOp(
             b, loc,
@@ -468,21 +468,21 @@
   // Add two integers.
   auto addMap = AffineMap::get(2, 0, {dim0 + dim1});
   auto add = [&](OpFoldResult v1, OpFoldResult v2) {
-    return makeComposedFoldedAffineApply(b, loc, addMap, {v1, v2});
+    return affine::makeComposedFoldedAffineApply(b, loc, addMap, {v1, v2});
   };
   // Subtract two integers.
   auto subMap = AffineMap::get(2, 0, {dim0 - dim1});
   auto sub = [&](OpFoldResult v1, OpFoldResult v2) {
-    return makeComposedFoldedAffineApply(b, loc, subMap, {v1, v2});
+    return affine::makeComposedFoldedAffineApply(b, loc, subMap, {v1, v2});
   };
   // Take the minimum of two integers.
   auto idMap = AffineMap::getMultiDimIdentityMap(2, b.getContext());
   auto min = [&](OpFoldResult v1, OpFoldResult v2) {
-    return makeComposedFoldedAffineMin(b, loc, idMap, {v1, v2});
+    return affine::makeComposedFoldedAffineMin(b, loc, idMap, {v1, v2});
   };
   // Take the maximum of two integers.
   auto max = [&](OpFoldResult v1, OpFoldResult v2) {
-    return makeComposedFoldedAffineMax(b, loc, idMap, {v1, v2});
+    return affine::makeComposedFoldedAffineMax(b, loc, idMap, {v1, v2});
   };
   // Zero index-typed integer.
   OpFoldResult zero = b.getIndexAttr(0);
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -893,7 +893,7 @@
       AffineExpr s0, s1, s2;
       bindSymbols(op->getContext(), s0, s1, s2);
       AffineExpr sumExpr = s0 + s1 + s2;
-      Value sum = rewriter.create<AffineApplyOp>(
+      Value sum = rewriter.create<affine::AffineApplyOp>(
           loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
       dynamicSizes.push_back(sum);
     }
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
--- a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
@@ -23,6 +23,7 @@
 #include "llvm/ADT/STLExtras.h"

 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::tensor;

 /// Get the dimension size of a value of RankedTensor type at the
@@ -61,7 +62,7 @@
   assert(dim < sliceParams.size() && "slice should be non rank-reducing");
   return std::make_pair(
       dim,
-      makeComposedAffineApply(
+      affine::makeComposedAffineApply(
           b, loc, s0 + d0 * s1,
           {indexValue,
            getValueOrCreateConstantIndexOp(b, loc, sliceParams[dim].offset),
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
@@ -100,7 +100,7 @@
   SmallVector<Value> indices(readOp.getIndices().begin(),
                              readOp.getIndices().end());
   SmallVector<Value> sourceIndices;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, readOp.getLoc(), extractSliceOp.getMixedOffsets(),
       extractSliceOp.getMixedStrides(), extractSliceOp.getDroppedDims(),
       indices, sourceIndices);
@@ -132,7 +132,7 @@
   SmallVector<Value> indices(writeOp.getIndices().begin(),
                             writeOp.getIndices().end());
   SmallVector<Value> sourceIndices;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, writeOp.getLoc(), insertSliceOp.getMixedOffsets(),
       insertSliceOp.getMixedStrides(), insertSliceOp.getDroppedDims(),
       indices, sourceIndices);
@@ -187,9 +187,9 @@
   // Note: the "insertSlice" case is symmetrical to the extract/subview case:
   // `insertSliceOp` is passed as the "source" and `sourceInsertSliceOp` is
   // passed as the destination to the helper function.
-  resolveSizesIntoOpWithSizes(insertSliceOp.getMixedSizes(),
-                              sourceInsertSliceOp.getMixedSizes(),
-                              droppedDims, resolvedSizes);
+  affine::resolveSizesIntoOpWithSizes(insertSliceOp.getMixedSizes(),
+                                      sourceInsertSliceOp.getMixedSizes(),
+                                      droppedDims, resolvedSizes);

   // If we are inside an InParallel region, temporarily set the insertion
   // point outside: only tensor.parallel_insert_slice ops are allowed in
@@ -204,7 +204,7 @@
   // Note: the "insertSlice" case is symmetrical to the extract/subview case:
   // `insertSliceOp` is passed as the "source" and `sourceInsertSliceOp` is
   // passed as the destination to the helper function.
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, insertSliceOp.getLoc(), insertSliceOp.getMixedOffsets(),
       insertSliceOp.getMixedStrides(), droppedDims,
       sourceInsertSliceOp.getMixedOffsets(), resolvedOffsets);
diff --git a/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
--- a/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
@@ -29,9 +29,9 @@
       return failure();

     SmallVector<OpFoldResult> newOffsets, newSizes, newStrides;
-    if (failed(mergeOffsetsSizesAndStrides(rewriter, nextOp.getLoc(), prevOp,
-                                           nextOp, prevOp.getDroppedDims(),
-                                           newOffsets, newSizes, newStrides)))
+    if (failed(affine::mergeOffsetsSizesAndStrides(
+            rewriter, nextOp.getLoc(), prevOp, nextOp, prevOp.getDroppedDims(),
+            newOffsets, newSizes, newStrides)))
       return failure();

     rewriter.replaceOpWithNewOp<ExtractSliceOp>(nextOp, nextOp.getType(),
diff --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
@@ -35,7 +35,8 @@
     bindDims(b.getContext(), d0);
     auto dimOp = b.createOrFold<tensor::DimOp>(loc, source, en.index());
     high[en.index()] =
-        makeComposedAffineApply(b, loc, en.value() - d0, {dimOp}).getResult();
+        affine::makeComposedAffineApply(b, loc, en.value() - d0, {dimOp})
+            .getResult();
   }
   return b.create<PadOp>(loc, type, source, low, high, pad, nofold);
 }
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
@@ -72,8 +72,8 @@
   Value buildDistributedOffset(RewriterBase &b, Location loc, int64_t index) {
     int64_t distributedSize = distributedVectorType.getDimSize(index);
     AffineExpr tid = getAffineSymbolExpr(0, b.getContext());
-    return b.createOrFold<AffineApplyOp>(loc, tid * distributedSize,
-                                         ArrayRef<Value>{laneId});
+    return b.createOrFold<affine::AffineApplyOp>(loc, tid * distributedSize,
+                                                 ArrayRef<Value>{laneId});
   }

   /// Create a store during the process of distributing the
@@ -513,9 +513,9 @@
     unsigned vectorPos = std::get<1>(it).cast<AffineDimExpr>().getPosition();
     auto scale =
         rewriter.getAffineConstantExpr(targetType.getDimSize(vectorPos));
-    indices[indexPos] =
-        makeComposedAffineApply(rewriter, loc, d0 + scale * d1,
-                                {indices[indexPos], newWarpOp.getLaneid()});
+    indices[indexPos] = affine::makeComposedAffineApply(
+        rewriter, loc, d0 + scale * d1,
+        {indices[indexPos], newWarpOp.getLaneid()});
   }
   newWriteOp.getIndicesMutable().assign(indices);

@@ -753,9 +753,9 @@
     unsigned vectorPos = std::get<1>(it).cast<AffineDimExpr>().getPosition();
     int64_t scale =
         distributedVal.getType().cast<VectorType>().getDimSize(vectorPos);
-    indices[indexPos] =
-        makeComposedAffineApply(rewriter, read.getLoc(), d0 + scale * d1,
-                                {indices[indexPos], warpOp.getLaneid()});
+    indices[indexPos] = affine::makeComposedAffineApply(
+        rewriter, read.getLoc(), d0 + scale * d1,
+        {indices[indexPos], warpOp.getLaneid()});
   }
   Value newRead = rewriter.create<vector::TransferReadOp>(
       read.getLoc(), distributedVal.getType(), read.getSource(), indices,
@@ -1046,15 +1046,15 @@
     int64_t elementsPerLane = distributedVecType.getShape()[0];
     AffineExpr sym0 = getAffineSymbolExpr(0, rewriter.getContext());
     // tid of extracting thread: pos / elementsPerLane
-    Value broadcastFromTid = rewriter.create<AffineApplyOp>(
+    Value broadcastFromTid = rewriter.create<affine::AffineApplyOp>(
         loc, sym0.ceilDiv(elementsPerLane), extractOp.getPosition());
     // Extract at position: pos % elementsPerLane
     Value pos =
         elementsPerLane == 1
             ? rewriter.create<arith::ConstantIndexOp>(loc, 0).getResult()
             : rewriter
-                  .create<AffineApplyOp>(loc, sym0 % elementsPerLane,
-                                         extractOp.getPosition())
+                  .create<affine::AffineApplyOp>(loc, sym0 % elementsPerLane,
+                                                 extractOp.getPosition())
                   .getResult();
     Value extracted =
         rewriter.create<vector::ExtractElementOp>(loc, distributedVec, pos);
@@ -1119,14 +1119,15 @@
     int64_t elementsPerLane = distrType.getShape()[0];
     AffineExpr sym0 = getAffineSymbolExpr(0, rewriter.getContext());
     // tid of extracting thread: pos / elementsPerLane
-    Value insertingLane = rewriter.create<AffineApplyOp>(
+    Value insertingLane = rewriter.create<affine::AffineApplyOp>(
         loc, sym0.ceilDiv(elementsPerLane), newPos);
     // Insert position: pos % elementsPerLane
     Value pos =
         elementsPerLane == 1
             ? rewriter.create<arith::ConstantIndexOp>(loc, 0).getResult()
             : rewriter
-                  .create<AffineApplyOp>(loc, sym0 % elementsPerLane, newPos)
+                  .create<affine::AffineApplyOp>(loc, sym0 % elementsPerLane,
+                                                 newPos)
                   .getResult();
     Value isInsertingLane = rewriter.create<arith::CmpIOp>(
         loc, arith::CmpIPredicate::eq, newWarpOp.getLaneid(), insertingLane);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
@@ -599,7 +599,7 @@
     if (extractOp.getPosition()) {
       AffineExpr sym0, sym1;
       bindSymbols(extractOp.getContext(), sym0, sym1);
-      OpFoldResult ofr = makeComposedFoldedAffineApply(
+      OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
          rewriter, extractOp.getLoc(), sym0 + sym1,
          {newIndices[newIndices.size() - 1], extractOp.getPosition()});
      if (ofr.is<Value>()) {
@@ -663,7 +663,7 @@
       int64_t offset = it.value().cast<IntegerAttr>().getInt();
       int64_t idx =
           newIndices.size() - extractOp.getPosition().size() + it.index();
-      OpFoldResult ofr = makeComposedFoldedAffineApply(
+      OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
           rewriter, extractOp.getLoc(),
           rewriter.getAffineSymbolExpr(0) + offset, {newIndices[idx]});
       if (ofr.is<Value>()) {
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
@@ -41,7 +41,7 @@
 static std::optional<int64_t> extractConstantIndex(Value v) {
   if (auto cstOp = v.getDefiningOp<arith::ConstantIndexOp>())
     return cstOp.value();
-  if (auto affineApplyOp = v.getDefiningOp<AffineApplyOp>())
+  if (auto affineApplyOp = v.getDefiningOp<affine::AffineApplyOp>())
     if (affineApplyOp.getAffineMap().isSingleConstant())
       return affineApplyOp.getAffineMap().getSingleConstantResult();
   return std::nullopt;
@@ -76,8 +76,8 @@
   int64_t vectorSize = xferOp.getVectorType().getDimSize(resultIdx);
   auto d0 = getAffineDimExpr(0, xferOp.getContext());
   auto vs = getAffineConstantExpr(vectorSize, xferOp.getContext());
-  Value sum =
-      makeComposedAffineApply(b, loc, d0 + vs, xferOp.indices()[indicesIdx]);
+  Value sum = affine::makeComposedAffineApply(b, loc, d0 + vs,
+                                              xferOp.indices()[indicesIdx]);
   Value cond = createFoldedSLE(
       b, sum, vector::createOrFoldDimOp(b, loc, xferOp.source(), indicesIdx));
   if (!cond)
@@ -208,7 +208,7 @@
         SmallVector<AffineMap, 4> maps =
             AffineMap::inferFromExprList(MapList{{i - j, k}});
         // affine_min(%dimMemRef - %index, %dimAlloc)
-        Value affineMin = b.create<AffineMinOp>(
+        Value affineMin = b.create<affine::AffineMinOp>(
             loc, index.getType(), maps[0],
             ValueRange{dimMemRef, index, dimAlloc});
         sizes.push_back(affineMin);
       });
@@ -449,7 +449,7 @@
        parent = parent->getParentOp()) {
     if (parent->hasTrait<OpTrait::AutomaticAllocationScope>())
       scope = parent;
-    if (!isa<scf::ForOp, AffineForOp>(parent))
+    if (!isa<scf::ForOp, affine::AffineForOp>(parent))
       break;
   }
   assert(scope && "Expected op to be inside automatic allocation scope");
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
--- a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
@@ -118,7 +118,8 @@
     auto expr = getAffineDimExpr(0, builder.getContext()) +
                 getAffineConstantExpr(elementOffsets[dim.index()], ctx);
     auto map = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0, expr);
-    slicedIndices[pos] = builder.create<AffineApplyOp>(loc, map, indices[pos]);
+    slicedIndices[pos] =
+        builder.create<affine::AffineApplyOp>(loc, map, indices[pos]);
   }
   return slicedIndices;
 }
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -77,8 +77,8 @@
   for (auto kvp : enclosingLoopToVectorDim) {
     assert(kvp.second < perm.size());
-    auto invariants = getInvariantAccesses(
-        cast<AffineForOp>(kvp.first).getInductionVar(), indices);
+    auto invariants = affine::getInvariantAccesses(
+        cast<affine::AffineForOp>(kvp.first).getInductionVar(), indices);
     unsigned numIndices = indices.size();
     unsigned countInvariantIndices = 0;
     for (unsigned dim = 0; dim < numIndices; ++dim) {
@@ -119,7 +119,7 @@

 /// Returns the enclosing AffineForOp, from closest to farthest.
 static SetVector<Operation *> getEnclosingforOps(Block *block) {
-  return getParentsOfType<AffineForOp>(block);
+  return getParentsOfType<affine::AffineForOp>(block);
 }

 AffineMap mlir::makePermutationMap(
diff --git a/mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp b/mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp
--- a/mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp
+++ b/mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp
@@ -23,6 +23,7 @@
 #define DEBUG_TYPE "memref-bound-check"

 using namespace mlir;
+using namespace mlir::affine;

 namespace {
diff --git a/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp b/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
--- a/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
+++ b/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
@@ -21,6 +21,7 @@
 #define DEBUG_TYPE "test-memref-dependence-check"

 using namespace mlir;
+using namespace mlir::affine;

 namespace {
diff --git a/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp b/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
--- a/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
@@ -23,6 +23,7 @@
 #define PASS_NAME "test-affine-data-copy"

 using namespace mlir;
+using namespace mlir::affine;

 namespace {
diff --git a/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp b/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
--- a/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
@@ -17,6 +17,7 @@
 #include "mlir/Dialect/Func/IR/FuncOps.h"

 using namespace mlir;
+using namespace mlir::affine;

 #define DEBUG_TYPE "test-affine-parametric-tile"
diff --git a/mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp b/mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp
--- a/mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp
@@ -19,6 +19,7 @@
 #define PASS_NAME "test-affine-loop-unswitch"

 using namespace mlir;
+using namespace mlir::affine;

 namespace {
diff --git a/mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp b/mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp
--- a/mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp
@@ -21,6 +21,7 @@
 #define PASS_NAME "test-decompose-affine-ops"

 using namespace mlir;
+using namespace mlir::affine;

 namespace {
diff --git a/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp b/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
--- a/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
@@ -20,6 +20,7 @@
 #define DEBUG_TYPE "test-loop-fusion"

 using namespace mlir;
+using namespace mlir::affine;

 static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");

@@ -60,10 +61,10 @@
 static bool testDependenceCheck(AffineForOp srcForOp, AffineForOp dstForOp,
                                 unsigned i, unsigned j, unsigned loopDepth,
                                 unsigned maxLoopDepth) {
-  mlir::ComputationSliceState sliceUnion;
+  affine::ComputationSliceState sliceUnion;
   for (unsigned d = loopDepth + 1; d <= maxLoopDepth; ++d) {
     FusionResult result =
-        mlir::canFuseLoops(srcForOp, dstForOp, d, &sliceUnion);
+        affine::canFuseLoops(srcForOp, dstForOp, d, &sliceUnion);
     if (result.value == FusionResult::FailBlockDependence) {
       srcForOp->emitRemark("block-level dependence preventing"
                            " fusion of loop nest ")
@@ -85,7 +86,8 @@
 }

 // Returns a string representation of 'sliceUnion'.
-static std::string getSliceStr(const mlir::ComputationSliceState &sliceUnion) {
+static std::string
+getSliceStr(const affine::ComputationSliceState &sliceUnion) {
   std::string result;
   llvm::raw_string_ostream os(result);
   // Slice insertion point format [loop-depth, operation-block-index]
@@ -114,8 +116,8 @@
                                 unsigned i, unsigned j, unsigned loopDepth,
                                 unsigned maxLoopDepth) {
   for (unsigned d = loopDepth + 1; d <= maxLoopDepth; ++d) {
-    mlir::ComputationSliceState sliceUnion;
-    FusionResult result = mlir::canFuseLoops(forOpA, forOpB, d, &sliceUnion);
+    affine::ComputationSliceState sliceUnion;
+    FusionResult result = affine::canFuseLoops(forOpA, forOpB, d, &sliceUnion);
     if (result.value == FusionResult::Success) {
       forOpB->emitRemark("slice (")
           << " src loop: " << i << ", dst loop: " << j << ", depth: " << d
@@ -137,10 +139,10 @@
                             unsigned loopDepth, unsigned maxLoopDepth) {
   for (unsigned d = loopDepth + 1; d <= maxLoopDepth; ++d) {
-    mlir::ComputationSliceState sliceUnion;
-    FusionResult result = mlir::canFuseLoops(forOpA, forOpB, d, &sliceUnion);
+    affine::ComputationSliceState sliceUnion;
+    FusionResult result = affine::canFuseLoops(forOpA, forOpB, d, &sliceUnion);
     if (result.value == FusionResult::Success) {
-      mlir::fuseLoops(forOpA, forOpB, sliceUnion);
+      affine::fuseLoops(forOpA, forOpB, sliceUnion);
       // Note: 'forOpA' is removed to simplify test output. A proper loop
       // fusion pass should check the data dependence graph and run memref
       // region analysis to ensure removing 'forOpA' is safe.
diff --git a/mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp b/mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp
--- a/mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp
@@ -18,6 +18,7 @@
 #include "mlir/Pass/Pass.h"

 using namespace mlir;
+using namespace mlir::affine;

 namespace {
 struct TestLoopMappingPass
@@ -33,7 +34,7 @@
   explicit TestLoopMappingPass() = default;

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, scf::SCFDialect>();
+    registry.insert<affine::AffineDialect, scf::SCFDialect>();
   }

   void runOnOperation() override {
diff --git a/mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp b/mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp
--- a/mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp
@@ -18,6 +18,7 @@
 #define PASS_NAME "test-loop-permutation"

 using namespace mlir;
+using namespace mlir::affine;

 namespace {
diff --git a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
--- a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
@@ -19,6 +19,7 @@
 #define PASS_NAME "test-affine-reify-value-bounds"

 using namespace mlir;
+using namespace mlir::affine;
 using mlir::presburger::BoundType;

 namespace {
@@ -36,8 +37,8 @@
   TestReifyValueBounds(const TestReifyValueBounds &pass) : PassWrapper(pass){};

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry
-        .insert<AffineDialect, tensor::TensorDialect, memref::MemRefDialect>();
+    registry.insert<affine::AffineDialect, tensor::TensorDialect,
+                    memref::MemRefDialect>();
   }

   void runOnOperation() override;
diff --git a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
--- a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
@@ -33,6 +33,7 @@
 #define DEBUG_TYPE "affine-super-vectorizer-test"

 using namespace mlir;
+using namespace mlir::affine;

 static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");

@@ -99,7 +100,7 @@
 void VectorizerTestPass::testVectorShapeRatio(llvm::raw_ostream &outs) {
   auto f = getOperation();
-  using matcher::Op;
+  using affine::matcher::Op;
   SmallVector<int64_t, 8> shape(clTestVectorShapeRatio.begin(),
                                 clTestVectorShapeRatio.end());
   auto subVectorType =
@@ -109,7 +110,7 @@
   auto filter = [&](Operation &op) {
     assert(subVectorType.getElementType().isF32() &&
            "Only f32 supported for now");
-    if (!matcher::operatesOnSuperVectorsOf(op, subVectorType)) {
+    if (!mlir::matcher::operatesOnSuperVectorsOf(op, subVectorType)) {
       return false;
     }
     if (op.getNumResults() != 1) {
@@ -139,7 +140,7 @@
 }

 static NestedPattern patternTestSlicingOps() {
-  using matcher::Op;
+  using affine::matcher::Op;
   // Match all operations with the kTestSlicingOpName name.
   auto filter = [](Operation &op) {
     // Just use a custom op name for this test, it makes life easier.
@@ -202,7 +203,7 @@
 void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
   auto f = getOperation();

-  using matcher::Op;
+  using affine::matcher::Op;
   auto pattern = Op(customOpWithAffineMapAttribute);
   SmallVector<NestedMatch> matches;
   pattern.match(f, &matches);
diff --git a/mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp b/mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp
--- a/mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp
+++ b/mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp
@@ -33,7 +33,8 @@
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestGpuMemoryPromotionPass)

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, memref::MemRefDialect, scf::SCFDialect>();
+    registry.insert<affine::AffineDialect, memref::MemRefDialect,
+                    scf::SCFDialect>();
   }
   StringRef getArgument() const final { return "test-gpu-memory-promotion"; }
   StringRef getDescription() const final {
diff --git a/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp b/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp
@@ -18,8 +18,8 @@
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestDataLayoutPropagationPass)

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry
-        .insert<AffineDialect, linalg::LinalgDialect, tensor::TensorDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect,
+                    tensor::TensorDialect>();
   }

   StringRef getArgument() const final {
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp
@@ -26,7 +26,7 @@
   TestLinalgDecomposeOps(const TestLinalgDecomposeOps &pass)
       : PassWrapper(pass){};
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, linalg::LinalgDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect>();
   }
   StringRef getArgument() const final { return "test-linalg-decompose-ops"; }
   StringRef getDescription() const final {
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
@@ -92,8 +92,8 @@
   TestLinalgElementwiseFusion(const TestLinalgElementwiseFusion &pass)
       : PassWrapper(pass) {}
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, linalg::LinalgDialect,
-                    memref::MemRefDialect, tensor::TensorDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect,
+                    memref::MemRefDialect, tensor::TensorDialect>();
   }
   StringRef getArgument() const final {
     return "test-linalg-elementwise-fusion-patterns";
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
@@ -73,8 +73,8 @@
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgGreedyFusion)

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, linalg::LinalgDialect,
-                    memref::MemRefDialect, scf::SCFDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect,
+                    memref::MemRefDialect, scf::SCFDialect>();
   }
   StringRef getArgument() const final { return "test-linalg-greedy-fusion"; }
   StringRef getDescription() const final {
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
@@ -39,7 +39,7 @@
   void getDependentDialects(DialectRegistry &registry) const override {
     // clang-format off
-    registry.insert<AffineDialect, linalg::LinalgDialect, memref::MemRefDialect, tensor::TensorDialect, vector::VectorDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect, memref::MemRefDialect, tensor::TensorDialect, vector::VectorDialect>();
   }
   StringRef getArgument() const final { return "test-linalg-pad-fusion"; }
diff --git a/mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp b/mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp
--- a/mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp
+++ b/mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp
@@ -32,7 +32,7 @@

 void TestComposeSubViewPass::getDependentDialects(
     DialectRegistry &registry) const {
-  registry.insert<AffineDialect>();
+  registry.insert<affine::AffineDialect>();
 }

 void TestComposeSubViewPass::runOnOperation() {
diff --git a/mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp b/mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp
--- a/mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp
+++ b/mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp
@@ -24,7 +24,7 @@
   TestMultiBufferingPass(const TestMultiBufferingPass &pass)
       : PassWrapper(pass) {}
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect>();
+    registry.insert<affine::AffineDialect>();
   }
   StringRef getArgument() const final { return "test-multi-buffering"; }
   StringRef getDescription() const final {
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -53,7 +53,7 @@
   }

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect>();
+    registry.insert<affine::AffineDialect>();
   }

   Option<bool> unroll{*this, "unroll", llvm::cl::desc("Include unrolling"),
@@ -128,8 +128,8 @@
   TestVectorContractionPrepareForMMTLowering() = default;

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry
-        .insert<AffineDialect, arith::ArithDialect, vector::VectorDialect>();
+    registry.insert<affine::AffineDialect, arith::ArithDialect,
+                    vector::VectorDialect>();
   }

   void runOnOperation() override {
@@ -246,7 +246,7 @@
       : PassWrapper(pass) {}

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<memref::MemRefDialect, AffineDialect>();
+    registry.insert<memref::MemRefDialect, affine::AffineDialect>();
   }
   StringRef getArgument() const final {
     return "test-vector-transfer-unrolling-patterns";
@@ -305,8 +305,8 @@
   TestScalarVectorTransferLoweringPatterns() = default;

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, memref::MemRefDialect,
-                    tensor::TensorDialect>();
+    registry.insert<affine::AffineDialect, memref::MemRefDialect,
+                    tensor::TensorDialect>();
   }

   void runOnOperation() override {
@@ -342,7 +342,7 @@
       const TestVectorTransferCollapseInnerMostContiguousDims &pass) = default;

   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<memref::MemRefDialect, AffineDialect>();
+    registry.insert<memref::MemRefDialect, affine::AffineDialect>();
   }

   StringRef getArgument() const final {
@@ -488,7 +488,7 @@

   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<scf::SCFDialect, memref::MemRefDialect, gpu::GPUDialect,
-                    AffineDialect>();
+                    affine::AffineDialect>();
   }

   StringRef getArgument() const final { return "test-vector-warp-distribute"; }
diff --git a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
--- a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
+++ b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
@@ -401,8 +401,9 @@
   TestTilingInterfacePass(const TestTilingInterfacePass &pass)
       : PassWrapper(pass) {}
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, gpu::GPUDialect, linalg::LinalgDialect,
-                    memref::MemRefDialect, scf::SCFDialect, tensor::TensorDialect>();
+    registry.insert<affine::AffineDialect, gpu::GPUDialect,
+                    linalg::LinalgDialect, memref::MemRefDialect,
+                    scf::SCFDialect, tensor::TensorDialect>();
     linalg::registerTilingInterfaceExternalModels(registry);
     tensor::registerTilingInterfaceExternalModels(registry);
   }
diff --git a/mlir/unittests/Analysis/Presburger/Parser.h b/mlir/unittests/Analysis/Presburger/Parser.h
--- a/mlir/unittests/Analysis/Presburger/Parser.h
+++ b/mlir/unittests/Analysis/Presburger/Parser.h
@@ -30,7 +30,7 @@
 /// represents a valid IntegerSet.
 inline IntegerPolyhedron parseIntegerPolyhedron(StringRef str) {
   MLIRContext context(MLIRContext::Threading::DISABLED);
-  return FlatAffineValueConstraints(parseIntegerSet(str, &context));
+  return affine::FlatAffineValueConstraints(parseIntegerSet(str, &context));
 }

 /// Parse a list of StringRefs to IntegerRelation and combine them into a
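
Note on migrating downstream code: every hunk in this patch is a mechanical re-qualification, so out-of-tree users of the affine dialect need only update spellings (or add a `using namespace mlir::affine;` next to `using namespace mlir;`, as the test files above do). A minimal sketch of the new spelling; the registration helper below is hypothetical and not part of this patch:

// Hypothetical out-of-tree helper illustrating the rename: affine entities
// now live under mlir::affine rather than directly under mlir.
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/IR/DialectRegistry.h"

static void registerDialectsForMyPass(mlir::DialectRegistry &registry) {
  // Before this patch: registry.insert<mlir::AffineDialect>();
  registry.insert<mlir::affine::AffineDialect>();
}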