diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -15,12 +15,12 @@
 // AffineToStandard
 //===----------------------------------------------------------------------===//
 
-def ConvertAffineToStandard : FunctionPass<"lower-affine"> {
+def ConvertAffineToStandard : Pass<"lower-affine"> {
   let summary = "Lower Affine operations to a combination of Standard and SCF "
                 "operations";
   let description = [{
-    Convert operations from the affine dialect into operations from the loop and
+    Convert operations from the affine dialect into operations from the SCF and
     standard dialects.
 
     `affine.for` operations are converted to `scf.for` operations that are free
diff --git a/mlir/include/mlir/Transforms/Passes.h b/mlir/include/mlir/Transforms/Passes.h
--- a/mlir/include/mlir/Transforms/Passes.h
+++ b/mlir/include/mlir/Transforms/Passes.h
@@ -57,7 +57,7 @@
 /// Lowers affine control flow operations (ForStmt, IfStmt and AffineApplyOp)
 /// to equivalent lower-level constructs (flow of basic blocks and arithmetic
 /// primitives).
-std::unique_ptr<OperationPass<FuncOp>> createLowerAffinePass();
+std::unique_ptr<Pass> createLowerAffinePass();
 
 /// Creates a pass that transforms perfectly nested loops with independent
 /// bounds into a single loop.
diff --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
--- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
+++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -672,14 +672,14 @@
 
 namespace {
 class LowerAffinePass : public ConvertAffineToStandardBase<LowerAffinePass> {
-  void runOnFunction() override {
+  void runOnOperation() override {
     OwningRewritePatternList patterns;
     populateAffineToStdConversionPatterns(patterns, &getContext());
     populateAffineToVectorConversionPatterns(patterns, &getContext());
     ConversionTarget target(getContext());
     target
         .addLegalDialect<scf::SCFDialect, StandardOpsDialect, VectorDialect>();
-    if (failed(applyPartialConversion(getFunction(), target, patterns)))
+    if (failed(applyPartialConversion(getOperation(), target, patterns)))
       signalPassFailure();
   }
 };
@@ -687,6 +687,6 @@
 
 /// Lowers If and For operations within a function into their lower level CFG
 /// equivalent blocks.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLowerAffinePass() {
+std::unique_ptr<Pass> mlir::createLowerAffinePass() {
   return std::make_unique<LowerAffinePass>();
 }
diff --git a/mlir/test/Conversion/AffineToStandard/lower-affine-gpu.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine-gpu.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine-gpu.mlir
@@ -0,0 +1,15 @@
+// RUN: mlir-opt -pass-pipeline="gpu.module(lower-affine)" %s | FileCheck %s
+
+#map0gpufunc = affine_map<(d0) -> (d0)>
+gpu.module @kernels {
+  gpu.func @foo(%arg0 : index, %arg1 : memref<?xf32>) -> f32 {
+    %0 = affine.apply #map0gpufunc(%arg0)
+    %1 = load %arg1[%0] : memref<?xf32>
+    gpu.return %1 : f32
+  }
+
+// CHECK: gpu.func
+// CHECK-SAME: %[[ARG0:.*]]: index
+// CHECK-NOT: affine.apply
+// CHECK: load %{{.*}}[%[[ARG0]]]
+}