diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
@@ -425,10 +425,6 @@
                                          AffineMap map,
                                          ArrayRef<OpFoldResult> operands);

-/// Returns the values obtained by applying `map` to the list of values.
-SmallVector<Value> applyMapToValues(OpBuilder &b, Location loc,
-                                    AffineMap map, ValueRange values);
-
 /// Given an affine map `map` and its input `operands`, this method composes
 /// into `map`, maps of AffineApplyOps whose results are the values in
 /// `operands`, iteratively until no more of `operands` are the result of an
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -1392,33 +1392,6 @@
   return makeComposedFoldedMinMax<AffineMaxOp>(b, loc, map, operands);
 }

-/// Fully compose map with operands and canonicalize the result.
-/// Return the `createOrFold`'ed AffineApply op.
-static Value createFoldedComposedAffineApply(OpBuilder &b, Location loc,
-                                             AffineMap map,
-                                             ValueRange operandsRef) {
-  SmallVector<Value> operands(operandsRef.begin(), operandsRef.end());
-  fullyComposeAffineMapAndOperands(&map, &operands);
-  canonicalizeMapAndOperands(&map, &operands);
-  return b.createOrFold<AffineApplyOp>(loc, map, operands);
-}
-
-SmallVector<Value> mlir::affine::applyMapToValues(OpBuilder &b, Location loc,
-                                                  AffineMap map,
-                                                  ValueRange values) {
-  SmallVector<Value> res;
-  res.reserve(map.getNumResults());
-  unsigned numDims = map.getNumDims(), numSym = map.getNumSymbols();
-  // For each `expr` in `map`, applies the `expr` to the values extracted from
-  // ranges. If the resulting application can be folded into a Value, the
-  // folding occurs eagerly.
-  for (auto expr : map.getResults()) {
-    AffineMap map = AffineMap::get(numDims, numSym, expr);
-    res.push_back(createFoldedComposedAffineApply(b, loc, map, values));
-  }
-  return res;
-}
-
 // A symbol may appear as a dim in affine.apply operations. This function
 // canonicalizes dims that are valid symbols into actual symbols.
 template <class MapOrSet>
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -529,7 +529,7 @@
   procInfo.resize(
       iteratorTypes.size(),
       linalg::ProcInfo{nullptr, nullptr, linalg::DistributionMethod::None});
-  // Collect loop ranges of tiled loopss, loops that are parallel.
+  // Collect loop ranges of tiled loops, loops that are parallel.
   SmallVector<Range, 2> parallelLoopRanges;
   for (const auto &iteratorType : llvm::enumerate(iteratorTypes)) {
     if (!isParallelIterator(iteratorType.value()))
@@ -559,10 +559,13 @@
     // loop ranges and the iterator types. Apply its inverse to the
     // resulting loop `ivs` to match the op definition.
     SmallVector<Value> interchangedIvs;
-    if (!options.interchangeVector.empty())
-      interchangedIvs = applyMapToValues(b, loc, invPermutationMap, ivs);
-    else
+    if (!options.interchangeVector.empty()) {
+      for (AffineExpr result : invPermutationMap.getResults())
+        interchangedIvs.push_back(
+            ivs[result.cast<AffineDimExpr>().getPosition()]);
+    } else {
       interchangedIvs.assign(ivs.begin(), ivs.end());
+    }

     // Tile the `operandValuesToUse` that either match the `op` operands
     // themselves or the tile loop arguments forwarding them.
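
Note for downstream users: the two in-tree call sites above show the full migration recipe for `applyMapToValues`. Below is a hedged sketch of both replacement patterns; the helper names `permuteValues` and `applyMapToOpFoldResults` are illustrative, not part of the MLIR API, and the per-result loop in the second helper is essentially what the in-tree `makeComposedFoldedMultiResultAffineApply` convenience (whose declaration tail appears as context at the top of the header hunk) already provides.

```cpp
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"

using namespace mlir;

// Pattern 1: the map is a pure permutation (the Tiling.cpp case above).
// No affine.apply is needed; index `ivs` by each result's dim position.
static SmallVector<Value> permuteValues(AffineMap permutationMap,
                                        ValueRange ivs) {
  SmallVector<Value> permuted;
  for (AffineExpr result : permutationMap.getResults())
    permuted.push_back(ivs[result.cast<AffineDimExpr>().getPosition()]);
  return permuted;
}

// Pattern 2: the map does real arithmetic. Fold each result separately;
// makeComposedFoldedAffineApply returns an Attribute when the result is a
// compile-time constant and an affine.apply Value otherwise.
static SmallVector<OpFoldResult>
applyMapToOpFoldResults(OpBuilder &b, Location loc, AffineMap map,
                        ArrayRef<OpFoldResult> operands) {
  SmallVector<OpFoldResult> results;
  for (unsigned i = 0, e = map.getNumResults(); i != e; ++i)
    results.push_back(affine::makeComposedFoldedAffineApply(
        b, loc, map.getSubMap({i}), operands));
  return results;
}
```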
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -49,15 +49,15 @@
       map.getResults().front().cast<AffineDimExpr>().getPosition();
   unsigned endPos = map.getResults().back().cast<AffineDimExpr>().getPosition();
   AffineExpr expr;
-  SmallVector<Value> dynamicDims;
+  SmallVector<OpFoldResult> dynamicDims;
   for (auto dim : llvm::seq_inclusive(startPos, endPos)) {
     dynamicDims.push_back(builder.createOrFold<tensor::DimOp>(loc, src, dim));
     AffineExpr currExpr = builder.getAffineSymbolExpr(dim - startPos);
     expr = (expr ? expr * currExpr : currExpr);
   }
-  return affine::applyMapToValues(
+  return affine::makeComposedFoldedAffineApply(
       builder, loc, AffineMap::get(0, endPos - startPos + 1, expr),
-      dynamicDims)[0];
+      dynamicDims);
 }

 /// Given the `src` of a collapsing reshape op and its reassociation maps,
@@ -102,12 +102,13 @@
                "dimensions");
     linearizedStaticDim *= d.value();
   }
-  Value sourceDim = builder.create<tensor::DimOp>(loc, src, sourceDimPos);
-  return affine::applyMapToValues(
+  OpFoldResult sourceDim =
+      builder.create<tensor::DimOp>(loc, src, sourceDimPos).getResult();
+  return affine::makeComposedFoldedAffineApply(
       builder, loc,
       AffineMap::get(
           0, 1, builder.getAffineSymbolExpr(0).floorDiv(linearizedStaticDim)),
-      sourceDim)[0];
+      sourceDim);
 }

 /// Given the `src` of an expanding reshape op, the reassociation maps and the
@@ -174,25 +175,17 @@
   }

   // Shape along each dimension is source dim + low pad + high pad.
-  SmallVector<Value> mapOperands;
+  SmallVector<OpFoldResult> mapOperands;
   mapOperands.push_back(
       b.createOrFold<tensor::DimOp>(loc, padOp.getSource(), dim));
-  AffineExpr expr = b.getAffineDimExpr(0);
-  unsigned numSymbols = 0;
-  auto addOpFoldResult = [&](OpFoldResult valueOrAttr) {
-    if (Value v = llvm::dyn_cast_if_present<Value>(valueOrAttr)) {
-      expr = expr + b.getAffineSymbolExpr(numSymbols++);
-      mapOperands.push_back(v);
-      return;
-    }
-    int64_t staticValue =
-        llvm::cast<IntegerAttr>(valueOrAttr.get<Attribute>()).getInt();
-    expr = expr + staticValue;
-  };
-  addOpFoldResult(lowPad[dim]);
-  addOpFoldResult(highPad[dim]);
-  shapes.push_back(affine::applyMapToValues(
-      b, loc, AffineMap::get(1, numSymbols, expr), mapOperands)[0]);
+  mapOperands.push_back(lowPad[dim]);
+  mapOperands.push_back(highPad[dim]);
+  AffineExpr expr = b.getAffineDimExpr(0) + b.getAffineSymbolExpr(0) +
+                    b.getAffineSymbolExpr(1);
+  shapes.push_back(getValueOrCreateConstantIndexOp(
+      b, loc,
+      affine::makeComposedFoldedAffineApply(
+          b, loc, AffineMap::get(1, 2, expr), mapOperands)));
 }
 reifiedReturnShapes.emplace_back(std::move(shapes));
 return success();
diff --git a/mlir/test/Dialect/Linalg/pad_fusion.mlir b/mlir/test/Dialect/Linalg/pad_fusion.mlir
--- a/mlir/test/Dialect/Linalg/pad_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/pad_fusion.mlir
@@ -22,7 +22,7 @@
   return %1 : tensor
 }

-// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s2 + s0 + s1)>
+// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 + s1 + s2)>
 // CHECK: func @dynamic_pad_fusion
 // CHECK-SAME: %[[ARG0:.+]]: tensor
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: index
@@ -70,7 +70,7 @@
   } : tensor<42x?xf32> to tensor<49x?xf32>
   return %1 : tensor<49x?xf32>
 }
-// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s2 + s0 + s1)>
+// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 + s1 + s2)>
 // CHECK: func @mixed_pad_fusion
 // CHECK-SAME: %[[ARG0:.+]]: tensor
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: index
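
The pad case above is the interesting hunk: the hand-written lambda that folded static paddings into the expression is gone, because `makeComposedFoldedAffineApply` performs the same folding during composition. A stand-alone sketch of the new shape computation follows; the function name `reifyPadDimSize` is made up for illustration, while the API calls are the ones used in the hunk above.

```cpp
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"

using namespace mlir;

// size(dim) = dim(source, dim) + lowPad + highPad, folded where possible.
static Value reifyPadDimSize(OpBuilder &b, Location loc, Value source,
                             int64_t dim, OpFoldResult lowPad,
                             OpFoldResult highPad) {
  // d0 is the (possibly dynamic) source dimension; s0/s1 are the paddings,
  // each of which may be a static IntegerAttr or a dynamic Value.
  SmallVector<OpFoldResult> mapOperands;
  mapOperands.push_back(b.createOrFold<tensor::DimOp>(loc, source, dim));
  mapOperands.push_back(lowPad);
  mapOperands.push_back(highPad);
  AffineExpr expr = b.getAffineDimExpr(0) + b.getAffineSymbolExpr(0) +
                    b.getAffineSymbolExpr(1);
  // Composition folds attribute operands into the map and renumbers the
  // remaining dims/symbols, which is where the (s0 + s1 + s2) ordering in
  // the updated CHECK lines comes from.
  OpFoldResult folded = affine::makeComposedFoldedAffineApply(
      b, loc, AffineMap::get(/*dimCount=*/1, /*symbolCount=*/2, expr),
      mapOperands);
  // reifyResultShapes expects Values here, so rematerialize constants.
  return getValueOrCreateConstantIndexOp(b, loc, folded);
}
```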
diff --git a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
--- a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
+++ b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
@@ -262,8 +262,8 @@
   %3 = tensor.dim %0, %c2 : tensor
   return %1, %2, %3 : index, index, index
 }
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 5)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 4)>
+// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 5)>
+// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 4)>
 // CHECK: func @dim_of_pad_op
 // CHECK-SAME: %[[ARG0:[A-Za-z0-9_]+]]: tensor<2x?x?xf32>
 // CHECK-SAME: %[[ARG1:[A-Za-z0-9_]+]]: index
diff --git a/mlir/test/Dialect/Linalg/vectorization-masked.mlir b/mlir/test/Dialect/Linalg/vectorization-masked.mlir
--- a/mlir/test/Dialect/Linalg/vectorization-masked.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-masked.mlir
@@ -405,7 +405,7 @@

 // -----

-// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s1 + s0)>
+// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s0 + s1)>
 // CHECK: func @test_masked_vectorize_dynamic_pad
 func.func @test_masked_vectorize_dynamic_pad(
   %0 : tensor, %h0 : index, %h1 : index)
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -547,7 +547,7 @@

 // -----

-// CHECK: #[[$sum_map_1:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 5)>
+// CHECK: #[[$sum_map_1:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 5)>
 // CHECK: #[[$sum_map_2:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 10)>
 // CHECK-LABEL: func @tensor.pad(
 // CHECK-SAME: %[[t1:.*]]: tensor, %[[l2:.*]]: index, %[[h1:.*]]: index, %[[h2:.*]]: index
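
The remaining test churn is uniform: none of these CHECK updates change what is computed, only how the canonicalized map numbers its symbols. A hedged illustration follows; the exact renumbering order is an implementation detail of the affine canonicalization helpers, and `canonicalizePadSizeMap` is a made-up wrapper, not MLIR API.

```cpp
#include "mlir/Dialect/Affine/IR/AffineOps.h"

using namespace mlir;

// canonicalizeMapAndOperands deduplicates/drops operands and renumbers the
// map's dims and symbols to match the surviving operand order. Under the
// makeComposedFoldedAffineApply path the pad-size operands arrive as
// {source dim, low pad, high pad}, so the map now prints as
//   affine_map<()[s0, s1, s2] -> (s0 + s1 + s2)>
// where the old applyMapToValues path printed (s2 + s0 + s1).
static void canonicalizePadSizeMap(AffineMap &map,
                                   SmallVectorImpl<Value> &operands) {
  affine::canonicalizeMapAndOperands(&map, &operands);
}
```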