diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp --- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp @@ -313,6 +313,7 @@ LogicalResult mlir::linalg::detail::verifyStructuredOpInterface(Operation *op) { LinalgOp linalgOp = cast(op); + // Expect at least one shaped operand. // This means an op that constructs a tensor out of indices cannot be a // LinalgOp at the moment. For now this will have to be a special op until we @@ -457,47 +458,64 @@ } // Check if given shapes match to inferred shapes. - Optional> loopRanges = linalgOp.getStaticLoopRanges(); - if (!loopRanges) + Optional> endLoopRangeValues = + linalgOp.getStaticLoopRanges(); + if (!endLoopRangeValues) return linalgOp.emitError("unable to find loop range for operation"); + SmallVector startLoopRangeValues((*endLoopRangeValues).size(), 0); // Verify only static cases since we can't get exact dimension sizes and loop // ranges for dynamic cases in this stage. - if (llvm::none_of(*loopRanges, [](int64_t &range) { + if (llvm::none_of(*endLoopRangeValues, [](int64_t &range) { return range == ShapedType::kDynamicSize; })) { - for (int64_t &range : *loopRanges) + for (int64_t &range : *endLoopRangeValues) range -= 1; for (const auto &en : llvm::enumerate(linalgOp.getShapedOperandTypes())) { - auto indices = indexingMaps[en.index()].compose(*loopRanges); + auto firstIndices = + indexingMaps[en.index()].compose(startLoopRangeValues); + auto lastIndices = indexingMaps[en.index()].compose(*endLoopRangeValues); for (auto j : llvm::seq(0, en.value().getRank())) { - // Ignore dynamic dimension or the case that the inferred last index is - // zero. The index is increasing or decreasing in Linalg, for example, - // the last index should be `0` or `size-1`. We only check the cases - // that are non-zero because most of cases are increasing and it is too - // expensive to find the shape of decreasing cases. 
- if (en.value().isDynamicDim(j) || indices[j] == 0) + // Ignore dynamic dimension. + if (en.value().isDynamicDim(j)) continue; - // The size of shaped operands and inferred dimension size should be - // same. But, for now we check if the inferred sizes are in boundary of - // shaped operands' size or not in case that Affine Expressions are - // complicated such as d0 * 3 + d1 since it is not easy to handle the - // issues. - auto inferredSize = indices[j] + 1; + // The first index or last index should be the maximum or the minimum in + // the inferred index ranges since the range is increasing or + // decreasing. The size of dimensions of shaped operands and the maximum + // value + 1 in the inferred range should be the same. But, for now we + // check if the inferred ranges are in boundary of shaped operands' size + // or not in case that Affine Expressions are complicated such as d0 * 3 + // + d1 since it is not easy to handle the issues. auto shapedDimSize = en.value().getDimSize(j); + // If the dimension size is 0, then the index should be -1, not 0. 
+ if (shapedDimSize == 0) + firstIndices[j] = -1; + auto inferredDimSize = std::max(firstIndices[j], lastIndices[j]) + 1; + if (shapedDimSize > 0 && + std::min(firstIndices[j], lastIndices[j]) < 0) { + std::string mapStr; + { + llvm::raw_string_ostream os(mapStr); + os << indexingMaps[en.index()]; + } + return linalgOp.emitError( + "unexpected result less than 0 at expression #") + << j << " in affineMap\n" + << mapStr; + } if (indexingMaps[en.index()].getResult(j).dyn_cast()) { - if (inferredSize != shapedDimSize) { + if (inferredDimSize != shapedDimSize) { return linalgOp.emitOpError("inferred shaped operand #") << en.index() << " has shape's dimension #" << j << " to be " - << inferredSize << ", but found " << shapedDimSize; + << inferredDimSize << ", but found " << shapedDimSize; } } else { - if (inferredSize > shapedDimSize) { + if (inferredDimSize > shapedDimSize) { return linalgOp.emitOpError("inferred shaped operand #") << en.index() << " has shape's dimension #" << j - << " to be greater than or equal to " << inferredSize + << " to be greater than or equal to " << inferredDimSize << ", but found " << shapedDimSize; } } diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir --- a/mlir/test/Dialect/Linalg/invalid.mlir +++ b/mlir/test/Dialect/Linalg/invalid.mlir @@ -24,41 +24,6 @@ // ----- -func @index_parent() { - // expected-error @+1 {{op expected parent op with LinalgOp interface}} - linalg.index 0 : index -} - -// ----- - -func @index_dim_lower_than_number_of_loops(%arg0: memref) { - // expected-error @+6 {{op expected dim (2) to be lower than the number of loops (0) of the enclosing LinalgOp}} - linalg.generic { - indexing_maps = [ affine_map<() -> ()> ], - iterator_types = []} - outs(%arg0 : memref) { - ^bb(%0: f32): - linalg.index 2 : index - linalg.yield %0 : f32 - } -} - -// ----- - -func @index_dim_negative(%arg0: memref) { - // expected-error @+6 {{op attribute 'dim' failed to satisfy constraint: 64-bit signless 
integer attribute whose minimum value is 0}} - linalg.generic { - indexing_maps = [ affine_map<() -> ()> ], - iterator_types = []} - outs(%arg0 : memref) { - ^bb(%0: f32): - linalg.index -1 : index - linalg.yield %0 : f32 - } -} - -// ----- - func @generic_no_region(%arg0: memref) { // expected-error @+5 {{expected '{' to begin a region}} linalg.generic { @@ -761,84 +726,23 @@ // ----- -#map0 = affine_map<(d0) -> (24, -d0 + 192)> -#map1 = affine_map<(d0, d1)[s0] -> (d0 * 192 + s0 + d1)> -#map2 = affine_map<(d0) -> (16, -d0 + 192)> - -func private @foo(%A: memref<192x192xf32>, %B: memref<192x192xf32>, - %C: memref<192x192xf32>) -> () - -func @tiled_loop_incorrent_num_yield_operands(%A: memref<192x192xf32>, - %B: memref<192x192xf32>, %C: memref<192x192xf32>, - %C_tensor: tensor<192x192xf32>) { - %c24 = constant 24 : index - %c0 = constant 0 : index - %c192 = constant 192 : index - %0 = linalg.tiled_loop (%i, %j) = (%c0, %c0) to (%c192, %c192) - step (%c24, %c24) - ins (%A, %B: memref<192x192xf32>, memref<192x192xf32>) - outs (%C_tensor, %C :tensor<192x192xf32>, memref<192x192xf32>) { - call @foo(%A, %B, %C) - : (memref<192x192xf32>, memref<192x192xf32>, memref<192x192xf32>)-> () - // expected-error @+1 {{expected number of tensor output args = 1 to match the number of yield operands = 0}} - linalg.yield - } - return -} - -// ----- - -#map0 = affine_map<(d0) -> (24, -d0 + 192)> -#map1 = affine_map<(d0, d1)[s0] -> (d0 * 192 + s0 + d1)> -#map2 = affine_map<(d0) -> (16, -d0 + 192)> - -func private @foo(%A: memref<192x192xf32>, %B: memref<192x192xf32>, - %C: memref<192x192xf32>) -> tensor - -func @tiled_loop_incorrent_yield_operand_type(%A: memref<192x192xf32>, - %B: memref<192x192xf32>, %C: memref<192x192xf32>, - %C_tensor: tensor<192x192xf32>) { - %c24 = constant 24 : index - %c0 = constant 0 : index - %c192 = constant 192 : index - %0 = linalg.tiled_loop (%i, %j) = (%c0, %c0) to (%c192, %c192) - step (%c24, %c24) - ins (%A, %B: memref<192x192xf32>, memref<192x192xf32>) 
- outs (%C_tensor, %C :tensor<192x192xf32>, memref<192x192xf32>) { - %1 = call @foo(%A, %B, %C) - : (memref<192x192xf32>, memref<192x192xf32>, memref<192x192xf32>)-> tensor - // expected-error @+1 {{expected yield operand 0 with type = 'tensor' to match output arg type = 'tensor<192x192xf32>}} - linalg.yield %1 : tensor - } - return +#attrs = { + indexing_maps = [ + affine_map<(i, j, k) -> (1-i, k)>, + affine_map<(i, j, k) -> (k, j)>, + affine_map<(i, j, k) -> (i, j)> + ], + iterator_types = ["parallel", "parallel", "reduction"] } -// ----- - -#map0 = affine_map<(d0) -> (24, -d0 + 192)> -#map1 = affine_map<(d0, d1)[s0] -> (d0 * 192 + s0 + d1)> -#map2 = affine_map<(d0) -> (16, -d0 + 192)> - -func private @foo(%A: memref<192x192xf32>, %B: memref<192x192xf32>, - %C: memref<192x192xf32>) -> () +func @example(%A: memref<3x5xf32>, %B: memref<5x2xf32>, %C: memref<3x2xf32>) { + // expected-error @+1 {{unexpected result less than 0 at expression #0 in affineMap}} + linalg.generic #attrs ins(%A, %B: memref<3x5xf32>, memref<5x2xf32>) outs(%C: memref<3x2xf32>) { + ^bb0(%a: f32, %b: f32, %c: f32): + %d = mulf %a, %b : f32 + %e = addf %c, %d : f32 + linalg.yield %e : f32 + } -func @tiled_loop_incorrent_iterator_types_count(%A: memref<192x192xf32>, - %B: memref<192x192xf32>, %C: memref<192x192xf32>, - %C_tensor: tensor<192x192xf32>) { - %c24 = constant 24 : index - %c0 = constant 0 : index - %c192 = constant 192 : index - // expected-error @+1 {{expected iterator types array attribute size = 1 to match the number of loops = 2}} - %0 = "linalg.tiled_loop"(%c0, %c0, %c192, %c192, %c24, %c24, %A, %B, %C_tensor, %C) ( { - ^bb0(%arg4: index, %arg5: index): // no predecessors - call @foo(%A, %B, %C) - : (memref<192x192xf32>, memref<192x192xf32>, memref<192x192xf32>)-> () - linalg.yield %C_tensor : tensor<192x192xf32> - }) { - iterator_types = ["parallel"], - operand_segment_sizes = dense<2> : vector<5xi32> - } : (index, index, index, index, index, index, memref<192x192xf32>, - 
memref<192x192xf32>, tensor<192x192xf32>, memref<192x192xf32> - ) -> tensor<192x192xf32> - return + return }