diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -1372,51 +1372,46 @@
 def DimOp : Std_Op<"dim", [NoSideEffect]> {
   let summary = "dimension index operation";
   let description = [{
-    Syntax:
-
-    ```
-    operation ::= ssa-id `=` `std.dim` ssa-id `,` integer-literal `:` type
-    ```
+    The `dim` operation takes a memref/tensor and a dimension operand of type
+    `index`.
+    It returns the size of the requested dimension of the given memref/tensor.
 
-    The `dim` operation takes a memref or tensor operand and a dimension index,
-    and returns an [`index`](../LangRef.md#index-type) that is the size of that
-    dimension.
-
-    The `dim` operation is represented with a single integer attribute named
-    `index`, and the type specifies the type of the memref or tensor operand.
+    The specified memref or tensor type is that of the first operand.
 
     Example:
 
     ```mlir
     // Always returns 4, can be constant folded:
-    %x = dim %A, 0 : tensor<4 x ? x f32>
+    %c0 = constant 0 : index
+    %x = dim %A, %c0 : tensor<4 x ? x f32>
 
     // Returns the dynamic dimension of %A.
-    %y = dim %A, 1 : tensor<4 x ? x f32>
+    %c1 = constant 1 : index
+    %y = dim %A, %c1 : tensor<4 x ? x f32>
 
     // Equivalent generic form:
-    %x = "std.dim"(%A) {index = 0 : i64} : (tensor<4 x ? x f32>) -> index
-    %y = "std.dim"(%A) {index = 1 : i64} : (tensor<4 x ? x f32>) -> index
+    %x = "std.dim"(%A, %c0) : (tensor<4 x ? x f32>, index) -> index
+    %y = "std.dim"(%A, %c1) : (tensor<4 x ? x f32>, index) -> index
     ```
   }];
 
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor],
                                  "any tensor or memref type">:$memrefOrTensor,
-                       APIntAttr:$index);
-  let results = (outs Index);
+                       Index:$index);
+  let results = (outs Index:$result);
 
-  let builders = [OpBuilder<
-    "OpBuilder &builder, OperationState &result, Value memrefOrTensor,"
-    "unsigned index", [{
-      auto indexType = builder.getIndexType();
-      auto indexAttr = builder.getIntegerAttr(indexType, index);
-      build(builder, result, indexType, memrefOrTensor, indexAttr);
-    }]>];
+  let assemblyFormat = [{
+    attr-dict $memrefOrTensor `,` $index `:` type($memrefOrTensor)
+  }];
+
+  let builders = [
+    OpBuilder<"OpBuilder &builder, OperationState &result, "
+              "Value memrefOrTensor, int64_t index">
+  ];
 
   let extraClassDeclaration = [{
-    unsigned getIndex() {
-      return getAttrOfType<IntegerAttr>("index").getValue().getZExtValue();
-    }
+    /// Helper function to get the index as a simple integer if it is constant.
+    Optional<int64_t> getConstantIndex();
   }];
 
   let hasFolder = 1;
diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -583,7 +583,7 @@
       createIndexAttrConstant(builder, loc, indexType, size));
 }
 
-/// Builds IR extracting the pos-th size from the descriptor.
+/// Builds IR extracting the pos-th stride from the descriptor.
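+/// (Editorial note, not part of the upstream change: the old comment appears
+/// to have been copied from the sibling `size` helper; strides live at
+/// position 4 of the LLVM descriptor struct, after the sizes array at
+/// position 3, as the extractvalue indices in the tests below show.)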
 Value MemRefDescriptor::stride(OpBuilder &builder, Location loc,
                                unsigned pos) {
   return builder.create<LLVM::ExtractValueOp>(
       loc, indexType, value,
@@ -2114,17 +2114,24 @@
                   ConversionPatternRewriter &rewriter) const override {
     auto dimOp = cast<DimOp>(op);
     OperandAdaptor<DimOp> transformed(operands);
-    MemRefType type = dimOp.getOperand().getType().cast<MemRefType>();
+    MemRefType type = dimOp.memrefOrTensor().getType().cast<MemRefType>();
 
-    int64_t index = dimOp.getIndex();
+    Optional<int64_t> index = dimOp.getConstantIndex();
+    if (!index.hasValue()) {
+      // TODO(frgossen): Implement this lowering.
+      return failure();
+    }
+
+    int64_t i = index.getValue();
     // Extract dynamic size from the memref descriptor.
-    if (type.isDynamicDim(index))
+    if (type.isDynamicDim(i))
       rewriter.replaceOp(op, {MemRefDescriptor(transformed.memrefOrTensor())
-                                  .size(rewriter, op->getLoc(), index)});
+                                  .size(rewriter, op->getLoc(), i)});
     else
       // Use constant for static size.
-      rewriter.replaceOp(op, createIndexConstant(rewriter, op->getLoc(),
-                                                 type.getDimSize(index)));
+      rewriter.replaceOp(
+          op, createIndexConstant(rewriter, op->getLoc(), type.getDimSize(i)));
+    return success();
   }
 };
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -173,7 +173,7 @@
   // The dim op is okay if its operand memref/tensor is defined at the top
   // level.
   if (auto dimOp = dyn_cast<DimOp>(op))
-    return isTopLevelValue(dimOp.getOperand());
+    return isTopLevelValue(dimOp.memrefOrTensor());
   return false;
 }
@@ -197,18 +197,22 @@
 static bool isDimOpValidSymbol(DimOp dimOp, Region *region) {
   // The dim op is okay if its operand memref/tensor is defined at the top
   // level.
-  if (isTopLevelValue(dimOp.getOperand()))
+  if (isTopLevelValue(dimOp.memrefOrTensor()))
     return true;
 
   // The dim op is also okay if its operand memref/tensor is a view/subview
   // whose corresponding size is a valid symbol.
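   // For example (editorial sketch, not part of the upstream comment; exact
   // types elided):
   //   %c1 = constant 1 : index
   //   %sv = subview %buf[...][%sz0, %sz1][...] : memref<...> to memref<...>
   //   %d  = dim %sv, %c1 : memref<...>
   // Here %d is usable as a symbol exactly when the corresponding size
   // operand %sz1 is itself a valid symbol in the region.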
-  unsigned index = dimOp.getIndex();
-  if (auto viewOp = dyn_cast<ViewOp>(dimOp.getOperand().getDefiningOp()))
-    return isMemRefSizeValidSymbol(viewOp, index, region);
-  if (auto subViewOp = dyn_cast<SubViewOp>(dimOp.getOperand().getDefiningOp()))
-    return isMemRefSizeValidSymbol(subViewOp, index, region);
-  if (auto allocOp = dyn_cast<AllocOp>(dimOp.getOperand().getDefiningOp()))
-    return isMemRefSizeValidSymbol(allocOp, index, region);
+  Optional<int64_t> index = dimOp.getConstantIndex();
+  assert(index.hasValue() &&
+         "expect only `dim` operations with a constant index");
+  int64_t i = index.getValue();
+  if (auto viewOp = dyn_cast<ViewOp>(dimOp.memrefOrTensor().getDefiningOp()))
+    return isMemRefSizeValidSymbol(viewOp, i, region);
+  if (auto subViewOp =
+          dyn_cast<SubViewOp>(dimOp.memrefOrTensor().getDefiningOp()))
+    return isMemRefSizeValidSymbol(subViewOp, i, region);
+  if (auto allocOp = dyn_cast<AllocOp>(dimOp.memrefOrTensor().getDefiningOp()))
+    return isMemRefSizeValidSymbol(allocOp, i, region);
   return false;
 }
diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -1264,81 +1264,85 @@
 // DimOp
 //===----------------------------------------------------------------------===//
 
-static void print(OpAsmPrinter &p, DimOp op) {
-  p << "dim " << op.getOperand() << ", " << op.getIndex();
-  p.printOptionalAttrDict(op.getAttrs(), /*elidedAttrs=*/{"index"});
-  p << " : " << op.getOperand().getType();
-}
-
-static ParseResult parseDimOp(OpAsmParser &parser, OperationState &result) {
-  OpAsmParser::OperandType operandInfo;
-  IntegerAttr indexAttr;
-  Type type;
-  Type indexType = parser.getBuilder().getIndexType();
-
-  return failure(
-      parser.parseOperand(operandInfo) || parser.parseComma() ||
-      parser.parseAttribute(indexAttr, indexType, "index", result.attributes) ||
-      parser.parseOptionalAttrDict(result.attributes) ||
-      parser.parseColonType(type) ||
-      parser.resolveOperand(operandInfo, type, result.operands) ||
-      parser.addTypeToList(indexType, result.types));
+void DimOp::build(OpBuilder &builder, OperationState &result,
+                  Value memrefOrTensor, int64_t index) {
+  auto loc = result.location;
+  Value indexValue = builder.create<ConstantIndexOp>(loc, index);
+  auto indexTy = builder.getIndexType();
+  build(builder, result, indexTy, memrefOrTensor, indexValue);
+}
+
+Optional<int64_t> DimOp::getConstantIndex() {
+  auto constantOp = index().getDefiningOp<ConstantOp>();
+  if (constantOp) {
+    return constantOp.getValue().cast<IntegerAttr>().getInt();
+  }
+  return {};
 }
 
 static LogicalResult verify(DimOp op) {
-  // Check that we have an integer index operand.
-  auto indexAttr = op.getAttrOfType<IntegerAttr>("index");
-  if (!indexAttr)
-    return op.emitOpError("requires an integer attribute named 'index'");
-  int64_t index = indexAttr.getInt();
 
-  auto type = op.getOperand().getType();
+  // Assume unknown index to be in range.
+  Optional<int64_t> index = op.getConstantIndex();
+  if (!index.hasValue())
+    return success();
+
+  // Check that constant index is not knowingly out of range.
+  auto type = op.memrefOrTensor().getType();
   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
-    if (index >= tensorType.getRank())
+    if (index.getValue() >= tensorType.getRank())
      return op.emitOpError("index is out of range");
   } else if (auto memrefType = type.dyn_cast<MemRefType>()) {
-    if (index >= memrefType.getRank())
+    if (index.getValue() >= memrefType.getRank())
       return op.emitOpError("index is out of range");
   } else if (type.isa<UnrankedTensorType>()) {
-    // ok, assumed to be in-range.
+    // Assume index to be in range.
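+    // (Editorial aside, not upstream code: an unranked operand has no static
+    // rank to check a constant index against, so verification succeeds.)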
   } else {
-    return op.emitOpError("requires an operand with tensor or memref type");
+    llvm_unreachable("expected operand with tensor or memref type");
   }
   return success();
 }
 
 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
-  // Constant fold dim when the size along the index referred to is a constant.
-  auto opType = memrefOrTensor().getType();
-  if (auto shapedType = opType.dyn_cast<ShapedType>())
-    if (!shapedType.isDynamicDim(getIndex()))
-      return IntegerAttr::get(IndexType::get(getContext()),
-                              shapedType.getShape()[getIndex()]);
-
-  // Fold dim to the size argument for an AllocOp/ViewOp/SubViewOp.
-  auto memrefType = opType.dyn_cast<MemRefType>();
+  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
+
+  // All forms of folding require a known index.
+  if (!index)
+    return {};
+
+  // Fold if the shape extent along the given index is known.
+  auto argTy = memrefOrTensor().getType();
+  if (auto shapedTy = argTy.dyn_cast<ShapedType>()) {
+    if (!shapedTy.isDynamicDim(index.getInt())) {
+      Builder builder(getContext());
+      return builder.getIndexAttr(shapedTy.getShape()[index.getInt()]);
+    }
+  }
+
+  // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
+  auto memrefType = argTy.dyn_cast<MemRefType>();
   if (!memrefType)
     return {};
 
-  // The size at getIndex() is now known to be a dynamic size of a memref.
+  // The size at the given index is now known to be a dynamic size of a memref.
   auto memref = memrefOrTensor().getDefiningOp();
+  unsigned unsignedIndex = index.getValue().getZExtValue();
   if (auto alloc = dyn_cast_or_null<AllocOp>(memref))
     return *(alloc.getDynamicSizes().begin() +
-             memrefType.getDynamicDimIndex(getIndex()));
+             memrefType.getDynamicDimIndex(unsignedIndex));
 
   if (auto view = dyn_cast_or_null<ViewOp>(memref))
     return *(view.getDynamicSizes().begin() +
-             memrefType.getDynamicDimIndex(getIndex()));
+             memrefType.getDynamicDimIndex(unsignedIndex));
 
   if (auto subview = dyn_cast_or_null<SubViewOp>(memref)) {
-    assert(subview.isDynamicSize(getIndex()) &&
+    assert(subview.isDynamicSize(unsignedIndex) &&
            "Expected dynamic subview size");
-    return subview.getDynamicSize(getIndex());
+    return subview.getDynamicSize(unsignedIndex);
   }
 
-  /// dim(memrefcast) -> dim
+  // dim(memrefcast) -> dim
   if (succeeded(foldMemRefCast(*this)))
     return getResult();
diff --git a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
--- a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
@@ -208,23 +208,23 @@
   %c0 = constant 0 : index
   %c3 = constant 3 : index
   %c2 = constant 2 : index
-  %0 = dim %arg0, 0 : memref
-  %1 = dim %arg0, 1 : memref
+  %0 = dim %arg0, %c0 : memref
+  %1 = dim %arg0, %c1 : memref
   scf.parallel (%arg3, %arg4) = (%c0, %c0) to (%0, %1) step (%c2, %c3) {
-    %2 = dim %arg0, 0 : memref
+    %2 = dim %arg0, %c0 : memref
     %3 = affine.min #map1(%arg3)[%2]
     %squared_min = muli %3, %3 : index
-    %4 = dim %arg0, 1 : memref
+    %4 = dim %arg0, %c1 : memref
     %5 = affine.min #map2(%arg4)[%4]
     %6 = std.subview %arg0[%arg3, %arg4][%squared_min, %5][%c1, %c1] : memref to memref
-    %7 = dim %arg1, 0 : memref
+    %7 = dim %arg1, %c0 : memref
     %8 = affine.min #map1(%arg3)[%7]
-    %9 = dim %arg1, 1 : memref
+    %9 = dim %arg1, %c1 : memref
     %10 = affine.min #map2(%arg4)[%9]
     %11 = std.subview %arg1[%arg3, %arg4][%8, %10][%c1, %c1] : memref to memref
-    %12 = dim %arg2, 0 : memref
+    %12 = dim %arg2, %c0 : memref
     %13 = affine.min #map1(%arg3)[%12]
-    %14 = dim %arg2, 1 : memref
+    %14 = dim %arg2, %c1 : memref
     %15 = affine.min #map2(%arg4)[%14]
     %16 = std.subview %arg2[%arg3, %arg4][%13, %15][%c1, %c1] : memref to
memref scf.parallel (%arg5, %arg6) = (%c0, %c0) to (%squared_min, %5) step (%c1, %c1) { @@ -251,42 +251,42 @@ // CHECK: module { // CHECK-LABEL: func @sum( // CHECK-SAME: [[VAL_0:%.*]]: memref, [[VAL_1:%.*]]: memref, [[VAL_2:%.*]]: memref) { -// CHECK: [[VAL_3:%.*]] = constant 1 : index -// CHECK: [[VAL_4:%.*]] = constant 0 : index -// CHECK: [[VAL_5:%.*]] = constant 3 : index -// CHECK: [[VAL_6:%.*]] = constant 2 : index -// CHECK: [[VAL_7:%.*]] = dim [[VAL_0]], 0 : memref -// CHECK: [[VAL_8:%.*]] = dim [[VAL_0]], 1 : memref +// CHECK: %[[C1:.*]] = constant 1 : index +// CHECK: %[[C0:.*]] = constant 0 : index +// CHECK: %[[C3:.*]] = constant 3 : index +// CHECK: %[[C2:.*]] = constant 2 : index +// CHECK: [[VAL_7:%.*]] = dim [[VAL_0]], %[[C0]] : memref +// CHECK: [[VAL_8:%.*]] = dim [[VAL_0]], %[[C1]] : memref // CHECK: [[VAL_9:%.*]] = constant 1 : index -// CHECK: [[VAL_10:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_7]], [[VAL_4]], [[VAL_6]]] -// CHECK: [[VAL_11:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_8]], [[VAL_4]], [[VAL_5]]] +// CHECK: [[VAL_10:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_7]], %[[C0]], %[[C2]]] +// CHECK: [[VAL_11:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_8]], %[[C0]], %[[C3]]] // CHECK: [[VAL_12:%.*]] = constant 4 : index -// CHECK: [[VAL_13:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_12]], [[VAL_4]], [[VAL_3]]] +// CHECK: [[VAL_13:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_12]], %[[C0]], %[[C1]]] // CHECK: [[VAL_14:%.*]] = constant 3 : index -// CHECK: [[VAL_15:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_14]], [[VAL_4]], [[VAL_3]]] +// CHECK: [[VAL_15:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_14]], %[[C0]], %[[C1]]] // CHECK: gpu.launch blocks([[VAL_16:%.*]], [[VAL_17:%.*]], [[VAL_18:%.*]]) in ([[VAL_19:%.*]] = [[VAL_10]], [[VAL_20:%.*]] = [[VAL_11]], [[VAL_21:%.*]] = [[VAL_9]]) threads([[VAL_22:%.*]], [[VAL_23:%.*]], [[VAL_24:%.*]]) in ([[VAL_25:%.*]] = [[VAL_13]], [[VAL_26:%.*]] = [[VAL_15]], [[VAL_27:%.*]] = [[VAL_9]]) { -// CHECK: [[VAL_28:%.*]] = affine.apply #[[MAP2]]([[VAL_16]]){{\[}}[[VAL_6]], [[VAL_4]]] -// CHECK: [[VAL_29:%.*]] = affine.apply #[[MAP2]]([[VAL_17]]){{\[}}[[VAL_5]], [[VAL_4]]] -// CHECK: [[VAL_30:%.*]] = dim [[VAL_0]], 0 : memref +// CHECK: [[VAL_28:%.*]] = affine.apply #[[MAP2]]([[VAL_16]]){{\[}}%[[C2]], %[[C0]]] +// CHECK: [[VAL_29:%.*]] = affine.apply #[[MAP2]]([[VAL_17]]){{\[}}%[[C3]], %[[C0]]] +// CHECK: [[VAL_30:%.*]] = dim [[VAL_0]], %[[C0]] : memref // CHECK: [[VAL_31:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_30]]] // CHECK: [[VAL_31_SQUARED:%.*]] = muli [[VAL_31]], [[VAL_31]] : index -// CHECK: [[VAL_32:%.*]] = dim [[VAL_0]], 1 : memref +// CHECK: [[VAL_32:%.*]] = dim [[VAL_0]], %[[C1]] : memref // CHECK: [[VAL_33:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_32]]] -// CHECK: [[VAL_34:%.*]] = subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31_SQUARED]], [[VAL_33]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref to memref -// CHECK: [[VAL_35:%.*]] = dim [[VAL_1]], 0 : memref +// CHECK: [[VAL_34:%.*]] = subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31_SQUARED]], [[VAL_33]]] {{\[}}%[[C1]], %[[C1]]] : memref to memref +// CHECK: [[VAL_35:%.*]] = dim [[VAL_1]], %[[C0]] : memref // CHECK: [[VAL_36:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_35]]] -// CHECK: [[VAL_37:%.*]] = dim [[VAL_1]], 1 : memref +// CHECK: [[VAL_37:%.*]] = dim [[VAL_1]], %[[C1]] : memref // CHECK: [[VAL_38:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_37]]] -// CHECK: [[VAL_39:%.*]] = subview [[VAL_1]]{{\[}}[[VAL_28]], 
[[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref to memref -// CHECK: [[VAL_40:%.*]] = dim [[VAL_2]], 0 : memref +// CHECK: [[VAL_39:%.*]] = subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}%[[C1]], %[[C1]]] : memref to memref +// CHECK: [[VAL_40:%.*]] = dim [[VAL_2]], %[[C0]] : memref // CHECK: [[VAL_41:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_40]]] -// CHECK: [[VAL_42:%.*]] = dim [[VAL_2]], 1 : memref +// CHECK: [[VAL_42:%.*]] = dim [[VAL_2]], %[[C1]] : memref // CHECK: [[VAL_43:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_42]]] -// CHECK: [[VAL_44:%.*]] = subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref to memref -// CHECK: [[VAL_45:%.*]] = affine.apply #[[MAP2]]([[VAL_22]]){{\[}}[[VAL_3]], [[VAL_4]]] +// CHECK: [[VAL_44:%.*]] = subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}%[[C1]], %[[C1]]] : memref to memref +// CHECK: [[VAL_45:%.*]] = affine.apply #[[MAP2]]([[VAL_22]]){{\[}}%[[C1]], %[[C0]]] // CHECK: [[VAL_46:%.*]] = cmpi "slt", [[VAL_45]], [[VAL_31_SQUARED]] : index // CHECK: scf.if [[VAL_46]] { -// CHECK: [[VAL_47:%.*]] = affine.apply #[[MAP2]]([[VAL_23]]){{\[}}[[VAL_3]], [[VAL_4]]] +// CHECK: [[VAL_47:%.*]] = affine.apply #[[MAP2]]([[VAL_23]]){{\[}}%[[C1]], %[[C0]]] // CHECK: [[VAL_48:%.*]] = cmpi "slt", [[VAL_47]], [[VAL_33]] : index // CHECK: scf.if [[VAL_48]] { // CHECK: [[VAL_49:%.*]] = load [[VAL_34]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref diff --git a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -convert-std-to-llvm %s | FileCheck %s +// RUN: mlir-opt -convert-std-to-llvm %s | FileCheck %s --dump-input-on-failure // RUN: mlir-opt -convert-std-to-llvm='use-aligned-alloc=1' %s | FileCheck %s --check-prefix=ALIGNED-ALLOC // CHECK-LABEL: func @check_strided_memref_arguments( @@ -355,15 +355,20 @@ // CHECK-LABEL: func @mixed_memref_dim func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) { -// CHECK: llvm.mlir.constant(42 : index) : !llvm.i64 - %0 = dim %mixed, 0 : memref<42x?x?x13x?xf32> -// CHECK-NEXT: llvm.extractvalue %[[ld:.*]][3, 1] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }"> - %1 = dim %mixed, 1 : memref<42x?x?x13x?xf32> -// CHECK-NEXT: llvm.extractvalue %[[ld]][3, 2] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }"> - %2 = dim %mixed, 2 : memref<42x?x?x13x?xf32> -// CHECK-NEXT: llvm.mlir.constant(13 : index) : !llvm.i64 - %3 = dim %mixed, 3 : memref<42x?x?x13x?xf32> -// CHECK-NEXT: llvm.extractvalue %[[ld]][3, 4] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }"> - %4 = dim %mixed, 4 : memref<42x?x?x13x?xf32> +// CHECK: llvm.mlir.constant(42 : index) : !llvm.i64 + %c0 = constant 0 : index + %0 = dim %mixed, %c0 : memref<42x?x?x13x?xf32> +// CHECK: llvm.extractvalue %[[ld:.*]][3, 1] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }"> + %c1 = constant 1 : index + %1 = dim %mixed, %c1 : memref<42x?x?x13x?xf32> +// CHECK: llvm.extractvalue %[[ld]][3, 2] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }"> + %c2 = constant 2 : index + %2 = dim %mixed, %c2 : memref<42x?x?x13x?xf32> +// CHECK: llvm.mlir.constant(13 : index) : !llvm.i64 + %c3 = constant 3 : index + %3 = dim %mixed, %c3 : 
memref<42x?x?x13x?xf32> +// CHECK: llvm.extractvalue %[[ld]][3, 4] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }"> + %c4 = constant 4 : index + %4 = dim %mixed, %c4 : memref<42x?x?x13x?xf32> return } diff --git a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir @@ -351,19 +351,24 @@ func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) { // CHECK: llvm.mlir.constant(42 : index) : !llvm.i64 // BAREPTR: llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }"> -// BAREPTR-NEXT: llvm.mlir.constant(42 : index) : !llvm.i64 - %0 = dim %static, 0 : memref<42x32x15x13x27xf32> -// CHECK-NEXT: llvm.mlir.constant(32 : index) : !llvm.i64 -// BAREPTR-NEXT: llvm.mlir.constant(32 : index) : !llvm.i64 - %1 = dim %static, 1 : memref<42x32x15x13x27xf32> -// CHECK-NEXT: llvm.mlir.constant(15 : index) : !llvm.i64 -// BAREPTR-NEXT: llvm.mlir.constant(15 : index) : !llvm.i64 - %2 = dim %static, 2 : memref<42x32x15x13x27xf32> -// CHECK-NEXT: llvm.mlir.constant(13 : index) : !llvm.i64 -// BAREPTR-NEXT: llvm.mlir.constant(13 : index) : !llvm.i64 - %3 = dim %static, 3 : memref<42x32x15x13x27xf32> -// CHECK-NEXT: llvm.mlir.constant(27 : index) : !llvm.i64 -// BAREPTR-NEXT: llvm.mlir.constant(27 : index) : !llvm.i64 - %4 = dim %static, 4 : memref<42x32x15x13x27xf32> +// BAREPTR: llvm.mlir.constant(42 : index) : !llvm.i64 + %c0 = constant 0 : index + %0 = dim %static, %c0 : memref<42x32x15x13x27xf32> +// CHECK: llvm.mlir.constant(32 : index) : !llvm.i64 +// BAREPTR: llvm.mlir.constant(32 : index) : !llvm.i64 + %c1 = constant 1 : index + %1 = dim %static, %c1 : memref<42x32x15x13x27xf32> +// CHECK: llvm.mlir.constant(15 : index) : !llvm.i64 +// BAREPTR: llvm.mlir.constant(15 : index) : !llvm.i64 + %c2 = constant 2 : index + %2 = dim %static, %c2 : memref<42x32x15x13x27xf32> +// CHECK: llvm.mlir.constant(13 : index) : !llvm.i64 +// BAREPTR: llvm.mlir.constant(13 : index) : !llvm.i64 + %c3 = constant 3 : index + %3 = dim %static, %c3 : memref<42x32x15x13x27xf32> +// CHECK: llvm.mlir.constant(27 : index) : !llvm.i64 +// BAREPTR: llvm.mlir.constant(27 : index) : !llvm.i64 + %c4 = constant 4 : index + %4 = dim %static, %c4 : memref<42x32x15x13x27xf32> return } diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir --- a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir +++ b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir @@ -45,8 +45,8 @@ } } // CHECK: %[[tensor:[0-9]+]] = alloc - // CHECK-NOT: {{.*}} dim %[[tensor]], 0 - // CHECK-NOT: {{.*}} dim %[[tensor]], 3 + // CHECK-NOT: {{.*}} dim %[[tensor]], %c0 + // CHECK-NOT: {{.*}} dim %[[tensor]], %c3 return } @@ -233,7 +233,7 @@ // CHECK-DAG: %[[splat:.*]] = constant dense<7.000000e+00> : vector<15xf32> // CHECK-DAG: %[[alloc:.*]] = alloca() {alignment = 128 : i64} : memref<3xvector<15xf32>> - // CHECK-DAG: %[[dim:.*]] = dim %[[A]], 0 : memref + // CHECK-DAG: %[[dim:.*]] = dim %[[A]], %c0 : memref // CHECK: affine.for %[[I:.*]] = 0 to 3 { // CHECK: %[[add:.*]] = affine.apply #[[MAP0]](%[[I]])[%[[base]]] // CHECK: %[[cond1:.*]] = cmpi "slt", %[[add]], %[[dim]] : index @@ -249,7 +249,7 @@ // FULL-UNROLL: %[[pad:.*]] = constant 7.000000e+00 : f32 // FULL-UNROLL: %[[VEC0:.*]] = constant dense<7.000000e+00> : 
vector<3x15xf32> // FULL-UNROLL: %[[SPLAT:.*]] = constant dense<7.000000e+00> : vector<15xf32> - // FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], 0 : memref + // FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], %c0 : memref // FULL-UNROLL: cmpi "slt", %[[base]], %[[DIM]] : index // FULL-UNROLL: %[[VEC1:.*]] = scf.if %{{.*}} -> (vector<3x15xf32>) { // FULL-UNROLL: vector.transfer_read %[[A]][%[[base]], %[[base]]], %[[pad]] : memref, vector<15xf32> @@ -307,7 +307,7 @@ // CHECK: %[[alloc:.*]] = alloca() {alignment = 128 : i64} : memref<3xvector<15xf32>> // CHECK: %[[vmemref:.*]] = vector.type_cast %[[alloc]] : memref<3xvector<15xf32>> to memref> // CHECK: store %[[vec]], %[[vmemref]][] : memref> - // CHECK: %[[dim:.*]] = dim %[[A]], 0 : memref + // CHECK: %[[dim:.*]] = dim %[[A]], %c0 : memref // CHECK: affine.for %[[I:.*]] = 0 to 3 { // CHECK: %[[add:.*]] = affine.apply #[[MAP0]](%[[I]])[%[[base]]] // CHECK: %[[cmp:.*]] = cmpi "slt", %[[add]], %[[dim]] : index @@ -316,7 +316,7 @@ // CHECK: vector.transfer_write %[[vec_1d]], %[[A]][%[[add]], %[[base]]] : vector<15xf32>, memref // CHECK: } - // FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], 0 : memref + // FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], %c0 : memref // FULL-UNROLL: %[[CMP0:.*]] = cmpi "slt", %[[base]], %[[DIM]] : index // FULL-UNROLL: scf.if %[[CMP0]] { // FULL-UNROLL: %[[V0:.*]] = vector.extract %[[vec]][0] : vector<3x15xf32> diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir --- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir +++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir @@ -12,14 +12,18 @@ // Maps introduced to vectorize fastest varying memory index. // CHECK-LABEL: func @vec1d_1 func @vec1d_1(%A : memref, %B : memref) { -// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref // CHECK: for {{.*}} step 128 // CHECK-NEXT: %{{.*}} = affine.apply #map0(%[[C0]]) @@ -27,42 +31,50 @@ // CHECK-NEXT: %{{.*}} = constant 0.0{{.*}}: f32 // CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref, vector<128xf32> affine.for %i0 = 0 to %M { // vectorized due to scalar -> vector - %a0 = affine.load %A[%cst0, %cst0] : memref + %a0 = affine.load %A[%c0, %c0] : memref } return } // CHECK-LABEL: func @vec1d_2 func @vec1d_2(%A : memref, %B : memref) { -// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : 
index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK:for [[IV3:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128 // CHECK-NEXT: %[[CST:.*]] = constant 0.0{{.*}}: f32 // CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %[[CST]] : memref, vector<128xf32> affine.for %i3 = 0 to %M { // vectorized - %a3 = affine.load %A[%cst0, %i3] : memref + %a3 = affine.load %A[%c0, %i3] : memref } return } // CHECK-LABEL: func @vec1d_3 func @vec1d_3(%A : memref, %B : memref) { -// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %arg0, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %arg0, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %arg1, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %arg0, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %arg0, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %arg1, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK:for [[IV8:%[arg0-9]+]] = 0 to [[ARG_M]] step 128 // CHECK-NEXT: for [[IV9:%[arg0-9]*]] = 0 to [[ARG_N]] { // CHECK-NEXT: %[[APP9_0:[0-9]+]] = affine.apply {{.*}}([[IV9]], [[IV8]]) @@ -131,15 +143,19 @@ // CHECK-LABEL: func @vec_rejected_1 func @vec_rejected_1(%A : memref, %B : memref) { -// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK:for {{.*}} [[ARG_M]] { affine.for %i1 = 0 to %M { // not vectorized %a1 = affine.load %A[%i1, %i1] : memref @@ -149,33 +165,41 @@ // CHECK-LABEL: func @vec_rejected_2 func @vec_rejected_2(%A : memref, %B : memref) { -// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: 
%[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to [[ARG_M]] { affine.for %i2 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1 - %a2 = affine.load %A[%i2, %cst0] : memref + %a2 = affine.load %A[%i2, %c0] : memref } return } // CHECK-LABEL: func @vec_rejected_3 func @vec_rejected_3(%A : memref, %B : memref) { -// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK:for [[IV4:%[arg0-9]+]] = 0 to [[ARG_M]] step 128 { // CHECK-NEXT: for [[IV5:%[arg0-9]*]] = 0 to [[ARG_N]] { // CHECK-NEXT: %{{.*}} = constant 0.0{{.*}}: f32 @@ -190,15 +214,19 @@ // CHECK-LABEL: func @vec_rejected_4 func @vec_rejected_4(%A : memref, %B : memref) { -// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK: for [[IV6:%[arg0-9]*]] = 0 to [[ARG_M]] { // CHECK-NEXT: for [[IV7:%[arg0-9]*]] = 0 to [[ARG_N]] { affine.for %i6 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1 @@ -211,15 +239,19 @@ // CHECK-LABEL: func @vec_rejected_5 func @vec_rejected_5(%A : memref, %B : memref) { -// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// 
CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK: for [[IV10:%[arg0-9]*]] = 0 to %{{[0-9]*}} { // CHECK: for [[IV11:%[arg0-9]*]] = 0 to %{{[0-9]*}} { affine.for %i10 = 0 to %M { // not vectorized, need per load transposes @@ -233,15 +265,19 @@ // CHECK-LABEL: func @vec_rejected_6 func @vec_rejected_6(%A : memref, %B : memref) { -// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK: for [[IV12:%[arg0-9]*]] = 0 to %{{[0-9]*}} { // CHECK: for [[IV13:%[arg0-9]*]] = 0 to %{{[0-9]*}} { // CHECK: for [[IV14:%[arg0-9]+]] = 0 to [[ARG_P]] step 128 @@ -257,15 +293,19 @@ // CHECK-LABEL: func @vec_rejected_7 func @vec_rejected_7(%A : memref, %B : memref) { -// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} { affine.for %i16 = 0 to %M { // not vectorized, can't vectorize a vector load %a16 = alloc(%M) : memref> @@ -276,15 +316,19 @@ // CHECK-LABEL: func @vec_rejected_8 func @vec_rejected_8(%A : memref, %B : memref) { -// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : 
index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} { // CHECK: for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128 // CHECK: %{{.*}} = affine.apply #map0(%{{.*}}) @@ -293,7 +337,7 @@ // CHECK: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref, vector<128xf32> affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %{{.*}} in DFS post-order prevents vectorizing %{{.*}} affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector - %a18 = affine.load %A[%cst0, %cst0] : memref + %a18 = affine.load %A[%c0, %c0] : memref } } return @@ -301,15 +345,19 @@ // CHECK-LABEL: func @vec_rejected_9 func @vec_rejected_9(%A : memref, %B : memref) { -// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} { // CHECK: for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128 // CHECK: %{{.*}} = affine.apply #map0(%{{.*}}) @@ -318,7 +366,7 @@ // CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref, vector<128xf32> affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %i18 in DFS post-order prevents vectorizing %{{.*}} affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector - %a18 = affine.load %A[%cst0, %cst0] : memref + %a18 = affine.load %A[%c0, %c0] : memref } } return @@ -326,19 +374,23 @@ // CHECK-LABEL: func @vec_rejected_10 func @vec_rejected_10(%A : memref, %B : memref) { -// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index -// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref -// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref -// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %B, 2 : memref - %cst0 = constant 0 : index -// +// CHECK-DAG: %[[C0:.*]] = constant 0 : index +// CHECK-DAG: %[[C1:.*]] = constant 1 : index +// CHECK-DAG: %[[C2:.*]] = constant 2 : index +// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref +// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref +// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %B, %c2 : memref + // CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} { affine.for %i15 = 0 to %M { // not vectorized due to condition below affine.if #set0(%i15) { - %a15 = affine.load %A[%cst0, %cst0] : memref + %a15 = affine.load %A[%c0, %c0] : memref } } return @@ 
-347,7 +399,8 @@ // This should not vectorize and should not crash. // CHECK-LABEL: @vec_rejected_11 func @vec_rejected_11(%A : memref, %C : memref) { - %N = dim %A, 0 : memref + %c0 = constant 0 : index + %N = dim %A, %c0 : memref affine.for %i = 0 to %N { // CHECK-NOT: vector %a = affine.load %A[%i, %i] : memref // not vectorized @@ -365,7 +418,8 @@ // This should not vectorize due to the sequential dependence in the scf. // CHECK-LABEL: @vec_rejected_sequential func @vec_rejected_sequential(%A : memref) { - %N = dim %A, 0 : memref + %c0 = constant 0 : index + %N = dim %A, %c0 : memref affine.for %i = 0 to %N { // CHECK-NOT: vector %a = affine.load %A[%i] : memref diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir --- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir +++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir @@ -12,9 +12,12 @@ // VECT-DAG: #[[map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)> func @vec2d(%A : memref) { - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %A, 2 : memref + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %A, %c2 : memref // CHECK: for {{.*}} = 0 to %{{.*}} { // CHECK: for {{.*}} = 0 to %{{.*}} step 32 // CHECK: for {{.*}} = 0 to %{{.*}} step 256 @@ -100,13 +103,15 @@ // VECT-LABEL: func @vectorize_matmul func @vectorize_matmul(%arg0: memref, %arg1: memref, %arg2: memref) { %c0 = constant 0 : index - %M = dim %arg0, 0 : memref - %K = dim %arg0, 1 : memref - %N = dim %arg2, 1 : memref + %c1 = constant 1 : index + %M = dim %arg0, %c0 : memref + %K = dim %arg0, %c1 : memref + %N = dim %arg2, %c1 : memref // VECT: %[[C0:.*]] = constant 0 : index - // VECT-NEXT: %[[M:.*]] = dim %{{.*}}, 0 : memref - // VECT-NEXT: %[[K:.*]] = dim %{{.*}}, 1 : memref - // VECT-NEXT: %[[N:.*]] = dim %{{.*}}, 1 : memref + // VECT-NEXT: %[[C1:.*]] = constant 1 : index + // VECT-NEXT: %[[M:.*]] = dim %{{.*}}, %[[C0]] : memref + // VECT-NEXT: %[[K:.*]] = dim %{{.*}}, %[[C1]] : memref + // VECT-NEXT: %[[N:.*]] = dim %{{.*}}, %[[C1]] : memref // VECT: {{.*}} #[[map_id1]](%[[M]]) step 4 { // VECT-NEXT: {{.*}} #[[map_id1]](%[[N]]) step 8 { // VECT: %[[VC0:.*]] = constant dense<0.000000e+00> : vector<4x8xf32> diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir --- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir +++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir @@ -4,25 +4,28 @@ // CHECK: #[[map_proj_d0d1d2_d0d1d2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> func @vec3d(%A : memref) { - %0 = dim %A, 0 : memref - %1 = dim %A, 1 : memref - %2 = dim %A, 2 : memref - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 64 { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { - // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} : memref, vector<32x64x256xf32> - affine.for %t0 = 0 to %0 { - affine.for %t1 = 0 to %0 { - affine.for %i0 = 0 to %0 { - affine.for %i1 = 0 to %1 { - affine.for %i2 = 0 to %2 { - %a2 = affine.load %A[%i0, %i1, %i2] : memref - } - } - } - } - } - return + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %0 = dim %A, 
%c0 : memref + %1 = dim %A, %c1 : memref + %2 = dim %A, %c2 : memref + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 64 { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { + // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} : memref, vector<32x64x256xf32> + affine.for %t0 = 0 to %0 { + affine.for %t1 = 0 to %0 { + affine.for %i0 = 0 to %0 { + affine.for %i1 = 0 to %1 { + affine.for %i2 = 0 to %2 { + %a2 = affine.load %A[%i0, %i1, %i2] : memref + } + } + } + } + } + return } diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir --- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir +++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir @@ -4,31 +4,34 @@ // CHECK: #[[map_proj_d0d1d2_d0d2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d2)> func @vec2d(%A : memref) { - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %A, 2 : memref - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 - // CHECK: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d0d2]]} : memref, vector<32x256xf32> - affine.for %i0 = 0 to %M { - affine.for %i1 = 0 to %N { - affine.for %i2 = 0 to %P { - %a2 = affine.load %A[%i0, %i1, %i2] : memref - } - } - } - // CHECK: for {{.*}} = 0 to %{{.*}} { - // CHECK: for {{.*}} = 0 to %{{.*}} { - // CHECK: for {{.*}} = 0 to %{{.*}} { - // For the case: --test-fastest-varying=2 --test-fastest-varying=0 no - // vectorization happens because of loop nesting order - affine.for %i3 = 0 to %M { - affine.for %i4 = 0 to %N { - affine.for %i5 = 0 to %P { - %a5 = affine.load %A[%i4, %i5, %i3] : memref - } - } - } - return + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %A, %c2 : memref + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 + // CHECK: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d0d2]]} : memref, vector<32x256xf32> + affine.for %i0 = 0 to %M { + affine.for %i1 = 0 to %N { + affine.for %i2 = 0 to %P { + %a2 = affine.load %A[%i0, %i1, %i2] : memref + } + } + } + // CHECK: for {{.*}} = 0 to %{{.*}} { + // CHECK: for {{.*}} = 0 to %{{.*}} { + // CHECK: for {{.*}} = 0 to %{{.*}} { + // For the case: --test-fastest-varying=2 --test-fastest-varying=0 no + // vectorization happens because of loop nesting order + affine.for %i3 = 0 to %M { + affine.for %i4 = 0 to %N { + affine.for %i5 = 0 to %P { + %a5 = affine.load %A[%i4, %i5, %i3] : memref + } + } + } + return } diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir --- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir +++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir @@ -4,62 +4,68 @@ // CHECK: #[[map_proj_d0d1d2_d2d0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2, d0)> func @vec2d(%A : 
memref) { - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %A, 2 : memref - // CHECK: for {{.*}} = 0 to %{{.*}} { - // CHECK: for {{.*}} = 0 to %{{.*}} { - // CHECK: for {{.*}} = 0 to %{{.*}} { - // For the case: --test-fastest-varying=0 --test-fastest-varying=2 no - // vectorization happens because of loop nesting order. - affine.for %i0 = 0 to %M { - affine.for %i1 = 0 to %N { - affine.for %i2 = 0 to %P { - %a2 = affine.load %A[%i0, %i1, %i2] : memref - } - } - } - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref, vector<32x256xf32> - affine.for %i3 = 0 to %M { - affine.for %i4 = 0 to %N { - affine.for %i5 = 0 to %P { - %a5 = affine.load %A[%i4, %i5, %i3] : memref - } - } - } - return + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %A, %c2 : memref + // CHECK: for {{.*}} = 0 to %{{.*}} { + // CHECK: for {{.*}} = 0 to %{{.*}} { + // CHECK: for {{.*}} = 0 to %{{.*}} { + // For the case: --test-fastest-varying=0 --test-fastest-varying=2 no + // vectorization happens because of loop nesting order. + affine.for %i0 = 0 to %M { + affine.for %i1 = 0 to %N { + affine.for %i2 = 0 to %P { + %a2 = affine.load %A[%i0, %i1, %i2] : memref + } + } + } + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref, vector<32x256xf32> + affine.for %i3 = 0 to %M { + affine.for %i4 = 0 to %N { + affine.for %i5 = 0 to %P { + %a5 = affine.load %A[%i4, %i5, %i3] : memref + } + } + } + return } func @vec2d_imperfectly_nested(%A : memref) { - %0 = dim %A, 0 : memref - %1 = dim %A, 1 : memref - %2 = dim %A, 2 : memref - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { - // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref, vector<32x256xf32> - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref, vector<32x256xf32> - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref, vector<32x256xf32> - affine.for %i0 = 0 to %0 { - affine.for %i1 = 0 to %1 { - affine.for %i2 = 0 to %2 { - %a2 = affine.load %A[%i2, %i1, %i0] : memref - } - } - affine.for %i3 = 0 to %1 { - affine.for %i4 = 0 to %2 { - %a4 = affine.load %A[%i3, %i4, %i0] : memref - } - affine.for %i5 = 0 to %2 { - %a5 = affine.load %A[%i3, %i5, %i0] : memref - } - } - } - return + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %0 = dim %A, %c0 : memref + %1 = dim %A, %c1 : memref + %2 = dim %A, %c2 : memref + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: 
affine.for %{{.*}} = 0 to %{{.*}} step 256 { + // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref, vector<32x256xf32> + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref, vector<32x256xf32> + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref, vector<32x256xf32> + affine.for %i0 = 0 to %0 { + affine.for %i1 = 0 to %1 { + affine.for %i2 = 0 to %2 { + %a2 = affine.load %A[%i2, %i1, %i0] : memref + } + } + affine.for %i3 = 0 to %1 { + affine.for %i4 = 0 to %2 { + %a4 = affine.load %A[%i3, %i4, %i0] : memref + } + affine.for %i5 = 0 to %2 { + %a5 = affine.load %A[%i3, %i5, %i0] : memref + } + } + } + return } diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir --- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir +++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir @@ -4,63 +4,69 @@ // CHECK-DAG: #[[map_proj_d0d1d2_d2d1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2, d1)> func @vec2d(%A : memref) { - %M = dim %A, 0 : memref - %N = dim %A, 1 : memref - %P = dim %A, 2 : memref - // CHECK: for {{.*}} = 0 to %{{.*}} { - // CHECK: for {{.*}} = 0 to %{{.*}} { - // CHECK: for {{.*}} = 0 to %{{.*}} { - // For the case: --test-fastest-varying=0 --test-fastest-varying=1 no - // vectorization happens because of loop nesting order. + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %M = dim %A, %c0 : memref + %N = dim %A, %c1 : memref + %P = dim %A, %c2 : memref + // CHECK: for {{.*}} = 0 to %{{.*}} { + // CHECK: for {{.*}} = 0 to %{{.*}} { + // CHECK: for {{.*}} = 0 to %{{.*}} { + // For the case: --test-fastest-varying=0 --test-fastest-varying=1 no + // vectorization happens because of loop nesting order. 
affine.for %i0 = 0 to %M { - affine.for %i1 = 0 to %N { - affine.for %i2 = 0 to %P { - %a2 = affine.load %A[%i0, %i1, %i2] : memref - } - } - } - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 - // CHECK: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref, vector<32x256xf32> - affine.for %i3 = 0 to %M { - affine.for %i4 = 0 to %N { - affine.for %i5 = 0 to %P { - %a5 = affine.load %A[%i4, %i5, %i3] : memref - } - } - } - return + affine.for %i1 = 0 to %N { + affine.for %i2 = 0 to %P { + %a2 = affine.load %A[%i0, %i1, %i2] : memref + } + } + } + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 + // CHECK: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref, vector<32x256xf32> + affine.for %i3 = 0 to %M { + affine.for %i4 = 0 to %N { + affine.for %i5 = 0 to %P { + %a5 = affine.load %A[%i4, %i5, %i3] : memref + } + } + } + return } func @vec2d_imperfectly_nested(%A : memref) { - %0 = dim %A, 0 : memref - %1 = dim %A, 1 : memref - %2 = dim %A, 2 : memref - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref, vector<32x256xf32> - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { - // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref, vector<32x256xf32> - // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { - // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref, vector<32x256xf32> - affine.for %i0 = 0 to %0 { - affine.for %i1 = 0 to %1 { - affine.for %i2 = 0 to %2 { - %a2 = affine.load %A[%i2, %i1, %i0] : memref - } - } - affine.for %i3 = 0 to %1 { - affine.for %i4 = 0 to %2 { - %a4 = affine.load %A[%i3, %i4, %i0] : memref - } - affine.for %i5 = 0 to %2 { - %a5 = affine.load %A[%i3, %i5, %i0] : memref - } - } - } - return + %c0 = constant 0 : index + %c1 = constant 1 : index + %c2 = constant 2 : index + %0 = dim %A, %c0 : memref + %1 = dim %A, %c1 : memref + %2 = dim %A, %c2 : memref + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref, vector<32x256xf32> + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { + // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref, vector<32x256xf32> + // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 256 { + // CHECK: %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref, vector<32x256xf32> + affine.for %i0 = 0 to %0 { + affine.for %i1 = 0 to %1 { + affine.for %i2 = 0 to %2 { + 
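+        // Editorial note (not in the original test): the innermost loop %i2
+        // below indexes the outermost dimension of %A, i.e. the transposed
+        // access pattern this file exercises.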
%a2 = affine.load %A[%i2, %i1, %i0] : memref + } + } + affine.for %i3 = 0 to %1 { + affine.for %i4 = 0 to %2 { + %a4 = affine.load %A[%i3, %i4, %i0] : memref + } + affine.for %i5 = 0 to %2 { + %a5 = affine.load %A[%i3, %i5, %i0] : memref + } + } + } + return } diff --git a/mlir/test/Dialect/Affine/dma-generate.mlir b/mlir/test/Dialect/Affine/dma-generate.mlir --- a/mlir/test/Dialect/Affine/dma-generate.mlir +++ b/mlir/test/Dialect/Affine/dma-generate.mlir @@ -271,8 +271,9 @@ // CHECK-LABEL: func @dma_unknown_size func @dma_unknown_size(%arg0: memref) { - %M = dim %arg0, 0 : memref - %N = dim %arg0, 0 : memref + %c0 = constant 0 : index + %M = dim %arg0, %c0 : memref + %N = dim %arg0, %c0 : memref affine.for %i = 0 to %M { affine.for %j = 0 to %N { // If this loop nest isn't tiled, the access requires a non-constant DMA diff --git a/mlir/test/Dialect/Affine/invalid.mlir b/mlir/test/Dialect/Affine/invalid.mlir --- a/mlir/test/Dialect/Affine/invalid.mlir +++ b/mlir/test/Dialect/Affine/invalid.mlir @@ -122,7 +122,8 @@ func @affine_if_invalid_dimop_dim(%arg0: index, %arg1: index, %arg2: index, %arg3: index) { affine.for %n0 = 0 to 7 { %0 = alloc(%arg0, %arg1, %arg2, %arg3) : memref - %dim = dim %0, 0 : memref + %c0 = constant 0 : index + %dim = dim %0, %c0 : memref // expected-error@+1 {{operand cannot be used as a symbol}} affine.if #set0(%dim)[%n0] {} diff --git a/mlir/test/Dialect/Affine/loop-tiling.mlir b/mlir/test/Dialect/Affine/loop-tiling.mlir --- a/mlir/test/Dialect/Affine/loop-tiling.mlir +++ b/mlir/test/Dialect/Affine/loop-tiling.mlir @@ -66,7 +66,8 @@ #ub = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)> // CHECK-LABEL: func @loop_max_min_bound(%{{.*}}: memref, %{{.*}}: index, %{{.*}}: index) { func @loop_max_min_bound(%A : memref, %L : index, %U : index) { - %M = dim %A, 0 : memref + %c0 = constant 0 : index + %M = dim %A, %c0 : memref affine.for %i = max #lb()[%L] to min #ub()[%M, %U] { addi %i, %i : index } @@ -111,7 +112,8 @@ func @tile_with_symbolic_loop_upper_bounds(%arg0: memref, %arg1: memref, %arg2: memref) { %cst = constant 0.000000e+00 : f32 - %0 = dim %arg0, 0 : memref + %c0 = constant 0 : index + %0 = dim %arg0, %c0 : memref affine.for %i0 = 0 to %0 { affine.for %i1 = 0 to %0 { affine.store %cst, %arg2[%i0, %i1] : memref @@ -128,7 +130,7 @@ return } -// CHECK: dim %{{.*}}, 0 : memref +// CHECK: dim %{{.*}}, %c0 : memref // CHECK-NEXT: affine.for %{{.*}} = 0 to %{{.*}} step 32 { // CHECK-NEXT: affine.for %{{.*}} = 0 to %{{.*}} step 32 { // CHECK-NEXT: affine.for %{{.*}} = #map3(%{{.*}}) to min [[UBMAP]](%{{.*}})[%{{.*}}] { @@ -155,14 +157,15 @@ // CHECK-DAG: [[UBMAP:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0 + s1)> func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref, %limit: index) { - %dim0 = dim %arg0, 0 : memref + %c0 = constant 0 : index + %dim0 = dim %arg0, %c0 : memref affine.for %i0 = 0 to affine_map<()[s0, s1] -> (s0 + s1)> ()[%dim0, %limit] { %v0 = affine.load %arg0[%i0] : memref } return } -// CHECK: dim %{{.*}}, 0 : memref +// CHECK: dim %{{.*}}, %c0 : memref // CHECK-NEXT: affine.for %{{.*}} = 0 to [[MAP1]]()[%{{.*}}, %{{.*}}] step 32 { // CHECK-NEXT: affine.for %{{.*}} = [[MAP0]](%{{.*}}) to min [[UBMAP]](%{{.*}})[%{{.*}}, %{{.*}}] { // CHECK-NEXT: affine.load diff --git a/mlir/test/Dialect/Affine/ops.mlir b/mlir/test/Dialect/Affine/ops.mlir --- a/mlir/test/Dialect/Affine/ops.mlir +++ b/mlir/test/Dialect/Affine/ops.mlir @@ -95,16 +95,16 @@ // ----- func @valid_symbols(%arg0: index, %arg1: index, %arg2: index) { - %c0 = constant 1 : 
index - %c1 = constant 0 : index + %c1 = constant 1 : index + %c0 = constant 0 : index %0 = alloc(%arg0, %arg1) : memref affine.for %arg3 = 0 to %arg2 step 768 { - %13 = dim %0, 1 : memref + %13 = dim %0, %c1 : memref affine.for %arg4 = 0 to %13 step 264 { - %18 = dim %0, 0 : memref + %18 = dim %0, %c0 : memref %20 = std.subview %0[%c0, %c0][%18,%arg4][%c1,%c1] : memref to memref - %24 = dim %20, 0 : memref + %24 = dim %20, %c0 : memref affine.for %arg5 = 0 to %24 step 768 { "foo"() : () -> () } diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir --- a/mlir/test/Dialect/GPU/outlining.mlir +++ b/mlir/test/Dialect/GPU/outlining.mlir @@ -92,8 +92,9 @@ // CHECK: %[[CST:.*]] = constant 8 : index %cst = constant 8 : index %cst2 = constant 2 : index - %cst3 = dim %arg0, 0 : memref - // CHECK: "gpu.launch_func"(%[[CST]], %[[CST]], %[[CST]], %[[CST]], %[[CST]], %[[CST]], %{{.*}}) {kernel = @extra_constants_kernel::@extra_constants_kernel} : (index, index, index, index, index, index, memref) -> () + %c0 = constant 0 : index + %cst3 = dim %arg0, %c0 : memref + // CHECK: "gpu.launch_func"(%[[CST]], %[[CST]], %[[CST]], %[[CST]], %[[CST]], %[[CST]], %{{.*}}, %{{.*}}) {kernel = @extra_constants_kernel::@extra_constants_kernel} : (index, index, index, index, index, index, memref, index) -> () gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst, %grid_z = %cst) threads(%tx, %ty, %tz) in (%block_x = %cst, %block_y = %cst, @@ -104,7 +105,7 @@ return } -// CHECK-LABEL: func @extra_constants_kernel(%{{.*}}: memref) +// CHECK-LABEL: func @extra_constants_kernel(%{{.*}}: memref, %{{.*}}: index) // CHECK: constant // CHECK: constant @@ -122,7 +123,7 @@ gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1) threads(%tx, %ty, %tz) in (%block_x = %c1, %block_y = %c1, - %block_z = %c1) { + %block_z = %c1) { "use1"(%c2, %c2) : (index, index) -> () "use2"(%c2) : (index) -> () gpu.terminator diff --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir --- a/mlir/test/Dialect/Linalg/affine.mlir +++ b/mlir/test/Dialect/Linalg/affine.mlir @@ -43,11 +43,11 @@ // CHECK-LABEL: func @conv_view3( // CHECK: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { -// CHECK: %[[Z0:.*]] = dim %arg0, 0 : memref -// CHECK: %[[Q:.*]] = dim %arg0, 1 : memref -// CHECK: %[[K:.*]] = dim %arg0, 2 : memref -// CHECK: %[[B:.*]] = dim %arg1, 0 : memref -// CHECK: %[[X0:.*]] = dim %arg2, 1 : memref +// CHECK: %[[Z0:.*]] = dim %arg0, %c0 : memref +// CHECK: %[[Q:.*]] = dim %arg0, %c1 : memref +// CHECK: %[[K:.*]] = dim %arg0, %c2 : memref +// CHECK: %[[B:.*]] = dim %arg1, %c0 : memref +// CHECK: %[[X0:.*]] = dim %arg2, %c1 : memref // CHECK: affine.for %{{.*}} = 0 to %[[B]] { // CHECK: affine.for %{{.*}} = 0 to %[[X0]] { // CHECK: affine.for %{{.*}} = 0 to %[[K]] { @@ -70,13 +70,13 @@ // CHECK-LABEL: func @conv_padding // CHECK: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { // CHECK: %[[ZERO:.*]] = constant 0.000000e+00 : f32 -// CHECK: %[[Z0:.*]] = dim %arg0, 0 : memref -// CHECK: %[[Z1:.*]] = dim %arg0, 1 : memref -// CHECK: %[[Q:.*]] = dim %arg0, 2 : memref -// CHECK: %[[K:.*]] = dim %arg0, 3 : memref -// CHECK: %[[B:.*]] = dim %arg1, 0 : memref -// CHECK: %[[X0:.*]] = dim %arg2, 1 : memref -// CHECK: %[[X1:.*]] = dim %arg2, 2 : memref +// CHECK: %[[Z0:.*]] = dim %arg0, %c0 : memref +// CHECK: %[[Z1:.*]] = dim %arg0, %c1 : memref +// CHECK: %[[Q:.*]] = dim %arg0, %c2 : memref +// CHECK: %[[K:.*]] = dim %arg0, %c3 : memref +// 
CHECK: %[[B:.*]] = dim %arg1, %c0 : memref +// CHECK: %[[X0:.*]] = dim %arg2, %c1 : memref +// CHECK: %[[X1:.*]] = dim %arg2, %c2 : memref // CHECK: affine.for %{{.*}} = 0 to %[[B]] { // CHECK: affine.for %{{.*}} = 0 to %[[X0]] { // CHECK: affine.for %{{.*}} = 0 to %[[X1]] { @@ -109,10 +109,10 @@ // CHECK-SAME: %[[mA:[a-zA-Z0-9]+]]: memref // CHECK-SAME: %[[mB:[a-zA-Z0-9]+]]: memref // CHECK-SAME: %[[mC:[a-zA-Z0-9]+]]: memref -// CHECK: %[[B:.*]] = dim %[[mA]], 0 : memref -// CHECK: %[[M:.*]] = dim %[[mA]], 1 : memref -// CHECK: %[[K:.*]] = dim %[[mA]], 2 : memref -// CHECK: %[[N:.*]] = dim %[[mB]], 2 : memref +// CHECK: %[[B:.*]] = dim %[[mA]], %c0 : memref +// CHECK: %[[M:.*]] = dim %[[mA]], %c1 : memref +// CHECK: %[[K:.*]] = dim %[[mA]], %c2 : memref +// CHECK: %[[N:.*]] = dim %[[mB]], %c2 : memref // CHECK: affine.for %[[b:.*]] = 0 to %[[B]] { // CHECK: affine.for %[[m:.*]] = 0 to %[[M]] { // CHECK: affine.for %[[n:.*]] = 0 to %[[N]] { diff --git a/mlir/test/Dialect/Linalg/fusion-2-level.mlir b/mlir/test/Dialect/Linalg/fusion-2-level.mlir --- a/mlir/test/Dialect/Linalg/fusion-2-level.mlir +++ b/mlir/test/Dialect/Linalg/fusion-2-level.mlir @@ -9,9 +9,9 @@ %c40 = constant 40 : index %c30 = constant 30 : index %c20 = constant 20 : index - %0 = dim %C, 0 : memref - %1 = dim %C, 1 : memref - %2 = dim %D, 1 : memref + %0 = dim %C, %c0 : memref + %1 = dim %C, %c1 : memref + %2 = dim %D, %c1 : memref linalg.matmul(%A, %B, %C) : memref, memref, memref scf.for %arg5 = %c0 to %0 step %c20 { scf.for %arg6 = %c0 to %2 step %c30 { @@ -19,9 +19,9 @@ %5 = std.subview %C[%arg5, %arg7][%c20, %c40][%c1, %c1] : memref to memref %7 = std.subview %D[%arg7, %arg6][%c40, %c30][%c1, %c1]: memref to memref %8 = std.subview %E[%arg5, %arg6][%c20, %c40][%c1, %c1] : memref to memref - %9 = dim %5, 0 : memref - %10 = dim %5, 1 : memref - %11 = dim %7, 1 : memref + %9 = dim %5, %c0 : memref + %10 = dim %5, %c1 : memref + %11 = dim %7, %c1 : memref scf.for %arg8 = %c0 to %9 step %c2 { scf.for %arg9 = %c0 to %11 step %c3 { scf.for %arg10 = %c0 to %10 step %c4 { diff --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir --- a/mlir/test/Dialect/Linalg/fusion.mlir +++ b/mlir/test/Dialect/Linalg/fusion.mlir @@ -10,14 +10,14 @@ %c4 = constant 4 : index %c3 = constant 3 : index %c2 = constant 2 : index - %0 = dim %A, 0 : memref - %1 = dim %A, 1 : memref - %2 = dim %B, 1 : memref + %c1 = constant 1 : index + %0 = dim %A, %c0 : memref + %1 = dim %A, %c1 : memref + %2 = dim %B, %c1 : memref linalg.matmul(%A, %B, %C) : memref, memref, memref - %c1 = constant 1 : index scf.for %arg5 = %c0 to %0 step %c2 { scf.for %arg6 = %c0 to %2 step %c3 { scf.for %arg7 = %c0 to %1 step %c4 { @@ -65,9 +65,9 @@ memref, memref, memref - %0 = dim %C, 0 : memref - %1 = dim %C, 1 : memref - %2 = dim %D, 1 : memref + %0 = dim %C, %c0 : memref + %1 = dim %C, %c1 : memref + %2 = dim %D, %c1 : memref scf.for %arg5 = %c0 to %0 step %c2 { scf.for %arg6 = %c0 to %2 step %c3 { scf.for %arg7 = %c0 to %1 step %c4 { @@ -91,9 +91,9 @@ } // CHECK-LABEL: func @f2 // CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}}) -// CHECK-DAG: %[[C_0:.*]] = dim %[[C]], 0 : memref -// CHECK-DAG: %[[C_1:.*]] = dim %[[C]], 1 : memref -// CHECK-DAG: %[[D_1:.*]] = dim %[[D]], 1 : memref +// CHECK-DAG: %[[C_0:.*]] = dim %[[C]], %c0{{[_0-9]*}} : memref +// CHECK-DAG: %[[C_1:.*]] = dim %[[C]], %c1{{[_0-9]*}} : memref +// CHECK-DAG: %[[D_1:.*]] = dim %[[D]], %c1{{[_0-9]*}} : memref // CHECK: scf.for %{{.*}} 
= %{{.*}} to %[[C_0]] step %{{.*}} { // CHECK: scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} { // CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} { @@ -117,9 +117,9 @@ memref, memref, memref - %0 = dim %D, 0 : memref - %1 = dim %D, 1 : memref - %2 = dim %C, 1 : memref + %0 = dim %D, %c0 : memref + %1 = dim %D, %c1 : memref + %2 = dim %C, %c1 : memref scf.for %arg5 = %c0 to %0 step %c2 { scf.for %arg6 = %c0 to %2 step %c3 { scf.for %arg7 = %c0 to %1 step %c4 { @@ -143,9 +143,9 @@ } // CHECK-LABEL: func @f3 // CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}}) -// CHECK: %[[D_0:.*]] = dim %[[D]], 0 : memref -// CHECK: %[[D_1:.*]] = dim %[[D]], 1 : memref -// CHECK: %[[C_1:.*]] = dim %[[C]], 1 : memref +// CHECK: %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref +// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref +// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref // CHECK: scf.for %{{.*}} = %{{.*}} to %[[D_0]] step %{{.*}} { // CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} { // CHECK: scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} { @@ -173,9 +173,9 @@ memref, memref, memref - %0 = dim %C, 0 : memref - %1 = dim %C, 1 : memref - %2 = dim %D, 1 : memref + %0 = dim %C, %c0 : memref + %1 = dim %C, %c1 : memref + %2 = dim %D, %c1 : memref scf.for %arg5 = %c0 to %0 step %c2 { scf.for %arg6 = %c0 to %2 step %c3 { scf.for %arg7 = %c0 to %1 step %c4 { @@ -199,9 +199,9 @@ } // CHECK-LABEL: func @f4 // CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}}) -// CHECK: %[[C_0:.*]] = dim %[[C]], 0 : memref -// CHECK: %[[C_1:.*]] = dim %[[C]], 1 : memref -// CHECK: %[[D_1:.*]] = dim %[[D]], 1 : memref +// CHECK: %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref +// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref +// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref // CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_0]] step %{{.*}} { // CHECK: scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} { // CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} { @@ -224,9 +224,9 @@ %c4 = constant 4 : index %c3 = constant 3 : index %c2 = constant 2 : index - %0 = dim %B, 1 : memref - %1 = dim %D, 0 : memref - %2 = dim %D, 1 : memref + %0 = dim %B, %c1 : memref + %1 = dim %D, %c0 : memref + %2 = dim %D, %c1 : memref linalg.matmul(%A, %B, %C) : memref, memref, @@ -258,9 +258,9 @@ } // CHECK-LABEL: func @f5 // CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}}) -// CHECK-DAG: %[[B_1:.*]] = dim %[[B]], 1 : memref -// CHECK-DAG: %[[D_0:.*]] = dim %[[D]], 0 : memref -// CHECK-DAG: %[[D_1:.*]] = dim %[[D]], 1 : memref +// CHECK-DAG: %[[B_1:.*]] = dim %[[B]], %c1{{_[0-9]*}} : memref +// CHECK-DAG: %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref +// CHECK-DAG: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref // CHECK: scf.for %[[I:.*]] = %{{.*}} to %[[D_0]] step %{{.*}} { // CHECK: scf.for %[[J:.*]] = %{{.*}} to %[[B_1]] step %{{.*}} { // CHECK: scf.for %[[K:.*]] = %{{.*}} to %[[D_1]] step %{{.*}} { @@ -296,7 +296,7 @@ %c4 = constant 4 : index %c3 = constant 3 : index %c2 = constant 2 : index - %0 = dim %C, 1 : memref + %0 = dim %C, %c1 : memref linalg.matmul(%A, %B, %C) : memref, memref, @@ -305,8 +305,8 @@ memref, memref, memref - %1 = dim %C, 0 : memref - %2 = dim %D, 1 : memref + %1 = dim %C, %c0 : memref + %2 = dim %D, %c1 : memref scf.for %arg5 = %c0 to %1 step %c2 { scf.for %arg6 = %c0 to %2 step %c3 { scf.for %arg7 = 
%c0 to %0 step %c4 { @@ -354,11 +354,11 @@ %c4 = constant 4 : index %c3 = constant 3 : index %c2 = constant 2 : index - %0 = dim %A, 0 : memref - %1 = dim %A, 1 : memref - %2 = dim %C, 1 : memref - %3 = dim %C, 0 : memref - %4 = dim %D, 1 : memref + %0 = dim %A, %c0 : memref + %1 = dim %A, %c1 : memref + %2 = dim %C, %c1 : memref + %3 = dim %C, %c0 : memref + %4 = dim %D, %c1 : memref linalg.matmul(%A, %C, %E) : memref, memref, @@ -409,11 +409,11 @@ } // CHECK-LABEL: func @f7 // CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}}) -// CHECK: %[[A_0:.*]] = dim %[[A]], 0 : memref -// CHECK: %[[A_1:.*]] = dim %[[A]], 1 : memref -// CHECK: %[[C_1:.*]] = dim %[[C]], 1 : memref -// CHECK: %[[C_0:.*]] = dim %[[C]], 0 : memref -// CHECK: %[[D_1:.*]] = dim %[[D]], 1 : memref +// CHECK: %[[A_0:.*]] = dim %[[A]], %c0{{_[0-9]*}} : memref +// CHECK: %[[A_1:.*]] = dim %[[A]], %c1{{_[0-9]*}} : memref +// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref +// CHECK: %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref +// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref // CHECK: linalg.matmul(%[[A]], %[[C]], %[[E]]) // CHECK: scf.for %{{.*}} = %{{.*}} to %[[A_0]] step %{{.*}} { // CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} { @@ -443,8 +443,8 @@ %c4 = constant 4 : index %c3 = constant 3 : index %c2 = constant 2 : index - %0 = dim %A, 0 : memref - %1 = dim %A, 1 : memref + %0 = dim %A, %c0 : memref + %1 = dim %A, %c1 : memref linalg.matmul(%A, %C, %D) : memref, memref, @@ -453,7 +453,7 @@ memref, memref, memref - %2 = dim %D, 1 : memref + %2 = dim %D, %c1 : memref scf.for %arg5 = %c0 to %0 step %c2 { scf.for %arg6 = %c0 to %2 step %c3 { scf.for %arg7 = %c0 to %1 step %c4 { @@ -512,8 +512,8 @@ }: memref, memref, memref - %0 = dim %B, 0 : memref - %1 = dim %B, 1 : memref + %0 = dim %B, %c0 : memref + %1 = dim %B, %c1 : memref scf.for %arg4 = %c0 to %0 step %c2 { scf.for %arg5 = %c0 to %1 step %c3 { %4 = std.subview %B[%arg4, %arg5][%c2, %c3][%c1, %c1] : @@ -571,8 +571,8 @@ }: memref, memref, memref - %0 = dim %B, 0 : memref - %1 = dim %B, 1 : memref + %0 = dim %B, %c0 : memref + %1 = dim %B, %c1 : memref scf.for %arg4 = %c0 to %0 step %c2 { scf.for %arg5 = %c0 to %1 step %c3 { %4 = std.subview %B[%arg4, %arg5][%c2, %c3][%c1, %c1] : @@ -638,10 +638,10 @@ linalg.yield %2 : f32 }: memref<100x10xf32>, memref<100x10xf32>, memref<100x10xf32> dealloc %0 : memref<100x10xf32> - %2 = dim %1, 0 : memref<100x10xf32> - %3 = dim %1, 1 : memref<100x10xf32> - %4 = dim %arg2, 0 : memref<100x10xf32> - %5 = dim %arg2, 1 : memref<100x10xf32> + %2 = dim %1, %c0 : memref<100x10xf32> + %3 = dim %1, %c1 : memref<100x10xf32> + %4 = dim %arg2, %c0 : memref<100x10xf32> + %5 = dim %arg2, %c1 : memref<100x10xf32> scf.for %i = %c0 to %2 step %c1 { scf.for %j = %c0 to %3 step %c1 { %6 = std.subview %1[%i, %j][%c1, %c1][%c1, %c1] : @@ -693,15 +693,15 @@ %c0 = constant 0 : index %c2 = constant 2 : index %c3 = constant 3 : index - %4 = dim %arg1, 0 : memref<2x3x1x1xf32> - %5 = dim %arg1, 1 : memref<2x3x1x1xf32> - %6 = dim %arg0, 0 : memref<1x4x5x1xf32> - %7 = dim %arg0, 1 : memref<1x4x5x1xf32> - %8 = dim %arg0, 3 : memref<1x4x5x1xf32> - %9 = dim %arg2, 0 : memref<1x4x5x1xf32> - %10 = dim %arg2, 1 : memref<1x4x5x1xf32> - %11 = dim %arg2, 2 : memref<1x4x5x1xf32> - %12 = dim %arg2, 3 : memref<1x4x5x1xf32> + %4 = dim %arg1, %c0 : memref<2x3x1x1xf32> + %5 = dim %arg1, %c1 : memref<2x3x1x1xf32> + %6 = dim %arg0, %c0 : memref<1x4x5x1xf32> + %7 = dim %arg0, %c1 : 
memref<1x4x5x1xf32> + %8 = dim %arg0, %c3 : memref<1x4x5x1xf32> + %9 = dim %arg2, %c0 : memref<1x4x5x1xf32> + %10 = dim %arg2, %c1 : memref<1x4x5x1xf32> + %11 = dim %arg2, %c2 : memref<1x4x5x1xf32> + %12 = dim %arg2, %c3 : memref<1x4x5x1xf32> %13 = linalg.range %c0 : %6 : %c2 : !linalg.range %14 = linalg.range %c0 : %10 : %c3 : !linalg.range scf.for %arg3 = %c0 to %6 step %c2 { @@ -709,13 +709,13 @@ %15 = affine.min #map0(%c2, %c1, %arg3) %16 = affine.apply #map2()[%7] %17 = affine.min #map0(%16, %c4, %arg4) - %18 = dim %arg0, 2 : memref<1x4x5x1xf32> - %19 = dim %arg0, 3 : memref<1x4x5x1xf32> + %18 = dim %arg0, %c2 : memref<1x4x5x1xf32> + %19 = dim %arg0, %c3 : memref<1x4x5x1xf32> %20 = subview %arg0[%arg3, %arg4, %c0, %c0] [%15, %17, %18, %19] [%c1, %c1, %c1, %c1] : memref<1x4x5x1xf32> to memref %21 = affine.min #map0(%c2, %c1, %arg3) %22 = affine.min #map0(%c3, %c4, %arg4) - %23 = dim %arg2, 2 : memref<1x4x5x1xf32> - %24 = dim %arg2, 3 : memref<1x4x5x1xf32> + %23 = dim %arg2, %c2 : memref<1x4x5x1xf32> + %24 = dim %arg2, %c3 : memref<1x4x5x1xf32> %25 = subview %arg2[%arg3, %arg4, %c0, %c0] [%21, %22, %23, %24] [%c1, %c1, %c1, %c1] : memref<1x4x5x1xf32> to memref linalg.conv(%arg1, %20, %25) {dilations = [1, 1], strides = [1, 1]} : memref<2x3x1x1xf32>, memref, memref } diff --git a/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir b/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir --- a/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir +++ b/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir @@ -21,10 +21,10 @@ %c0 = constant 0 : index %c25 = constant 25 : index %c10 = constant 10 : index - %0 = dim %C, 0 : memref - %1 = dim %C, 1 : memref - %2 = dim %D, 0 : memref - %3 = dim %D, 1 : memref + %0 = dim %C, %c0 : memref + %1 = dim %C, %c1 : memref + %2 = dim %D, %c0 : memref + %3 = dim %D, %c1 : memref scf.for %arg2 = %c0 to %0 step %c10 { scf.for %arg3 = %c0 to %1 step %c25 { %4 = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] : @@ -87,10 +87,10 @@ %out = addf %ab, %i_float : f32 linalg.yield %out : f32 }: memref, memref, memref - %C_X = dim %C, 0 : memref - %C_Y = dim %C, 1 : memref - %D_X = dim %D, 0 : memref - %D_Y = dim %D, 1 : memref + %C_X = dim %C, %c0 : memref + %C_Y = dim %C, %c1 : memref + %D_X = dim %D, %c0 : memref + %D_Y = dim %D, %c1 : memref scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%C_X, %C_Y) step (%c10, %c25) { %C_view = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] : memref to memref @@ -145,10 +145,10 @@ %out = addf %ab, %j_float : f32 linalg.yield %out : f32 }: memref, memref, memref - %C_X = dim %C, 0 : memref - %C_Y = dim %C, 1 : memref - %D_X = dim %D, 0 : memref - %D_Y = dim %D, 1 : memref + %C_X = dim %C, %c0 : memref + %C_Y = dim %C, %c1 : memref + %D_X = dim %D, %c0 : memref + %D_Y = dim %D, %c1 : memref %3 = linalg.range %c0 : %C_Y : %c3 : !linalg.range scf.parallel (%j) = (%c0) to (%C_Y) step (%c3) { %0 = affine.min affine_map<(d0, d1, d2) -> (d0, d1 - d2)>(%c3, %C_Y, %j) diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir --- a/mlir/test/Dialect/Linalg/loops.mlir +++ b/mlir/test/Dialect/Linalg/loops.mlir @@ -153,7 +153,7 @@ } // CHECKLOOP-LABEL: func @dot_view( // CHECKLOOP: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { -// CHECKLOOP: %[[K:.*]] = dim %arg0, 0 : memref +// CHECKLOOP: %[[K:.*]] = dim %arg0, %c0 : memref // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { // CHECKLOOP-DAG: %[[a:.*]] = load %arg0[%{{.*}}] : memref // CHECKLOOP-DAG: %[[b:.*]] = load %{{.*}}[%{{.*}}] : 
memref @@ -164,7 +164,7 @@ // CHECKPARALLEL-LABEL: func @dot_view( // CHECKPARALLEL: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { -// CHECKPARALLEL: %[[K:.*]] = dim %arg0, 0 : memref +// CHECKPARALLEL: %[[K:.*]] = dim %arg0, %c0 : memref // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { // CHECKPARALLEL-DAG: %[[a:.*]] = load %arg0[%{{.*}}] : memref // CHECKPARALLEL-DAG: %[[b:.*]] = load %{{.*}}[%{{.*}}] : memref @@ -267,11 +267,11 @@ } // CHECKLOOP-LABEL: func @conv_view3( // CHECKLOOP: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { -// CHECKLOOP: %[[Z0:.*]] = dim %arg0, 0 : memref -// CHECKLOOP: %[[Q:.*]] = dim %arg0, 1 : memref -// CHECKLOOP: %[[K:.*]] = dim %arg0, 2 : memref -// CHECKLOOP: %[[B:.*]] = dim %arg1, 0 : memref -// CHECKLOOP: %[[X0:.*]] = dim %arg2, 1 : memref +// CHECKLOOP: %[[Z0:.*]] = dim %arg0, %c0 : memref +// CHECKLOOP: %[[Q:.*]] = dim %arg0, %c1 : memref +// CHECKLOOP: %[[K:.*]] = dim %arg0, %c2 : memref +// CHECKLOOP: %[[B:.*]] = dim %arg1, %c0 : memref +// CHECKLOOP: %[[X0:.*]] = dim %arg2, %c1 : memref // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[B]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[X0]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { @@ -287,11 +287,11 @@ // CHECKPARALLEL-LABEL: func @conv_view3( // CHECKPARALLEL: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { -// CHECKPARALLEL: %[[Z0:.*]] = dim %arg0, 0 : memref -// CHECKPARALLEL: %[[Q:.*]] = dim %arg0, 1 : memref -// CHECKPARALLEL: %[[K:.*]] = dim %arg0, 2 : memref -// CHECKPARALLEL: %[[B:.*]] = dim %arg1, 0 : memref -// CHECKPARALLEL: %[[X0:.*]] = dim %arg2, 1 : memref +// CHECKPARALLEL: %[[Z0:.*]] = dim %arg0, %c0 : memref +// CHECKPARALLEL: %[[Q:.*]] = dim %arg0, %c1 : memref +// CHECKPARALLEL: %[[K:.*]] = dim %arg0, %c2 : memref +// CHECKPARALLEL: %[[B:.*]] = dim %arg1, %c0 : memref +// CHECKPARALLEL: %[[X0:.*]] = dim %arg2, %c1 : memref // CHECKPARALLEL: scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}) to (%[[B]], %[[X0]], %[[K]]) step (%{{.*}}, %{{.*}}, %{{.*}}) { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} { @@ -309,13 +309,13 @@ } // CHECKLOOP-LABEL: func @conv_view4( // CHECKLOOP: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { -// CHECKLOOP: %[[Z0:.*]] = dim %arg0, 0 : memref -// CHECKLOOP: %[[Z1:.*]] = dim %arg0, 1 : memref -// CHECKLOOP: %[[Q:.*]] = dim %arg0, 2 : memref -// CHECKLOOP: %[[K:.*]] = dim %arg0, 3 : memref -// CHECKLOOP: %[[B:.*]] = dim %arg1, 0 : memref -// CHECKLOOP: %[[X0:.*]] = dim %arg2, 1 : memref -// CHECKLOOP: %[[X1:.*]] = dim %arg2, 2 : memref +// CHECKLOOP: %[[Z0:.*]] = dim %arg0, %c0 : memref +// CHECKLOOP: %[[Z1:.*]] = dim %arg0, %c1 : memref +// CHECKLOOP: %[[Q:.*]] = dim %arg0, %c2 : memref +// CHECKLOOP: %[[K:.*]] = dim %arg0, %c3 : memref +// CHECKLOOP: %[[B:.*]] = dim %arg1, %c0 : memref +// CHECKLOOP: %[[X0:.*]] = dim %arg2, %c1 : memref +// CHECKLOOP: %[[X1:.*]] = dim %arg2, %c2 : memref // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[B]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[X0]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[X1]] step %{{.*}} { @@ -334,13 +334,13 @@ // CHECKPARALLEL-LABEL: func @conv_view4( // CHECKPARALLEL: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { -// CHECKPARALLEL: %[[Z0:.*]] = dim %arg0, 0 : memref -// CHECKPARALLEL: %[[Z1:.*]] = dim %arg0, 1 : memref -// CHECKPARALLEL: %[[Q:.*]] = dim 
%arg0, 2 : memref -// CHECKPARALLEL: %[[K:.*]] = dim %arg0, 3 : memref -// CHECKPARALLEL: %[[B:.*]] = dim %arg1, 0 : memref -// CHECKPARALLEL: %[[X0:.*]] = dim %arg2, 1 : memref -// CHECKPARALLEL: %[[X1:.*]] = dim %arg2, 2 : memref +// CHECKPARALLEL: %[[Z0:.*]] = dim %arg0, %c0 : memref +// CHECKPARALLEL: %[[Z1:.*]] = dim %arg0, %c1 : memref +// CHECKPARALLEL: %[[Q:.*]] = dim %arg0, %c2 : memref +// CHECKPARALLEL: %[[K:.*]] = dim %arg0, %c3 : memref +// CHECKPARALLEL: %[[B:.*]] = dim %arg1, %c0 : memref +// CHECKPARALLEL: %[[X0:.*]] = dim %arg2, %c1 : memref +// CHECKPARALLEL: %[[X1:.*]] = dim %arg2, %c2 : memref // CHECKPARALLEL: scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) to (%[[B]], %[[X0]], %[[X1]], %[[K]]) step (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} { @@ -366,13 +366,13 @@ // CHECKLOOP-LABEL: func @conv_padding // CHECKLOOP: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { // CHECKLOOP: %[[ZERO:.*]] = constant 0.000000e+00 : f32 -// CHECKLOOP: %[[Z0:.*]] = dim %arg0, 0 : memref -// CHECKLOOP: %[[Z1:.*]] = dim %arg0, 1 : memref -// CHECKLOOP: %[[Q:.*]] = dim %arg0, 2 : memref -// CHECKLOOP: %[[K:.*]] = dim %arg0, 3 : memref -// CHECKLOOP: %[[B:.*]] = dim %arg1, 0 : memref -// CHECKLOOP: %[[X0:.*]] = dim %arg2, 1 : memref -// CHECKLOOP: %[[X1:.*]] = dim %arg2, 2 : memref +// CHECKLOOP: %[[Z0:.*]] = dim %arg0, %c0 : memref +// CHECKLOOP: %[[Z1:.*]] = dim %arg0, %c1 : memref +// CHECKLOOP: %[[Q:.*]] = dim %arg0, %c2 : memref +// CHECKLOOP: %[[K:.*]] = dim %arg0, %c3 : memref +// CHECKLOOP: %[[B:.*]] = dim %arg1, %c0 : memref +// CHECKLOOP: %[[X0:.*]] = dim %arg2, %c1 : memref +// CHECKLOOP: %[[X1:.*]] = dim %arg2, %c2 : memref // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[B]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[X0]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[X1]] step %{{.*}} { @@ -395,13 +395,13 @@ // CHECKPARALLEL-LABEL: func @conv_padding // CHECKPARALLEL: %{{.*}}: memref, %{{.*}}: memref, %{{.*}}: memref) { // CHECKPARALLEL: %[[ZERO:.*]] = constant 0.000000e+00 : f32 -// CHECKPARALLEL: %[[Z0:.*]] = dim %arg0, 0 : memref -// CHECKPARALLEL: %[[Z1:.*]] = dim %arg0, 1 : memref -// CHECKPARALLEL: %[[Q:.*]] = dim %arg0, 2 : memref -// CHECKPARALLEL: %[[K:.*]] = dim %arg0, 3 : memref -// CHECKPARALLEL: %[[B:.*]] = dim %arg1, 0 : memref -// CHECKPARALLEL: %[[X0:.*]] = dim %arg2, 1 : memref -// CHECKPARALLEL: %[[X1:.*]] = dim %arg2, 2 : memref +// CHECKPARALLEL: %[[Z0:.*]] = dim %arg0, %c0 : memref +// CHECKPARALLEL: %[[Z1:.*]] = dim %arg0, %c1 : memref +// CHECKPARALLEL: %[[Q:.*]] = dim %arg0, %c2 : memref +// CHECKPARALLEL: %[[K:.*]] = dim %arg0, %c3 : memref +// CHECKPARALLEL: %[[B:.*]] = dim %arg1, %c0 : memref +// CHECKPARALLEL: %[[X0:.*]] = dim %arg2, %c1 : memref +// CHECKPARALLEL: %[[X1:.*]] = dim %arg2, %c2 : memref // CHECKPARALLEL: scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) to (%[[B]], %[[X0]], %[[X1]], %[[K]]) step (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} { @@ -426,10 +426,10 @@ return } // CHECKLOOP-LABEL: func @pooling_max -// CHECKLOOP: %[[WX:.*]] = dim %arg1, 0 : memref -// CHECKLOOP: %[[WY:.*]] = dim %arg1, 1 : memref -// CHECKLOOP: %[[OX:.*]] = dim %arg2, 0 : memref -// 
CHECKLOOP: %[[OY:.*]] = dim %arg2, 1 : memref +// CHECKLOOP: %[[WX:.*]] = dim %arg1, %c0 : memref +// CHECKLOOP: %[[WY:.*]] = dim %arg1, %c1 : memref +// CHECKLOOP: %[[OX:.*]] = dim %arg2, %c0 : memref +// CHECKLOOP: %[[OY:.*]] = dim %arg2, %c1 : memref // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[OX]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} { @@ -442,10 +442,10 @@ // CHECKLOOP: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref // CHECKPARALLEL-LABEL: func @pooling_max -// CHECKPARALLEL: %[[WX:.*]] = dim %arg1, 0 : memref -// CHECKPARALLEL: %[[WY:.*]] = dim %arg1, 1 : memref -// CHECKPARALLEL: %[[OX:.*]] = dim %arg2, 0 : memref -// CHECKPARALLEL: %[[OY:.*]] = dim %arg2, 1 : memref +// CHECKPARALLEL: %[[WX:.*]] = dim %arg1, %c0 : memref +// CHECKPARALLEL: %[[WY:.*]] = dim %arg1, %c1 : memref +// CHECKPARALLEL: %[[OX:.*]] = dim %arg2, %c0 : memref +// CHECKPARALLEL: %[[OY:.*]] = dim %arg2, %c1 : memref // CHECKPARALLEL: scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} { @@ -464,10 +464,10 @@ return } // CHECKLOOP-LABEL: func @pooling_min -// CHECKLOOP: %[[WX:.*]] = dim %arg1, 0 : memref -// CHECKLOOP: %[[WY:.*]] = dim %arg1, 1 : memref -// CHECKLOOP: %[[OX:.*]] = dim %arg2, 0 : memref -// CHECKLOOP: %[[OY:.*]] = dim %arg2, 1 : memref +// CHECKLOOP: %[[WX:.*]] = dim %arg1, %c0 : memref +// CHECKLOOP: %[[WY:.*]] = dim %arg1, %c1 : memref +// CHECKLOOP: %[[OX:.*]] = dim %arg2, %c0 : memref +// CHECKLOOP: %[[OY:.*]] = dim %arg2, %c1 : memref // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[OX]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} { @@ -480,10 +480,10 @@ // CHECKLOOP: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref // CHECKPARALLEL-LABEL: func @pooling_min -// CHECKPARALLEL: %[[WX:.*]] = dim %arg1, 0 : memref -// CHECKPARALLEL: %[[WY:.*]] = dim %arg1, 1 : memref -// CHECKPARALLEL: %[[OX:.*]] = dim %arg2, 0 : memref -// CHECKPARALLEL: %[[OY:.*]] = dim %arg2, 1 : memref +// CHECKPARALLEL: %[[WX:.*]] = dim %arg1, %c0 : memref +// CHECKPARALLEL: %[[WY:.*]] = dim %arg1, %c1 : memref +// CHECKPARALLEL: %[[OX:.*]] = dim %arg2, %c0 : memref +// CHECKPARALLEL: %[[OY:.*]] = dim %arg2, %c1 : memref // CHECKPARALLEL: scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} { @@ -502,10 +502,10 @@ return } // CHECKLOOP-LABEL: func @pooling_sum -// CHECKLOOP: %[[WX:.*]] = dim %arg1, 0 : memref -// CHECKLOOP: %[[WY:.*]] = dim %arg1, 1 : memref -// CHECKLOOP: %[[OX:.*]] = dim %arg2, 0 : memref -// CHECKLOOP: %[[OY:.*]] = dim %arg2, 1 : memref +// CHECKLOOP: %[[WX:.*]] = dim %arg1, %c0 : memref +// CHECKLOOP: %[[WY:.*]] = dim %arg1, %c1 : memref +// CHECKLOOP: %[[OX:.*]] = dim %arg2, %c0 : memref +// CHECKLOOP: %[[OY:.*]] = dim %arg2, %c1 : memref // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[OX]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} { // CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} { @@ -518,10 +518,10 @@ // CHECKLOOP: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref // 
CHECKPARALLEL-LABEL: func @pooling_sum -// CHECKPARALLEL: %[[WX:.*]] = dim %arg1, 0 : memref -// CHECKPARALLEL: %[[WY:.*]] = dim %arg1, 1 : memref -// CHECKPARALLEL: %[[OX:.*]] = dim %arg2, 0 : memref -// CHECKPARALLEL: %[[OY:.*]] = dim %arg2, 1 : memref +// CHECKPARALLEL: %[[WX:.*]] = dim %arg1, %c0 : memref +// CHECKPARALLEL: %[[WY:.*]] = dim %arg1, %c1 : memref +// CHECKPARALLEL: %[[OX:.*]] = dim %arg2, %c0 : memref +// CHECKPARALLEL: %[[OY:.*]] = dim %arg2, %c1 : memref // CHECKPARALLEL: scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} { // CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} { @@ -879,10 +879,10 @@ // CHECKLOOP-SAME: %[[mA:[a-zA-Z0-9]+]]: memref // CHECKLOOP-SAME: %[[mB:[a-zA-Z0-9]+]]: memref // CHECKLOOP-SAME: %[[mC:[a-zA-Z0-9]+]]: memref -// CHECKLOOP: %[[B:.*]] = dim %[[mA]], 0 : memref -// CHECKLOOP: %[[M:.*]] = dim %[[mA]], 1 : memref -// CHECKLOOP: %[[K:.*]] = dim %[[mA]], 2 : memref -// CHECKLOOP: %[[N:.*]] = dim %[[mB]], 2 : memref +// CHECKLOOP: %[[B:.*]] = dim %[[mA]], %c0 : memref +// CHECKLOOP: %[[M:.*]] = dim %[[mA]], %c1 : memref +// CHECKLOOP: %[[K:.*]] = dim %[[mA]], %c2 : memref +// CHECKLOOP: %[[N:.*]] = dim %[[mB]], %c2 : memref // CHECKLOOP: scf.for %[[b:.*]] = %{{.*}} to %[[B]] step %{{.*}} { // CHECKLOOP: scf.for %[[m:.*]] = %{{.*}} to %[[M]] step %{{.*}} { // CHECKLOOP: scf.for %[[n:.*]] = %{{.*}} to %[[N]] step %{{.*}} { @@ -898,10 +898,10 @@ // CHECKPARALLEL-SAME: %[[mA:[a-zA-Z0-9]+]]: memref // CHECKPARALLEL-SAME: %[[mB:[a-zA-Z0-9]+]]: memref // CHECKPARALLEL-SAME: %[[mC:[a-zA-Z0-9]+]]: memref -// CHECKPARALLEL: %[[B:.*]] = dim %[[mA]], 0 : memref -// CHECKPARALLEL: %[[M:.*]] = dim %[[mA]], 1 : memref -// CHECKPARALLEL: %[[K:.*]] = dim %[[mA]], 2 : memref -// CHECKPARALLEL: %[[N:.*]] = dim %[[mB]], 2 : memref +// CHECKPARALLEL: %[[B:.*]] = dim %[[mA]], %c0 : memref +// CHECKPARALLEL: %[[M:.*]] = dim %[[mA]], %c1 : memref +// CHECKPARALLEL: %[[K:.*]] = dim %[[mA]], %c2 : memref +// CHECKPARALLEL: %[[N:.*]] = dim %[[mB]], %c2 : memref // CHECKPARALLEL: scf.parallel (%[[b:.*]], %[[m:.*]], %[[n:.*]]) = ({{.*}}) to (%[[B]], %[[M]], %[[N]]) step ({{.*}}) { // CHECKPARALLEL: scf.for %[[k:.*]] = %{{.*}} to %[[K]] step %{{.*}} { // CHECKPARALLEL: %[[va:.*]] = load %[[mA]][%[[b]], %[[m]], %[[k]]] : memref diff --git a/mlir/test/Dialect/Linalg/parallel_loops.mlir b/mlir/test/Dialect/Linalg/parallel_loops.mlir --- a/mlir/test/Dialect/Linalg/parallel_loops.mlir +++ b/mlir/test/Dialect/Linalg/parallel_loops.mlir @@ -51,10 +51,10 @@ // CHECK-LABEL: @lower_outer_parallel // CHECK-DAG: %[[C0:.*]] = constant 0 // CHECK-DAG: %[[C1:.*]] = constant 1 -// CHECK-DAG: %[[D0:.*]] = dim %{{.*}}, 0 -// CHECK-DAG: %[[D1:.*]] = dim %{{.*}}, 1 -// CHECK-DAG: %[[D2:.*]] = dim %{{.*}}, 2 -// CHECK-DAG: %[[D3:.*]] = dim %{{.*}}, 3 +// CHECK-DAG: %[[D0:.*]] = dim %{{.*}}, %c0 +// CHECK-DAG: %[[D1:.*]] = dim %{{.*}}, %c1 +// CHECK-DAG: %[[D2:.*]] = dim %{{.*}}, %c2 +// CHECK-DAG: %[[D3:.*]] = dim %{{.*}}, %c3 // CHECK: scf.parallel (%[[IV0:.*]], %[[IV1:.*]]) = (%[[C0]], %[[C0]]) to (%[[D0]], %[[D1]]) step (%[[C1]], %[[C1]]) // CHECK: scf.for %[[IV2:.*]] = %[[C0]] to %[[D2]] step %[[C1]] // CHECK: scf.parallel (%[[IV3:.*]]) = (%[[C0]]) to (%[[D3]]) step (%[[C1]]) @@ -84,12 +84,12 @@ // CHECK-LABEL: @lower_mixed_parallel // CHECK-DAG: %[[C0:.*]] = constant 0 // CHECK-DAG: %[[C1:.*]] = constant 1 -// CHECK-DAG: %[[D0:.*]] = dim %{{.*}}, 0 -// 
CHECK-DAG: %[[D1:.*]] = dim %{{.*}}, 1 -// CHECK-DAG: %[[D2:.*]] = dim %{{.*}}, 2 -// CHECK-DAG: %[[D3:.*]] = dim %{{.*}}, 3 -// CHECK-DAG: %[[D4:.*]] = dim %{{.*}}, 4 -// CHECK-DAG: %[[D5:.*]] = dim %{{.*}}, 5 +// CHECK-DAG: %[[D0:.*]] = dim %{{.*}}, %c0 +// CHECK-DAG: %[[D1:.*]] = dim %{{.*}}, %c1 +// CHECK-DAG: %[[D2:.*]] = dim %{{.*}}, %c2 +// CHECK-DAG: %[[D3:.*]] = dim %{{.*}}, %c3 +// CHECK-DAG: %[[D4:.*]] = dim %{{.*}}, %c4 +// CHECK-DAG: %[[D5:.*]] = dim %{{.*}}, %c5 // CHECK: scf.parallel (%[[IV0:.*]], %[[IV1:.*]]) = (%[[C0]], %[[C0]]) to (%[[D0]], %[[D1]]) step (%[[C1]], %[[C1]]) // CHECK: scf.for %[[IV2:.*]] = %[[C0]] to %[[D2]] step %[[C1]] // CHECK: scf.parallel (%[[IV3:.*]], %[[IV4:.*]]) = (%[[C0]], %[[C0]]) to (%[[D3]], %[[D4]]) step (%[[C1]], %[[C1]]) diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir --- a/mlir/test/Dialect/Linalg/promote.mlir +++ b/mlir/test/Dialect/Linalg/promote.mlir @@ -17,9 +17,9 @@ %3 = view %A[%c0][%M, %K] : memref to memref %4 = view %A[%c0][%K, %N] : memref to memref %5 = view %A[%c0][%M, %N] : memref to memref - %6 = dim %3, 0 : memref - %7 = dim %3, 1 : memref - %8 = dim %4, 1 : memref + %6 = dim %3, %c0 : memref + %7 = dim %3, %c1 : memref + %8 = dim %4, %c1 : memref scf.for %arg4 = %c0 to %6 step %c2 { scf.for %arg5 = %c0 to %8 step %c3 { scf.for %arg6 = %c0 to %7 step %c4 { @@ -79,9 +79,9 @@ %3 = view %A[%c0][%M, %K] : memref to memref %4 = view %A[%c0][%K, %N] : memref to memref %5 = view %A[%c0][%M, %N] : memref to memref - %6 = dim %3, 0 : memref - %7 = dim %3, 1 : memref - %8 = dim %4, 1 : memref + %6 = dim %3, %c0 : memref + %7 = dim %3, %c1 : memref + %8 = dim %4, %c1 : memref scf.for %arg4 = %c0 to %6 step %c2 { scf.for %arg5 = %c0 to %8 step %c3 { scf.for %arg6 = %c0 to %7 step %c4 { @@ -141,9 +141,9 @@ %3 = view %A[%c0][%M, %K] : memref to memref %4 = view %A[%c0][%K, %N] : memref to memref %5 = view %A[%c0][%M, %N] : memref to memref - %6 = dim %3, 0 : memref - %7 = dim %3, 1 : memref - %8 = dim %4, 1 : memref + %6 = dim %3, %c0 : memref + %7 = dim %3, %c1 : memref + %8 = dim %4, %c1 : memref scf.for %arg4 = %c0 to %6 step %c2 { scf.for %arg5 = %c0 to %8 step %c3 { scf.for %arg6 = %c0 to %7 step %c4 { diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir --- a/mlir/test/Dialect/Linalg/tile.mlir +++ b/mlir/test/Dialect/Linalg/tile.mlir @@ -31,29 +31,29 @@ // TILE-2-LABEL: func @matmul( // TILE-2-DAG: %[[C0:.*]] = constant 0 : index // TILE-2-DAG: %[[C2:.*]] = constant 2 : index -// TILE-2: %[[M:.*]] = dim %{{.*}}, 0 : memref +// TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref // TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} { -// TILE-2: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-2: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-2: %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]] -// TILE-2: %[[K:.*]] = dim %{{.*}}, 1 : memref +// TILE-2: %[[K:.*]] = dim %{{.*}}, %c1 : memref // TILE-2: %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[K]]] [1, 1] : memref to memref -// TILE-2: %[[localK:.*]] = dim %{{.*}}, 0 +// TILE-2: %[[localK:.*]] = dim %{{.*}}, %c0 // TILE-2: %[[szK:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localK]]] -// TILE-2: %[[N:.*]] = dim %{{.*}}, 1 : memref +// TILE-2: %[[N:.*]] = dim %{{.*}}, %c1 : memref // TILE-2: %[[sCi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szK]], %[[N]]] [1, 1] : memref to memref // TILE-2: linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]]) : memref, memref, memref // TILE-02-LABEL: func @matmul( // 
TILE-02-DAG: %[[C0:.*]] = constant 0 : index // TILE-02-DAG: %[[C2:.*]] = constant 2 : index -// TILE-02: %[[N:.*]] = dim %arg1, 1 : memref +// TILE-02: %[[N:.*]] = dim %arg1, %c1 : memref // TILE-02: scf.for %[[J:.*]] = %{{.*}} to %[[N]] step %{{.*}} { -// TILE-02: %[[K:.*]] = dim %{{.*}}, 0 : memref -// TILE-02: %[[localN:.*]] = dim %{{.*}}, 1 +// TILE-02: %[[K:.*]] = dim %{{.*}}, %c0 : memref +// TILE-02: %[[localN:.*]] = dim %{{.*}}, %c1 // TILE-02: %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]] // TILE-02: %[[sBj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[K]], %[[szN]]] [1, 1] : memref to memref -// TILE-02: %[[M:.*]] = dim %{{.*}}, 0 : memref -// TILE-02: %[[localK:.*]] = dim %{{.*}}, 1 +// TILE-02: %[[M:.*]] = dim %{{.*}}, %c0 : memref +// TILE-02: %[[localK:.*]] = dim %{{.*}}, %c1 // TILE-02: %[[szK:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localK]]] // TILE-02: %[[sCj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szK]]] [1, 1] : memref to memref // TILE-02: linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref, memref, memref @@ -61,15 +61,15 @@ // TILE-002-LABEL: func @matmul( // TILE-002-DAG: %[[C0:.*]] = constant 0 : index // TILE-002-DAG: %[[C2:.*]] = constant 2 : index -// TILE-002: %[[ubK:.*]] = dim %{{.*}}, 1 : memref +// TILE-002: %[[ubK:.*]] = dim %{{.*}}, %c1 : memref // TILE-002: scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[ubK]] step %{{.*}} { -// TILE-002: %[[M:.*]] = dim %{{.*}}, 0 : memref -// TILE-002: %[[localK:.*]] = dim %{{.*}}, 1 +// TILE-002: %[[M:.*]] = dim %{{.*}}, %c0 : memref +// TILE-002: %[[localK:.*]] = dim %{{.*}}, %c1 // TILE-002: %[[szK:.*]] = affine.min #[[bound_map]](%[[K]])[%[[localK]]] // TILE-002: %[[sAj:.*]] = subview %{{.*}}[0, %[[K]]] [%[[M]], %[[szK]]] [1, 1] : memref to memref -// TILE-002: %[[localK:.*]] = dim %{{.*}}, 0 +// TILE-002: %[[localK:.*]] = dim %{{.*}}, %c0 // TILE-002: %[[szK:.*]] = affine.min #[[bound_map]](%[[K]])[%[[localK]]] -// TILE-002: %[[N:.*]] = dim %{{.*}}, 1 : memref +// TILE-002: %[[N:.*]] = dim %{{.*}}, %c1 : memref // TILE-002: %[[sBj:.*]] = subview %{{.*}}[%[[K]], 0] [%[[szK]], %[[N]]] [1, 1] : memref to memref // TILE-002: linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref, memref, memref @@ -78,25 +78,25 @@ // TILE-234-DAG: %[[C2:.*]] = constant 2 : index // TILE-234-DAG: %[[C3:.*]] = constant 3 : index // TILE-234-DAG: %[[C4:.*]] = constant 4 : index -// TILE-234: %[[ubM:.*]] = dim %{{.*}}, 0 : memref -// TILE-234: %[[ubK:.*]] = dim %{{.*}}, 1 : memref -// TILE-234: %[[ubN:.*]] = dim %{{.*}}, 1 : memref +// TILE-234: %[[ubM:.*]] = dim %{{.*}}, %c0 : memref +// TILE-234: %[[ubK:.*]] = dim %{{.*}}, %c1 : memref +// TILE-234: %[[ubN:.*]] = dim %{{.*}}, %c1 : memref // TILE-234: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[ubM]] step %{{.*}} { // TILE-234: scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[ubN]] step %{{.*}} { // TILE-234: scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[ubK]] step %{{.*}} { -// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-234: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-234: %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]] -// TILE-234: %[[localK:.*]] = dim %{{.*}}, 1 +// TILE-234: %[[localK:.*]] = dim %{{.*}}, %c1 // TILE-234: %[[szK:.*]] = affine.min #[[bound_map_4]](%[[K]])[%[[localK]]] // TILE-234: %[[sAik:.*]] = subview %{{.*}}[%[[I]], %[[K]]] [%[[szM]], %[[szK]]] [1, 1] : memref to memref -// TILE-234: %[[localK:.*]] = dim %{{.*}}, 0 +// TILE-234: %[[localK:.*]] = dim %{{.*}}, %c0 // TILE-234: %[[szK:.*]] = affine.min #[[bound_map_4]](%[[K]])[%[[localK]]] -// TILE-234: 
%[[localN:.*]] = dim %{{.*}}, 1 +// TILE-234: %[[localN:.*]] = dim %{{.*}}, %c1 // TILE-234: %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]] // TILE-234: %[[sBkj:.*]] = subview %{{.*}}[%[[K]], %[[J]]] [%[[szK]], %[[szN]]] [1, 1] : memref to memref -// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-234: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-234: %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]] -// TILE-234: %[[localN:.*]] = dim %{{.*}}, 1 +// TILE-234: %[[localN:.*]] = dim %{{.*}}, %c1 // TILE-234: %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]] // TILE-234: %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref to memref // @@ -173,13 +173,13 @@ // TILE-2-SAME: %[[ARG2:[0-9a-zA-Z]*]]: memref // TILE-2-DAG: %[[C0:.*]] = constant 0 : index // TILE-2-DAG: %[[C2:.*]] = constant 2 : index -// TILE-2: %[[M:.*]] = dim %{{.*}}, 0 : memref +// TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref // TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} { -// TILE-2: %[[localM:.*]] = dim %[[ARG0]], 0 +// TILE-2: %[[localM:.*]] = dim %[[ARG0]], %c0 // TILE-2: %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]] -// TILE-2: %[[N:.*]] = dim %{{.*}}, 1 : memref +// TILE-2: %[[N:.*]] = dim %{{.*}}, %c1 : memref // TILE-2: %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[N]]] [1, 1] : memref to memref -// TILE-2: %[[localN:.*]] = dim %{{.*}}, 0 +// TILE-2: %[[localN:.*]] = dim %{{.*}}, %c0 // TILE-2: %[[szN:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localN]]] // TILE-2: %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szN]]] [1] : memref to memref // TILE-2: linalg.matvec(%[[sAi]], %{{.*}}, %[[sCi]]) : memref, memref, memref @@ -190,13 +190,13 @@ // TILE-02-SAME: %[[ARG2:[0-9a-zA-Z]*]]: memref // TILE-02-DAG: %[[C0:.*]] = constant 0 : index // TILE-02-DAG: %[[C2:.*]] = constant 2 : index -// TILE-02: %[[K:.*]] = dim %{{.*}}, 1 : memref +// TILE-02: %[[K:.*]] = dim %{{.*}}, %c1 : memref // TILE-02: scf.for %[[J]] = %{{.*}}{{.*}} to %[[K]] step %{{.*}} { -// TILE-02: %[[M:.*]] = dim %{{.*}}, 0 : memref -// TILE-02: %[[localN:.*]] = dim %{{.*}}, 1 +// TILE-02: %[[M:.*]] = dim %{{.*}}, %c0 : memref +// TILE-02: %[[localN:.*]] = dim %{{.*}}, %c1 // TILE-02: %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]] // TILE-02: %[[sAj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szN]]] [1, 1] : memref to memref -// TILE-02: %[[localN:.*]] = dim %{{.*}}, 0 +// TILE-02: %[[localN:.*]] = dim %{{.*}}, %c0 // TILE-02: %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]] // TILE-02: %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [1] : memref to memref // TILE-02: linalg.matvec(%[[sAj]], %[[sBj]], %{{.*}}) : memref, memref, memref @@ -214,19 +214,19 @@ // TILE-234-DAG: %[[C0:.*]] = constant 0 : index // TILE-234-DAG: %[[C2:.*]] = constant 2 : index // TILE-234-DAG: %[[C3:.*]] = constant 3 : index -// TILE-234: %[[M:.*]] = dim %{{.*}}, 0 : memref -// TILE-234: %[[K:.*]] = dim %{{.*}}, 1 : memref +// TILE-234: %[[M:.*]] = dim %{{.*}}, %c0 : memref +// TILE-234: %[[K:.*]] = dim %{{.*}}, %c1 : memref // TILE-234: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} { // TILE-234: scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[K]] step %{{.*}} { -// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-234: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-234: %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]] -// TILE-234: %[[localN:.*]] = dim %{{.*}}, 1 +// TILE-234: %[[localN:.*]] = dim %{{.*}}, %c1 // TILE-234: 
%[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]] // TILE-234: %[[sAij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref to memref -// TILE-234: %[[localN:.*]] = dim %{{.*}}, 0 +// TILE-234: %[[localN:.*]] = dim %{{.*}}, %c0 // TILE-234: %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]] // TILE-234: %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [1] : memref to memref -// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-234: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-234: %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]] // TILE-234: %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref to memref // @@ -239,12 +239,12 @@ // TILE-2-LABEL: func @dot( // TILE-2-DAG: %[[C0:.*]] = constant 0 : index // TILE-2-DAG: %[[C2:.*]] = constant 2 : index -// TILE-2: %[[M:.*]] = dim %{{.*}}, 0 : memref +// TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref // TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} { -// TILE-2: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-2: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-2: %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]] // TILE-2: %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref to memref -// TILE-2: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-2: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-2: %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]] // TILE-2: %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref to memref // TILE-2: linalg.dot(%[[sAi]], %[[sBi]], {{.*}}) : memref, memref, memref @@ -258,12 +258,12 @@ // TILE-234-LABEL: func @dot( // TILE-234-DAG: %[[C0:.*]] = constant 0 : index // TILE-234-DAG: %[[C2:.*]] = constant 2 : index -// TILE-234: %[[ubK:.*]] = dim %{{.*}}, 0 : memref +// TILE-234: %[[ubK:.*]] = dim %{{.*}}, %c0 : memref // TILE-234: scf.for %[[I:.*]] = %{{.*}} to %[[ubK]] step %{{.*}} { -// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-234: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-234: %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]] // TILE-234: %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref to memref -// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0 +// TILE-234: %[[localM:.*]] = dim %{{.*}}, %c0 // TILE-234: %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]] // TILE-234: %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref to memref // TILE-234: linalg.dot(%[[sAi]], %[[sBi]], %{{.*}}) : memref, memref, memref diff --git a/mlir/test/Dialect/Linalg/tile_conv.mlir b/mlir/test/Dialect/Linalg/tile_conv.mlir --- a/mlir/test/Dialect/Linalg/tile_conv.mlir +++ b/mlir/test/Dialect/Linalg/tile_conv.mlir @@ -15,30 +15,30 @@ // TILE-23004-DAG: %[[C2:.*]] = constant 2 : index // TILE-23004-DAG: %[[C3:.*]] = constant 3 : index // TILE-23004-DAG: %[[C4:.*]] = constant 4 : index -// TILE-23004: %[[Q:.*]] = dim %{{.*}}, 2 : memref -// TILE-23004: %[[B:.*]] = dim %{{.*}}, 0 : memref -// TILE-23004: %[[PaddedInput0:.*]] = dim %{{.*}}, 1 : memref -// TILE-23004: %[[X0:.*]] = dim %{{.*}}, 1 : memref +// TILE-23004: %[[Q:.*]] = dim %{{.*}}, %c2 : memref +// TILE-23004: %[[B:.*]] = dim %{{.*}}, %c0 : memref +// TILE-23004: %[[PaddedInput0:.*]] = dim %{{.*}}, %c1 : memref +// TILE-23004: %[[X0:.*]] = dim %{{.*}}, %c1 : memref // TILE-23004: scf.for %[[ivI:.*]] = %{{.*}} to %[[B]] step %{{.*}} { // TILE-23004: scf.for %[[ivJ:.*]] = %{{.*}} to %[[X0]] step %{{.*}} { // TILE-23004: scf.for %[[ivK:.*]] = %{{.*}} to %[[Q]] step %{{.*}} { -// TILE-23004: %[[Z0:.*]] = dim %{{.*}}, 0 : memref -// 
TILE-23004: %[[Z1:.*]] = dim %{{.*}}, 1 : memref -// TILE-23004: %[[Z2:.*]] = dim %{{.*}}, 2 : memref +// TILE-23004: %[[Z0:.*]] = dim %{{.*}}, %c0 : memref +// TILE-23004: %[[Z1:.*]] = dim %{{.*}}, %c1 : memref +// TILE-23004: %[[Z2:.*]] = dim %{{.*}}, %c2 : memref // TILE-23004: %[[szK:.*]] = affine.min #[[bound_map_4]](%[[ivK]])[%[[Z2]]] -// TILE-23004: %[[K:.*]] = dim %{{.*}}, 3 : memref +// TILE-23004: %[[K:.*]] = dim %{{.*}}, %c3 : memref // TILE-23004: %[[FilterView:.*]] = subview %{{.*}}[0, 0, %[[ivK]], 0] [%[[Z0]], %[[Z1]], %[[szK]], %[[K]]] [1, 1, 1, 1] : memref to memref // // TILE-23004: %[[J1:.*]] = affine.apply #[[D0x30pS0x10]](%[[ivJ]]) -// TILE-23004: %[[PaddedInput0b:.*]] = dim %{{.*}}, 1 : memref +// TILE-23004: %[[PaddedInput0b:.*]] = dim %{{.*}}, %c1 : memref // TILE-23004: %[[I1pStep:.*]] = affine.min #[[S0x10p90D0x30pS1]](%[[ivJ]])[%[[PaddedInput0]], %[[PaddedInput0b]]] -// TILE-23004: %[[SZ2:.*]] = dim %{{.*}}, 2 : memref -// TILE-23004: %[[dim3:.*]] = dim %{{.*}}, 3 +// TILE-23004: %[[SZ2:.*]] = dim %{{.*}}, %c2 : memref +// TILE-23004: %[[dim3:.*]] = dim %{{.*}}, %c3 // TILE-23004: %[[sz3:.*]] = affine.min #[[bound_map_4]](%[[ivK]])[%[[dim3]]] // TILE-23004: %[[InputView:.*]] = subview %{{.*}}[%[[ivI]], %[[J1]], 0, %[[ivK]]] [%{{.*}}, %{{.*}}, %[[SZ2]], %[[sz3]]] [1, 1, 1, 1] : memref to memref // -// TILE-23004: %[[X0:.*]] = dim %{{.*}}, 2 : memref -// TILE-23004: %[[X1:.*]] = dim %{{.*}}, 3 : memref +// TILE-23004: %[[X0:.*]] = dim %{{.*}}, %c2 : memref +// TILE-23004: %[[X1:.*]] = dim %{{.*}}, %c3 : memref // TILE-23004: %[[OutputView:.*]] = subview %{{.*}}[%[[ivI]], %[[ivJ]], 0, 0] [%{{.*}}, %{{.*}}, %[[X0]], %[[X1]]] [1, 1, 1, 1] : memref to memref // // TILE-23004: linalg.conv(%[[FilterView]], %[[InputView]], %[[OutputView]]) {dilations = [10, 20], strides = [30, 40]} : memref, memref, memref diff --git a/mlir/test/Dialect/Linalg/tile_conv_padding.mlir b/mlir/test/Dialect/Linalg/tile_conv_padding.mlir --- a/mlir/test/Dialect/Linalg/tile_conv_padding.mlir +++ b/mlir/test/Dialect/Linalg/tile_conv_padding.mlir @@ -21,18 +21,18 @@ // TILE-20000-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref) // TILE-20000-DAG: %[[C0:.*]] = constant 0 : index // TILE-20000-DAG: %[[C2:.*]] = constant 2 : index -// TILE-20000: %[[B:.*]] = dim %[[ARG1]], 0 +// TILE-20000: %[[B:.*]] = dim %[[ARG1]], %c0 // TILE-20000: scf.for %[[ivI:.*]] = %[[C0]] to %[[B]] step %[[C2]] { -// TILE-20000: %[[DIM10:.*]] = dim %[[ARG1]], 0 +// TILE-20000: %[[DIM10:.*]] = dim %[[ARG1]], %c0 // TILE-20000: %[[EXTENT:.*]] = affine.min #[[minmap]](%[[ivI]])[%[[DIM10]]] -// TILE-20000: %[[DIM11:.*]] = dim %[[ARG1]], 1 -// TILE-20000: %[[DIM12:.*]] = dim %[[ARG1]], 2 -// TILE-20000: %[[DIM13:.*]] = dim %[[ARG1]], 3 +// TILE-20000: %[[DIM11:.*]] = dim %[[ARG1]], %c1 +// TILE-20000: %[[DIM12:.*]] = dim %[[ARG1]], %c2 +// TILE-20000: %[[DIM13:.*]] = dim %[[ARG1]], %c3 // TILE-20000: %[[SUBVIEW1:.*]] = subview %[[ARG1]][%[[ivI]], 0, 0, 0] [%[[EXTENT]], %[[DIM11]], %[[DIM12]], %[[DIM13]]] -// TILE-20000: %[[DIM20:.*]] = dim %[[ARG2]], 0 +// TILE-20000: %[[DIM20:.*]] = dim %[[ARG2]], %c0 // TILE-20000: %[[EXTENT:.*]] = affine.min #[[minmap]](%[[ivI]])[%[[DIM20]]] -// TILE-20000: %[[DIM21:.*]] = dim %[[ARG2]], 1 -// TILE-20000: %[[DIM22:.*]] = dim %[[ARG2]], 2 -// TILE-20000: %[[DIM23:.*]] = dim %[[ARG2]], 3 +// TILE-20000: %[[DIM21:.*]] = dim %[[ARG2]], %c1 +// TILE-20000: %[[DIM22:.*]] = dim %[[ARG2]], %c2 +// TILE-20000: %[[DIM23:.*]] = dim %[[ARG2]], %c3 // TILE-20000: %[[SUBVIEW2:.*]] = subview %[[ARG2]][%[[ivI]], 0, 0, 
0] [%[[EXTENT]], %[[DIM21]], %[[DIM22]], %[[DIM23]]]
// TILE-20000: linalg.conv(%[[ARG0]], %[[SUBVIEW1]], %[[SUBVIEW2]])
diff --git a/mlir/test/Dialect/Linalg/tile_parallel.mlir b/mlir/test/Dialect/Linalg/tile_parallel.mlir
--- a/mlir/test/Dialect/Linalg/tile_parallel.mlir
+++ b/mlir/test/Dialect/Linalg/tile_parallel.mlir
@@ -27,7 +27,7 @@
 // TILE-2-SAME: [[LHS:%.*]]: {{.*}}, [[RHS:%.*]]: {{.*}}, [[SUM:%.*]]: {{.*}}) {
 // TILE-2-DAG: [[C0:%.*]] = constant 0 : index
 // TILE-2-DAG: [[C2:%.*]] = constant 2 : index
-// TILE-2: [[LHS_ROWS:%.*]] = dim [[LHS]], 0
+// TILE-2: [[LHS_ROWS:%.*]] = dim [[LHS]], %c0
 // TILE-2: scf.parallel ([[I:%.*]]) = ([[C0]]) to ([[LHS_ROWS]]) step ([[C2]]) {
 // TILE-2-NO: scf.parallel
 // TILE-2: [[LHS_SUBVIEW:%.*]] = subview [[LHS]]
@@ -39,7 +39,7 @@
 // TILE-02-SAME: [[LHS:%.*]]: {{.*}}, [[RHS:%.*]]: {{.*}}, [[SUM:%.*]]: {{.*}}) {
 // TILE-02-DAG: [[C0:%.*]] = constant 0 : index
 // TILE-02-DAG: [[C2:%.*]] = constant 2 : index
-// TILE-02: [[LHS_COLS:%.*]] = dim [[LHS]], 1
+// TILE-02: [[LHS_COLS:%.*]] = dim [[LHS]], %c1
 // TILE-02: scf.parallel ([[I:%.*]]) = ([[C0]]) to ([[LHS_COLS]]) step ([[C2]]) {
 // TILE-02-NO: scf.parallel
 // TILE-02: [[LHS_SUBVIEW:%.*]] = subview [[LHS]]
@@ -57,8 +57,8 @@
 // TILE-234-DAG: [[C0:%.*]] = constant 0 : index
 // TILE-234-DAG: [[C2:%.*]] = constant 2 : index
 // TILE-234-DAG: [[C3:%.*]] = constant 3 : index
-// TILE-234: [[LHS_ROWS:%.*]] = dim [[LHS]], 0
-// TILE-234: [[LHS_COLS:%.*]] = dim [[LHS]], 1
+// TILE-234: [[LHS_ROWS:%.*]] = dim [[LHS]], %c0
+// TILE-234: [[LHS_COLS:%.*]] = dim [[LHS]], %c1
 // TILE-234: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]]) to ([[LHS_ROWS]], [[LHS_COLS]]) step ([[C2]], [[C3]]) {
 // TILE-234-NO: scf.parallel
 // TILE-234: [[LHS_SUBVIEW:%.*]] = subview [[LHS]]
diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -252,9 +252,9 @@
   %c4000 = constant 4000 : index
   %c0 = constant 0 : index
   %c1 = constant 1 : index
-  %0 = dim %arg0, 0 : memref
-  %1 = dim %arg0, 1 : memref
-  %2 = dim %arg1, 1 : memref
+  %0 = dim %arg0, %c0 : memref
+  %1 = dim %arg0, %c1 : memref
+  %2 = dim %arg1, %c1 : memref
   scf.for %arg3 = %c0 to %0 step %c2000 {
     scf.for %arg4 = %c0 to %2 step %c3000 {
       scf.for %arg5 = %c0 to %1 step %c4000 {
@@ -302,9 +302,9 @@
   %c4000 = constant 4000 : index
   %c0 = constant 0 : index
   %c1 = constant 1 : index
-  %0 = dim %arg0, 0 : memref
-  %1 = dim %arg0, 1 : memref
-  %2 = dim %arg1, 1 : memref
+  %0 = dim %arg0, %c0 : memref
+  %1 = dim %arg0, %c1 : memref
+  %2 = dim %arg1, %c1 : memref
   scf.for %arg3 = %c0 to %0 step %c2000 {
     scf.for %arg4 = %c0 to %2 step %c3000 {
      scf.for %arg5 = %c0 to %1 step %c4000 {
@@ -381,9 +381,9 @@
 // CHECK-DAG: %[[C8:.*]] = constant 8 : index
 // CHECK-DAG: %[[C4:.*]] = constant 4 : index
 // CHECK-DAG: %[[C0:.*]] = constant 0 : index
-// CHECK-DAG: %[[D0:.*]] = dim %[[ARG0]], 0
-// CHECK-DAG: %[[D1:.*]] = dim %[[ARG0]], 1
-// CHECK-DAG: %[[D2:.*]] = dim %[[ARG1]], 1
+// CHECK-DAG: %[[D0:.*]] = dim %[[ARG0]], %c0
+// CHECK-DAG: %[[D1:.*]] = dim %[[ARG0]], %c1
+// CHECK-DAG: %[[D2:.*]] = dim %[[ARG1]], %c1
 // CHECK: scf.parallel (%{{.*}}) = (%[[C0]]) to (%[[D2]]) step (%[[C8]])
 // CHECK: scf.for %{{.*}} = %[[C0]] to %[[D1]] step %[[C4]]
 // CHECK: scf.parallel (%{{.*}}) = (%[[C0]]) to (%[[D0]]) step (%[[C16]])
diff --git a/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir b/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
--- a/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
+++ b/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
@@ -7,8 +7,8 @@
 %C: memref, %result: memref) {
   %c0 = constant 0 : index
   %c1 = constant 1 : index
-  %d0 = dim %A, 0 : memref
-  %d1 = dim %A, 1 : memref
+  %d0 = dim %A, %c0 : memref
+  %d1 = dim %A, %c1 : memref
   %b0 = affine.min #map0()[%d0, %outer_i0]
   %b1 = affine.min #map1()[%d1, %outer_i1]
   scf.parallel (%i0, %i1) = (%c0, %c0) to (%b0, %b1) step (%c1, %c1) {
@@ -24,8 +24,8 @@
 // CHECK-SAME: [[VAL_0:%.*]]: index, [[VAL_1:%.*]]: index, [[VAL_2:%.*]]: memref, [[VAL_3:%.*]]: memref, [[VAL_4:%.*]]: memref, [[VAL_5:%.*]]: memref) {
 // CHECK: [[VAL_6:%.*]] = constant 0 : index
 // CHECK: [[VAL_7:%.*]] = constant 1 : index
-// CHECK: [[VAL_8:%.*]] = dim [[VAL_2]], 0 : memref
-// CHECK: [[VAL_9:%.*]] = dim [[VAL_2]], 1 : memref
+// CHECK: [[VAL_8:%.*]] = dim [[VAL_2]], [[VAL_6]] : memref
+// CHECK: [[VAL_9:%.*]] = dim [[VAL_2]], [[VAL_7]] : memref
 // CHECK: [[VAL_10:%.*]] = affine.min #map0(){{\[}}[[VAL_8]], [[VAL_0]]]
 // CHECK: [[VAL_11:%.*]] = affine.min #map1(){{\[}}[[VAL_9]], [[VAL_1]]]
 // CHECK: [[VAL_12:%.*]] = constant 1024 : index
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -663,9 +663,9 @@
   // clang-format off
   // CHECK-LABEL: func @tile_2d
   // CHECK: %[[ZERO:.*]] = constant 0 : index
-  // CHECK: %[[M:[0-9]+]] = dim %arg2, 0 : memref
-  // CHECK-NEXT: %[[N:[0-9]+]] = dim %arg2, 1 : memref
-  // CHECK-NEXT: %[[P:[0-9]+]] = dim %arg2, 2 : memref
+  // CHECK: %[[M:[0-9]+]] = dim %arg2, %c0{{[_0-9]*}} : memref
+  // CHECK: %[[N:[0-9]+]] = dim %arg2, %c1{{[_0-9]*}} : memref
+  // CHECK: %[[P:[0-9]+]] = dim %arg2, %c2{{[_0-9]*}} : memref
   // CHECK: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[M]]) step 512 {
   // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[N]]) step 1024 {
   // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[P]]) {
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -20,68 +20,70 @@
 // CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 // CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>
-// CHECK-LABEL: func @func_with_ops(%arg0: f32) {
+// CHECK-LABEL: func @func_with_ops
+// CHECK-SAME: %[[ARG:.*]]: f32
 func @func_with_ops(f32) {
 ^bb0(%a : f32):
-  // CHECK: %0 = "getTensor"() : () -> tensor<4x4x?xf32>
+  // CHECK: %[[T:.*]] = "getTensor"() : () -> tensor<4x4x?xf32>
   %t = "getTensor"() : () -> tensor<4x4x?xf32>
-  // CHECK: %1 = dim %0, 2 : tensor<4x4x?xf32>
-  %t2 = "std.dim"(%t){index = 2} : (tensor<4x4x?xf32>) -> index
+  // CHECK: %[[C2:.*]] = constant 2 : index
+  // CHECK-NEXT: %{{.*}} = dim %[[T]], %[[C2]] : tensor<4x4x?xf32>
+  %c2 = constant 2 : index
+  %t2 = "std.dim"(%t, %c2) : (tensor<4x4x?xf32>, index) -> index
-  // CHECK: %2 = addf %arg0, %arg0 : f32
+  // CHECK: %{{.*}} = addf %[[ARG]], %[[ARG]] : f32
   %x = "std.addf"(%a, %a) : (f32,f32) -> (f32)
-  // CHECK: return
+  // CHECK: return
   return
 }
 // CHECK-LABEL: func @standard_instrs(%arg0: tensor<4x4x?xf32>, %arg1: f32, %arg2: i32, %arg3: index, %arg4: i64, %arg5: f16) {
 func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
 ^bb42(%t: tensor<4x4x?xf32>, %f: f32, %i: i32, %idx : index, %j: i64, %half: f16):
-  // CHECK: %0 = dim %arg0, 2 : tensor<4x4x?xf32>
-  %a = "std.dim"(%t){index = 2} : (tensor<4x4x?xf32>) -> index
+  // CHECK: %[[C2:.*]] = constant 2 : index
+  // CHECK: %[[A2:.*]] = dim %arg0, %[[C2]] : tensor<4x4x?xf32>
+  %c2 = constant 2 : index
+  %a2 = dim %t, %c2 : tensor<4x4x?xf32>
-  // CHECK: %1 = dim %arg0, 2 : tensor<4x4x?xf32>
-  %a2 = dim %t, 2 : tensor<4x4x?xf32>
-
-  // CHECK: %2 = addf %arg1, %arg1 : f32
+  // CHECK: %[[F2:.*]] = addf %arg1, %arg1 : f32
   %f2 = "std.addf"(%f, %f) : (f32,f32) -> f32
-  // CHECK: %3 = addf %2, %2 : f32
+  // CHECK: %[[F3:.*]] = addf %[[F2]], %[[F2]] : f32
   %f3 = addf %f2, %f2 : f32
-  // CHECK: %4 = addi %arg2, %arg2 : i32
+  // CHECK: %[[I2:.*]] = addi %arg2, %arg2 : i32
   %i2 = "std.addi"(%i, %i) : (i32,i32) -> i32
-  // CHECK: %5 = addi %4, %4 : i32
+  // CHECK: %[[I3:.*]] = addi %[[I2]], %[[I2]] : i32
   %i3 = addi %i2, %i2 : i32
-  // CHECK: %{{[0-9]+}} = addi %arg3, %arg3 : index
+  // CHECK: %[[IDX1:.*]] = addi %arg3, %arg3 : index
   %idx1 = addi %idx, %idx : index
-  // CHECK: %{{[0-9]+}} = addi %arg3, %{{[0-9]+}} : index
+  // CHECK: %[[IDX2:.*]] = addi %arg3, %[[IDX1]] : index
   %idx2 = "std.addi"(%idx, %idx1) : (index, index) -> index
-  // CHECK: %8 = subf %arg1, %arg1 : f32
+  // CHECK: %[[F4:.*]] = subf %arg1, %arg1 : f32
   %f4 = "std.subf"(%f, %f) : (f32,f32) -> f32
-  // CHECK: %9 = subf %8, %8 : f32
+  // CHECK: %[[F5:.*]] = subf %[[F4]], %[[F4]] : f32
   %f5 = subf %f4, %f4 : f32
-  // CHECK: %10 = subi %arg2, %arg2 : i32
+  // CHECK: %[[I4:.*]] = subi %arg2, %arg2 : i32
   %i4 = "std.subi"(%i, %i) : (i32,i32) -> i32
-  // CHECK: %11 = subi %10, %10 : i32
+  // CHECK: %[[I5:.*]] = subi %[[I4]], %[[I4]] : i32
   %i5 = subi %i4, %i4 : i32
-  // CHECK: %12 = mulf %2, %2 : f32
+  // CHECK: %[[F6:.*]] = mulf %[[F2]], %[[F2]] : f32
   %f6 = mulf %f2, %f2 : f32
-  // CHECK: %13 = muli %4, %4 : i32
+  // CHECK: %[[I6:.*]] = muli %[[I2]], %[[I2]] : i32
   %i6 = muli %i2, %i2 : i32
-  // CHECK: %[[C0:.*]] = create_complex %[[F2:.*]], %[[F2]] : complex<f32>
+  // CHECK: %[[C0:.*]] = create_complex %[[F2]], %[[F2]] : complex<f32>
   %c0 = "std.create_complex"(%f2, %f2) : (f32, f32) -> complex<f32>
   // CHECK: %[[C1:.*]] = create_complex %[[F2]], %[[F2]] : complex<f32>
@@ -465,7 +467,7 @@
   // CHECK: %{{[0-9]+}} = shift_left %arg2, %arg2 : i32
   %124 = "std.shift_left"(%i, %i) : (i32, i32) -> i32
-  // CHECK:%{{[0-9]+}} = shift_left %4, %4 : i32
+  // CHECK:%{{[0-9]+}} = shift_left %[[I2]], %[[I2]] : i32
   %125 = shift_left %i2, %i2 : i32
   // CHECK: %{{[0-9]+}} = shift_left %arg3, %arg3 : index
@@ -480,7 +482,7 @@
   // CHECK: %{{[0-9]+}} = shift_right_signed %arg2, %arg2 : i32
   %129 = "std.shift_right_signed"(%i, %i) : (i32, i32) -> i32
-  // CHECK:%{{[0-9]+}} = shift_right_signed %4, %4 : i32
+  // CHECK:%{{[0-9]+}} = shift_right_signed %[[I2]], %[[I2]] : i32
   %130 = shift_right_signed %i2, %i2 : i32
   // CHECK: %{{[0-9]+}} = shift_right_signed %arg3, %arg3 : index
@@ -495,7 +497,7 @@
   // CHECK: %{{[0-9]+}} = shift_right_unsigned %arg2, %arg2 : i32
   %134 = "std.shift_right_unsigned"(%i, %i) : (i32, i32) -> i32
-  // CHECK:%{{[0-9]+}} = shift_right_unsigned %4, %4 : i32
+  // CHECK:%{{[0-9]+}} = shift_right_unsigned %[[I2]], %[[I2]] : i32
   %135 = shift_right_unsigned %i2, %i2 : i32
   // CHECK: %{{[0-9]+}} = shift_right_unsigned %arg3, %arg3 : index
@@ -778,10 +780,13 @@
   return
 }
-// CHECK-LABEL: func @test_dimop(%arg0
+// CHECK-LABEL: func @test_dimop
+// CHECK-SAME: %[[ARG:.*]]: tensor<4x4x?xf32>
 func @test_dimop(%arg0: tensor<4x4x?xf32>) {
-  // CHECK: %0 = dim %arg0, 2 : tensor<4x4x?xf32>
-  %0 = dim %arg0, 2 : tensor<4x4x?xf32>
+  // CHECK: %[[C2:.*]] = constant 2 : index
+  // CHECK: %{{.*}} = dim %[[ARG]], %[[C2]] : tensor<4x4x?xf32>
+  %c2 = constant 2 : index
+  %0 = dim %arg0, %c2 : tensor<4x4x?xf32>
   // use dim as an index to ensure type correctness
   %1 = affine.apply affine_map<(d0) -> (d0)>(%0)
   return
diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -1,24 +1,8 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -verify-diagnostics
-func @dim(tensor<1xf32>) {
-^bb(%0: tensor<1xf32>):
-  "std.dim"(%0){index = "xyz"} : (tensor<1xf32>)->index // expected-error {{attribute 'index' failed to satisfy constraint: arbitrary integer attribute}}
-  return
-}
-
-// -----
-
-func @dim2(tensor<1xf32>) {
-^bb(%0: tensor<1xf32>):
-  "std.dim"(){index = "xyz"} : ()->index // expected-error {{'std.dim' op requires a single operand}}
-  return
-}
-
-// -----
-
-func @dim3(tensor<1xf32>) {
-^bb(%0: tensor<1xf32>):
-  "std.dim"(%0){index = 1} : (tensor<1xf32>)->index // expected-error {{'std.dim' op index is out of range}}
+func @dim(%arg : tensor<1x?xf32>) {
+  %c2 = constant 2 : index
+  dim %arg, %c2 : tensor<1x?xf32> // expected-error {{'std.dim' op index is out of range}}
   return
 }
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -28,7 +28,8 @@
 func @dim(%arg0: tensor<8x4xf32>) -> index {
   // CHECK: %c4 = constant 4 : index
-  %0 = dim %arg0, 1 : tensor<8x4xf32>
+  %c1 = constant 1 : index
+  %0 = dim %arg0, %c1 : tensor<8x4xf32>
   // CHECK-NEXT: return %c4
   return %0 : index
@@ -51,7 +52,8 @@
 // CHECK-LABEL: func @trivial_dce
 func @trivial_dce(%arg0: tensor<8x4xf32>) {
-  %0 = dim %arg0, 1 : tensor<8x4xf32>
+  %c1 = constant 1 : index
+  %0 = dim %arg0, %c1 : tensor<8x4xf32>
   // CHECK-NEXT: return
   return
 }
@@ -314,7 +316,7 @@
   %0 = memref_cast %arg0 : memref<4xf32> to memref<?xf32>
   // CHECK-NEXT: %c0 = constant 0 : index
   %c0 = constant 0 : index
-  %dim = dim %0, 0 : memref<?xf32>
+  %dim = dim %0, %c0 : memref<?xf32>
   // CHECK-NEXT: affine.load %arg0[3]
   %1 = affine.load %0[%dim - 1] : memref<?xf32>
@@ -442,24 +444,25 @@
 // CHECK-SAME: [[K:arg[0-9]+]]: index
   %c0 = constant 0 : index
   %c1 = constant 1 : index
+  %c2 = constant 2 : index
   %0 = alloc(%arg0, %arg1) : memref
   %1 = alloc(%arg1, %arg2) : memref
-  %2 = dim %1, 2 : memref
+  %2 = dim %1, %c2 : memref
   affine.for %arg3 = 0 to %2 {
     %3 = alloc(%arg0) : memref
-    %ub = dim %3, 0 : memref
+    %ub = dim %3, %c0 : memref
     affine.for %arg4 = 0 to %ub {
-      %s = dim %0, 0 : memref
+      %s = dim %0, %c0 : memref
       %v = std.view %3[%c0][%arg4, %s] : memref to memref
       %sv = subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref to memref
-      %l = dim %v, 1 : memref
-      %u = dim %sv, 0 : memref
+      %l = dim %v, %c1 : memref
+      %u = dim %sv, %c0 : memref
       affine.for %arg5 = %l to %u {
        "foo"() : () -> ()
      }
      %sv2 = subview %0[0, 0][17, %arg4][1, 1] : memref to memref<17x?xf32, #map3>
-     %l2 = dim %v, 1 : memref
-     %u2 = dim %sv2, 1 : memref<17x?xf32, #map3>
+     %l2 = dim %v, %c1 : memref
+     %u2 = dim %sv2, %c1 : memref<17x?xf32, #map3>
      scf.for %arg5 = %l2 to %u2 step %c1 {
        "foo"() : () -> ()
      }
@@ -480,9 +483,9 @@
   %B = view %BUF[%c0][%K, %N] : memref to memref
   %C = view %BUF[%c0][%M, %N] : memref to memref
-  %M_ = dim %A, 0 : memref
-  %K_ = dim %A, 1 : memref
-  %N_ = dim %C, 1 : memref
+  %M_ = dim %A, %c0 : memref
+  %K_ = dim %A, %c1 : memref
+  %N_ = dim %C, %c1 : memref
   scf.for %i = %c0 to %M_ step %c1 {
     scf.for %j = %c0 to %N_ step %c1 {
       scf.for %k = %c0 to %K_ step %c1 {
@@ -855,8 +858,8 @@
   store %v0, %20[%arg1, %arg1] : memref<12x4xf32, offset: ?, strides:[4, 1]>
   // Test: dim on subview is rewritten to size operand.
-  %7 = dim %4, 0 : memref
-  %8 = dim %4, 1 : memref
+  %7 = dim %4, %c0 : memref
+  %8 = dim %4, %c1 : memref
   // CHECK: return %[[C7]], %[[C11]]
   return %7, %8 : index, index
diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -382,7 +382,8 @@
 func @dim(%x : tensor<8x4xf32>) -> index {
   // CHECK:[[C4:%.+]] = constant 4 : index
-  %0 = dim %x, 1 : tensor<8x4xf32>
+  %c1 = constant 1 : index
+  %0 = dim %x, %c1 : tensor<8x4xf32>
   // CHECK-NEXT: return [[C4]]
   return %0 : index
diff --git a/mlir/test/Transforms/pipeline-data-transfer.mlir b/mlir/test/Transforms/pipeline-data-transfer.mlir
--- a/mlir/test/Transforms/pipeline-data-transfer.mlir
+++ b/mlir/test/Transforms/pipeline-data-transfer.mlir
@@ -330,8 +330,10 @@
 // Double buffering for dynamic shaped buffer.
 // CHECK: alloc(%{{.*}}, %{{.*}}) : memref<?x?xf32>
-// CHECK-NEXT: dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-NEXT: dim %{{.*}}, 1 : memref<?x?xf32>
+// CHECK-NEXT: %[[C0:.*]] = constant 0 : index
+// CHECK-NEXT: dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-NEXT: %[[C1:.*]] = constant 1 : index
+// CHECK-NEXT: dim %{{.*}}, %[[C1]] : memref<?x?xf32>
 // CHECK-NEXT: alloc(%{{.*}}, %{{.*}}) : memref<2x?x?xf32, 2>
 // CHECK: affine.dma_start %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}} mod 2, 0, 0], %{{.*}}[%{{.*}} mod 2, 0], %{{.*}}
 affine.for %kTT = 0 to 16 {
diff --git a/mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir b/mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir
--- a/mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir
+++ b/mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir
@@ -23,15 +23,18 @@
   %pC = memref_cast %C : memref<16x16xf32> to memref<*xf32>
   call @print_memref_f32(%pC) : (memref<*xf32>) -> ()
-  %M = dim %C, 0 : memref<16x16xf32>
-  %N = dim %C, 1 : memref<16x16xf32>
-  %K = dim %A, 1 : memref<16x16xf32>
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+
+  %M = dim %C, %c0 : memref<16x16xf32>
+  %N = dim %C, %c1 : memref<16x16xf32>
+  %K = dim %A, %c1 : memref<16x16xf32>
   %f1 = muli %M, %N : index
   %f2 = muli %f1, %K : index
   // 2*M*N*K.
-  %c2 = constant 2 : index
   %f3 = muli %c2, %f2 : index
   %num_flops = muli %reps, %f3 : index
   %num_flops_i = index_cast %num_flops : index to i16
diff --git a/mlir/test/mlir-cuda-runner/all-reduce-op.mlir b/mlir/test/mlir-cuda-runner/all-reduce-op.mlir
--- a/mlir/test/mlir-cuda-runner/all-reduce-op.mlir
+++ b/mlir/test/mlir-cuda-runner/all-reduce-op.mlir
@@ -4,13 +4,15 @@
 func @main() {
   %arg = alloc() : memref<2x4x13xf32>
   %dst = memref_cast %arg : memref<2x4x13xf32> to memref<?x?x?xf32>
-  %one = constant 1 : index
-  %sx = dim %dst, 2 : memref<?x?x?xf32>
-  %sy = dim %dst, 1 : memref<?x?x?xf32>
-  %sz = dim %dst, 0 : memref<?x?x?xf32>
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %sx = dim %dst, %c2 : memref<?x?x?xf32>
+  %sy = dim %dst, %c1 : memref<?x?x?xf32>
+  %sz = dim %dst, %c0 : memref<?x?x?xf32>
   %cast_dst = memref_cast %dst : memref<?x?x?xf32> to memref<*xf32>
   call @mcuMemHostRegisterFloat(%cast_dst) : (memref<*xf32>) -> ()
-  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)
+  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %sx, %block_y = %sy, %block_z = %sz) {
     %t0 = muli %tz, %block_y : index
     %t1 = addi %ty, %t0 : index
diff --git a/mlir/test/mlir-cuda-runner/all-reduce-region.mlir b/mlir/test/mlir-cuda-runner/all-reduce-region.mlir
--- a/mlir/test/mlir-cuda-runner/all-reduce-region.mlir
+++ b/mlir/test/mlir-cuda-runner/all-reduce-region.mlir
@@ -5,7 +5,8 @@
   %arg = alloc() : memref<35xf32>
   %dst = memref_cast %arg : memref<35xf32> to memref<?xf32>
   %one = constant 1 : index
-  %sx = dim %dst, 0 : memref<?xf32>
+  %c0 = constant 0 : index
+  %sx = dim %dst, %c0 : memref<?xf32>
   %cast_dst = memref_cast %dst : memref<?xf32> to memref<*xf32>
   call @mcuMemHostRegisterFloat(%cast_dst) : (memref<*xf32>) -> ()
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)
diff --git a/mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir b/mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir
--- a/mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir
+++ b/mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir
@@ -2,7 +2,8 @@
 func @other_func(%arg0 : f32, %arg1 : memref<?xf32>) {
   %cst = constant 1 : index
-  %cst2 = dim %arg1, 0 : memref<?xf32>
+  %c0 = constant 0 : index
+  %cst2 = dim %arg1, %c0 : memref<?xf32>
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst, %grid_z = %cst)
              threads(%tx, %ty, %tz) in (%block_x = %cst2, %block_y = %cst, %block_z = %cst) {
     store %arg0, %arg1[%tx] : memref<?xf32>
diff --git a/mlir/test/mlir-cuda-runner/shuffle.mlir b/mlir/test/mlir-cuda-runner/shuffle.mlir
--- a/mlir/test/mlir-cuda-runner/shuffle.mlir
+++ b/mlir/test/mlir-cuda-runner/shuffle.mlir
@@ -5,7 +5,8 @@
   %arg = alloc() : memref<13xf32>
   %dst = memref_cast %arg : memref<13xf32> to memref<?xf32>
   %one = constant 1 : index
-  %sx = dim %dst, 0 : memref<?xf32>
+  %c0 = constant 0 : index
+  %sx = dim %dst, %c0 : memref<?xf32>
   %cast_dest = memref_cast %dst : memref<?xf32> to memref<*xf32>
   call @mcuMemHostRegisterFloat(%cast_dest) : (memref<*xf32>) -> ()
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)
diff --git a/mlir/test/mlir-cuda-runner/two-modules.mlir b/mlir/test/mlir-cuda-runner/two-modules.mlir
--- a/mlir/test/mlir-cuda-runner/two-modules.mlir
+++ b/mlir/test/mlir-cuda-runner/two-modules.mlir
@@ -5,7 +5,8 @@
   %arg = alloc() : memref<13xi32>
   %dst = memref_cast %arg : memref<13xi32> to memref<?xi32>
   %one = constant 1 : index
-  %sx = dim %dst, 0 : memref<?xi32>
+  %c0 = constant 0 : index
+  %sx = dim %dst, %c0 : memref<?xi32>
   %cast_dst = memref_cast %dst : memref<?xi32> to memref<*xi32>
   call @mcuMemHostRegisterInt32(%cast_dst) : (memref<*xi32>) -> ()
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)