diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -2820,8 +2820,8 @@
     ```
   }];

-  let arguments = (ins SignlessIntegerLike:$value);
-  let results = (outs SignlessIntegerLike);
+  let arguments = (ins SignedOrSignlessIntegerLike:$value);
+  let results = (outs SignedOrSignlessIntegerLike);

   let builders = [
     OpBuilderDAG<(ins "Value":$value, "Type":$destType), [{
diff --git a/mlir/include/mlir/IR/OpBase.td b/mlir/include/mlir/IR/OpBase.td
--- a/mlir/include/mlir/IR/OpBase.td
+++ b/mlir/include/mlir/IR/OpBase.td
@@ -728,6 +728,36 @@
         TensorOf<[AnySignlessInteger]>.predicate]>,
     "signless-integer-like">;

+// Type constraint for signed-or-signless-integer-like types:
+// signed integers, indices, vectors of signed integers,
+// tensors of signed integers
+// as well as
+// signless integers, indices, vectors of signless integers,
+// tensors of signless integers.
+def SignedOrSignlessIntegerLike : TypeConstraint<Or<[
+        AnySignlessInteger.predicate, Index.predicate,
+        VectorOf<[AnySignlessInteger]>.predicate,
+        TensorOf<[AnySignlessInteger]>.predicate,
+        AnySignedInteger.predicate, Index.predicate,
+        VectorOf<[AnySignedInteger]>.predicate,
+        TensorOf<[AnySignedInteger]>.predicate]>,
+    "signed-or-signless-integer-like">;
+
+// Type constraint for unsigned-or-signless-integer-like types:
+// unsigned integers, indices, vectors of unsigned integers,
+// tensors of unsigned integers
+// as well as
+// signless integers, indices, vectors of signless integers,
+// tensors of signless integers.
+def UnsignedOrSignlessIntegerLike : TypeConstraint<Or<[
+        AnySignlessInteger.predicate, Index.predicate,
+        VectorOf<[AnySignlessInteger]>.predicate,
+        TensorOf<[AnySignlessInteger]>.predicate,
+        AnyUnsignedInteger.predicate, Index.predicate,
+        VectorOf<[AnyUnsignedInteger]>.predicate,
+        TensorOf<[AnyUnsignedInteger]>.predicate]>,
+    "unsigned-or-signless-integer-like">;
+
 // Type constraint for float-like types: floats, vectors or tensors thereof.
 def FloatLike : TypeConstraint<Or<[AnyFloat.predicate,
         VectorOf<[AnyFloat]>.predicate, TensorOf<[AnyFloat]>.predicate]>,
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -24,7 +24,7 @@
 // CHECK-DAG: #[[$SUBVIEW_MAP8:map[0-9]+]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4)>

 // CHECK-LABEL: func @func_with_ops
-// CHECK-SAME: %[[ARG:.*]]: f32
+// CHECK-SAME: %[[ARG:[0-9a-z]*]]: f32
 func @func_with_ops(f32) {
 ^bb0(%a : f32):
   // CHECK: %[[T:.*]] = "getTensor"() : () -> tensor<4x4x?xf32>
@@ -35,7 +35,7 @@
   %c2 = constant 2 : index
   %t2 = "std.dim"(%t, %c2) : (tensor<4x4x?xf32>, index) -> index

-  // CHECK: %{{.*}} = addf %[[ARG]], %[[ARG]] : f32
+  // CHECK: addf %[[ARG]], %[[ARG]] : f32
   %x = "std.addf"(%a, %a) : (f32,f32) -> (f32)

   // CHECK: return
@@ -46,35 +46,35 @@
 func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
 ^bb42(%t: tensor<4x4x?xf32>, %f: f32, %i: i32, %idx : index, %j: i64, %half: f16):
   // CHECK: %[[C2:.*]] = constant 2 : index
-  // CHECK: %[[A2:.*]] = dim %arg0, %[[C2]] : tensor<4x4x?xf32>
+  // CHECK: %[[A2:.*]] = dim %{{.*}}, %[[C2]] : tensor<4x4x?xf32>
   %c2 = constant 2 : index
   %a2 = dim %t, %c2 : tensor<4x4x?xf32>

-  // CHECK: %[[F2:.*]] = addf %arg1, %arg1 : f32
+  // CHECK: %[[F2:.*]] = addf %{{.*}} : f32
   %f2 = "std.addf"(%f, %f) : (f32,f32) -> f32

   // CHECK: %[[F3:.*]] = addf %[[F2]], %[[F2]] : f32
   %f3 = addf %f2, %f2 : f32

-  // CHECK: %[[I2:.*]] = addi %arg2, %arg2 : i32
+  // CHECK: %[[I2:.*]] = addi %{{.*}} : i32
   %i2 = "std.addi"(%i, %i) : (i32,i32) -> i32

   // CHECK: %[[I3:.*]] = addi %[[I2]], %[[I2]] : i32
   %i3 = addi %i2, %i2 : i32

-  // CHECK: %[[IDX1:.*]] = addi %arg3, %arg3 : index
+  // CHECK: %[[IDX1:.*]] = addi %{{.*}} : index
   %idx1 = addi %idx, %idx : index

-  // CHECK: %[[IDX2:.*]] = addi %arg3, %[[IDX1]] : index
+  // CHECK: %[[IDX2:.*]] = addi %{{.*}}, %[[IDX1]] : index
   %idx2 = "std.addi"(%idx, %idx1) : (index, index) -> index

-  // CHECK: %[[F4:.*]] = subf %arg1, %arg1 : f32
+  // CHECK: %[[F4:.*]] = subf %{{.*}} : f32
   %f4 = "std.subf"(%f, %f) : (f32,f32) -> f32

   // CHECK: %[[F5:.*]] = subf %[[F4]], %[[F4]] : f32
   %f5 = subf %f4, %f4 : f32

-  // CHECK: %[[I4:.*]] = subi %arg2, %arg2 : i32
+  // CHECK: %[[I4:.*]] = subi %{{.*}} : i32
   %i4 = "std.subi"(%i, %i) : (i32,i32) -> i32

   // CHECK: %[[I5:.*]] = subi %[[I4]], %[[I4]] : i32
@@ -104,214 +104,217 @@
   // CHECK: %[[IMAG1:.*]] = im %[[CPLX0]] : complex<f32>
   %imag1 = im %c0 : complex<f32>

-  // CHECK: %c42_i32 = constant 42 : i32
+  // CHECK: constant 42 : i32
   %x = "std.constant"(){value = 42 : i32} : () -> i32

-  // CHECK: %c42_i32_0 = constant 42 : i32
+  // CHECK: constant 42 : i32
   %7 = constant 42 : i32

-  // CHECK: %c43 = constant {crazy = "std.foo"} 43 : index
+  // CHECK: constant {crazy = "std.foo"} 43 : index
   %8 = constant {crazy = "std.foo"} 43: index

-  // CHECK: %cst = constant 4.300000e+01 : bf16
+  // CHECK: constant 4.300000e+01 : bf16
   %9 = constant 43.0 : bf16

-  // CHECK: %f = constant @func_with_ops : (f32) -> ()
+  // CHECK: constant @func_with_ops : (f32) -> ()
   %10 = constant @func_with_ops : (f32) -> ()

-  // CHECK: %f_1 = constant @affine_apply : () -> ()
+  // CHECK: constant @affine_apply : () -> ()
   %11 = constant @affine_apply : () -> ()

-  // CHECK: %f_2 = constant @affine_apply : () -> ()
+  // CHECK: constant @affine_apply : () -> ()
   %12 = constant @affine_apply : () -> ()

-  // CHECK: %cst_3 = constant dense<0> : vector<4xi32>
+  // CHECK: constant dense<0> : vector<4xi32>
   %13 = constant dense<0> : vector<4 x i32>

-  // CHECK: %cst_4 = constant dense<0> : tensor<42xi32>
+  // CHECK: constant dense<0> : tensor<42xi32>
   %tci32 = constant dense<0> : tensor<42 x i32>

-  // CHECK: %cst_5 = constant dense<0> : vector<42xi32>
+  // CHECK: constant dense<0> : vector<42xi32>
   %vci32 = constant dense<0> : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = cmpi "eq", %{{[0-9]+}}, %{{[0-9]+}} : i32
+  // CHECK: constant dense<0> : vector<42xsi32>
+  %vci32b = constant dense<0> : vector<42 x si32>
+
+  // CHECK: cmpi "eq", %{{[0-9]+}}, %{{[0-9]+}} : i32
   %14 = cmpi "eq", %i3, %i4 : i32

   // Predicate 1 means inequality comparison.
-  // CHECK: %{{[0-9]+}} = cmpi "ne", %{{[0-9]+}}, %{{[0-9]+}} : i32
+  // CHECK: cmpi "ne", %{{[0-9]+}}, %{{[0-9]+}} : i32
   %15 = "std.cmpi"(%i3, %i4) {predicate = 1} : (i32, i32) -> i1

-  // CHECK: %{{[0-9]+}} = cmpi "slt", %cst_3, %cst_3 : vector<4xi32>
+  // CHECK: cmpi "slt", %{{.*}} : vector<4xi32>
   %16 = cmpi "slt", %13, %13 : vector<4 x i32>

-  // CHECK: %{{[0-9]+}} = cmpi "ne", %cst_3, %cst_3 : vector<4xi32>
+  // CHECK: cmpi "ne", %{{.*}} : vector<4xi32>
   %17 = "std.cmpi"(%13, %13) {predicate = 1} : (vector<4 x i32>, vector<4 x i32>) -> vector<4 x i1>

-  // CHECK: %{{[0-9]+}} = cmpi "slt", %arg3, %arg3 : index
+  // CHECK: cmpi "slt", %{{.*}} : index
   %18 = cmpi "slt", %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = cmpi "eq", %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: cmpi "eq", %{{.*}} : tensor<42xi32>
   %19 = cmpi "eq", %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = cmpi "eq", %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: cmpi "eq", %{{.*}} : vector<42xi32>
   %20 = cmpi "eq", %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = select %{{[0-9]+}}, %arg3, %arg3 : index
+  // CHECK: select %{{[0-9]+}}, %{{.*}} : index
   %21 = select %18, %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = select %{{[0-9]+}}, %cst_4, %cst_4 : tensor<42xi1>, tensor<42xi32>
+  // CHECK: select %{{[0-9]+}}, %{{.*}} : tensor<42xi1>, tensor<42xi32>
   %22 = select %19, %tci32, %tci32 : tensor<42 x i1>, tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = select %{{[0-9]+}}, %cst_5, %cst_5 : vector<42xi1>, vector<42xi32>
+  // CHECK: select %{{[0-9]+}}, %{{.*}} : vector<42xi1>, vector<42xi32>
   %23 = select %20, %vci32, %vci32 : vector<42 x i1>, vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = select %{{[0-9]+}}, %arg3, %arg3 : index
+  // CHECK: select %{{[0-9]+}}, %{{.*}} : index
   %24 = "std.select"(%18, %idx, %idx) : (i1, index, index) -> index

-  // CHECK: %{{[0-9]+}} = select %{{[0-9]+}}, %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: select %{{[0-9]+}}, %{{.*}} : tensor<42xi32>
   %25 = std.select %18, %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = divi_signed %arg2, %arg2 : i32
+  // CHECK: divi_signed %{{.*}} : i32
   %26 = divi_signed %i, %i : i32

-  // CHECK: %{{[0-9]+}} = divi_signed %arg3, %arg3 : index
+  // CHECK: divi_signed %{{.*}} : index
   %27 = divi_signed %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = divi_signed %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: divi_signed %{{.*}} : vector<42xi32>
   %28 = divi_signed %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = divi_signed %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: divi_signed %{{.*}} : tensor<42xi32>
   %29 = divi_signed %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = divi_signed %arg2, %arg2 : i32
+  // CHECK: divi_signed %{{.*}} : i32
   %30 = "std.divi_signed"(%i, %i) : (i32, i32) -> i32

-  // CHECK: %{{[0-9]+}} = divi_unsigned %arg2, %arg2 : i32
+  // CHECK: divi_unsigned %{{.*}} : i32
   %31 = divi_unsigned %i, %i : i32

-  // CHECK: %{{[0-9]+}} = divi_unsigned %arg3, %arg3 : index
+  // CHECK: divi_unsigned %{{.*}} : index
   %32 = divi_unsigned %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = divi_unsigned %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: divi_unsigned %{{.*}} : vector<42xi32>
   %33 = divi_unsigned %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = divi_unsigned %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: divi_unsigned %{{.*}} : tensor<42xi32>
   %34 = divi_unsigned %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = divi_unsigned %arg2, %arg2 : i32
+  // CHECK: divi_unsigned %{{.*}} : i32
   %35 = "std.divi_unsigned"(%i, %i) : (i32, i32) -> i32

-  // CHECK: %{{[0-9]+}} = remi_signed %arg2, %arg2 : i32
+  // CHECK: remi_signed %{{.*}} : i32
   %36 = remi_signed %i, %i : i32

-  // CHECK: %{{[0-9]+}} = remi_signed %arg3, %arg3 : index
+  // CHECK: remi_signed %{{.*}} : index
   %37 = remi_signed %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = remi_signed %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: remi_signed %{{.*}} : vector<42xi32>
   %38 = remi_signed %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = remi_signed %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: remi_signed %{{.*}} : tensor<42xi32>
   %39 = remi_signed %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = remi_signed %arg2, %arg2 : i32
+  // CHECK: remi_signed %{{.*}} : i32
   %40 = "std.remi_signed"(%i, %i) : (i32, i32) -> i32

-  // CHECK: %{{[0-9]+}} = remi_unsigned %arg2, %arg2 : i32
+  // CHECK: remi_unsigned %{{.*}} : i32
   %41 = remi_unsigned %i, %i : i32

-  // CHECK: %{{[0-9]+}} = remi_unsigned %arg3, %arg3 : index
+  // CHECK: remi_unsigned %{{.*}} : index
   %42 = remi_unsigned %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = remi_unsigned %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: remi_unsigned %{{.*}} : vector<42xi32>
   %43 = remi_unsigned %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = remi_unsigned %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: remi_unsigned %{{.*}} : tensor<42xi32>
   %44 = remi_unsigned %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = remi_unsigned %arg2, %arg2 : i32
+  // CHECK: remi_unsigned %{{.*}} : i32
   %45 = "std.remi_unsigned"(%i, %i) : (i32, i32) -> i32

-  // CHECK: %{{[0-9]+}} = divf %arg1, %arg1 : f32
+  // CHECK: divf %{{.*}} : f32
   %46 = "std.divf"(%f, %f) : (f32,f32) -> f32

-  // CHECK: %{{[0-9]+}} = divf %arg1, %arg1 : f32
+  // CHECK: divf %{{.*}} : f32
   %47 = divf %f, %f : f32

-  // CHECK: %{{[0-9]+}} = divf %arg0, %arg0 : tensor<4x4x?xf32>
+  // CHECK: divf %{{.*}} : tensor<4x4x?xf32>
   %48 = divf %t, %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = remf %arg1, %arg1 : f32
+  // CHECK: remf %{{.*}} : f32
   %49 = "std.remf"(%f, %f) : (f32,f32) -> f32

-  // CHECK: %{{[0-9]+}} = remf %arg1, %arg1 : f32
+  // CHECK: remf %{{.*}} : f32
   %50 = remf %f, %f : f32

-  // CHECK: %{{[0-9]+}} = remf %arg0, %arg0 : tensor<4x4x?xf32>
+  // CHECK: remf %{{.*}} : tensor<4x4x?xf32>
   %51 = remf %t, %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = and %arg2, %arg2 : i32
+  // CHECK: and %{{.*}} : i32
   %52 = "std.and"(%i, %i) : (i32,i32) -> i32

-  // CHECK: %{{[0-9]+}} = and %arg2, %arg2 : i32
+  // CHECK: and %{{.*}} : i32
   %53 = and %i, %i : i32

-  // CHECK: %{{[0-9]+}} = and %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: and %{{.*}} : vector<42xi32>
   %54 = std.and %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = and %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: and %{{.*}} : tensor<42xi32>
   %55 = and %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = or %arg2, %arg2 : i32
+  // CHECK: or %{{.*}} : i32
   %56 = "std.or"(%i, %i) : (i32,i32) -> i32

-  // CHECK: %{{[0-9]+}} = or %arg2, %arg2 : i32
+  // CHECK: or %{{.*}} : i32
   %57 = or %i, %i : i32

-  // CHECK: %{{[0-9]+}} = or %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: or %{{.*}} : vector<42xi32>
   %58 = std.or %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = or %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: or %{{.*}} : tensor<42xi32>
   %59 = or %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = xor %arg2, %arg2 : i32
+  // CHECK: xor %{{.*}} : i32
   %60 = "std.xor"(%i, %i) : (i32,i32) -> i32

-  // CHECK: %{{[0-9]+}} = xor %arg2, %arg2 : i32
+  // CHECK: xor %{{.*}} : i32
   %61 = xor %i, %i : i32

-  // CHECK: %{{[0-9]+}} = xor %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: xor %{{.*}} : vector<42xi32>
   %62 = std.xor %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = xor %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: xor %{{.*}} : tensor<42xi32>
   %63 = xor %tci32, %tci32 : tensor<42 x i32>

   %64 = constant dense<0.> : vector<4 x f32>
   %tcf32 = constant dense<0.> : tensor<42 x f32>
   %vcf32 = constant dense<0.> : vector<4 x f32>

-  // CHECK: %{{[0-9]+}} = cmpf "ogt", %{{[0-9]+}}, %{{[0-9]+}} : f32
+  // CHECK: cmpf "ogt", %{{[0-9]+}}, %{{[0-9]+}} : f32
   %65 = cmpf "ogt", %f3, %f4 : f32

   // Predicate 0 means ordered equality comparison.
-  // CHECK: %{{[0-9]+}} = cmpf "oeq", %{{[0-9]+}}, %{{[0-9]+}} : f32
+  // CHECK: cmpf "oeq", %{{[0-9]+}}, %{{[0-9]+}} : f32
   %66 = "std.cmpf"(%f3, %f4) {predicate = 1} : (f32, f32) -> i1

-  // CHECK: %{{[0-9]+}} = cmpf "olt", %cst_8, %cst_8 : vector<4xf32>
+  // CHECK: cmpf "olt", %{{.*}} : vector<4xf32>
   %67 = cmpf "olt", %vcf32, %vcf32 : vector<4 x f32>

-  // CHECK: %{{[0-9]+}} = cmpf "oeq", %cst_8, %cst_8 : vector<4xf32>
+  // CHECK: cmpf "oeq", %{{.*}} : vector<4xf32>
   %68 = "std.cmpf"(%vcf32, %vcf32) {predicate = 1} : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x i1>

-  // CHECK: %{{[0-9]+}} = cmpf "oeq", %cst_7, %cst_7 : tensor<42xf32>
+  // CHECK: cmpf "oeq", %{{.*}} : tensor<42xf32>
   %69 = cmpf "oeq", %tcf32, %tcf32 : tensor<42 x f32>

-  // CHECK: %{{[0-9]+}} = cmpf "oeq", %cst_8, %cst_8 : vector<4xf32>
+  // CHECK: cmpf "oeq", %{{.*}} : vector<4xf32>
   %70 = cmpf "oeq", %vcf32, %vcf32 : vector<4 x f32>

-  // CHECK: %{{[0-9]+}} = rank %arg0 : tensor<4x4x?xf32>
+  // CHECK: rank %{{.*}} : tensor<4x4x?xf32>
   %71 = "std.rank"(%t) : (tensor<4x4x?xf32>) -> index

-  // CHECK: %{{[0-9]+}} = rank %arg0 : tensor<4x4x?xf32>
+  // CHECK: rank %{{.*}} : tensor<4x4x?xf32>
   %72 = rank %t : tensor<4x4x?xf32>

   // CHECK: = constant unit
@@ -341,40 +344,49 @@
   // CHECK: = sitofp {{.*}} : i64 to f64
   %81 = sitofp %j : i64 to f64

-  // CHECK: = sexti %arg2 : i32 to i64
+  // CHECK: = sexti %{{.*}} : i32 to i64
   %82 = "std.sexti"(%i) : (i32) -> i64

-  // CHECK: = sexti %arg2 : i32 to i64
+  // CHECK: = sexti %{{.*}} : i32 to i64
   %83 = sexti %i : i32 to i64

-  // CHECK: %{{[0-9]+}} = sexti %cst_5 : vector<42xi32>
+  // CHECK: sexti %{{.*}} : vector<42xi32>
   %84 = sexti %vci32 : vector<42 x i32> to vector<42 x i64>

-  // CHECK: %{{[0-9]+}} = sexti %cst_4 : tensor<42xi32>
+  // CHECK: sexti %{{.*}} : vector<42xsi32> to vector<42xi64>
+  %84001 = sexti %vci32b : vector<42 x si32> to vector<42 x i64>
+
+  // CHECK: sexti %{{.*}} : vector<42xsi32> to vector<42xsi64>
+  %84002 = sexti %vci32b : vector<42 x si32> to vector<42 x si64>
+
+  // CHECK: sexti %{{.*}} : vector<42xi32> to vector<42xsi64>
+  %84003 = sexti %vci32 : vector<42 x i32> to vector<42 x si64>
+
+  // CHECK: sexti %{{.*}} : tensor<42xi32>
   %85 = sexti %tci32 : tensor<42 x i32> to tensor<42 x i64>

-  // CHECK: = zexti %arg2 : i32 to i64
+  // CHECK: = zexti %{{.*}} : i32 to i64
   %86 = "std.zexti"(%i) : (i32) -> i64

-  // CHECK: = zexti %arg2 : i32 to i64
+  // CHECK: = zexti %{{.*}} : i32 to i64
   %87 = zexti %i : i32 to i64

-  // CHECK: %{{[0-9]+}} = zexti %cst_5 : vector<42xi32>
+  // CHECK: zexti %{{.*}} : vector<42xi32>
   %88 = zexti %vci32 : vector<42 x i32> to vector<42 x i64>

-  // CHECK: %{{[0-9]+}} = zexti %cst_4 : tensor<42xi32>
+  // CHECK: zexti %{{.*}} : tensor<42xi32>
   %89 = zexti %tci32 : tensor<42 x i32> to tensor<42 x i64>

-  // CHECK: = trunci %arg2 : i32 to i16
+  // CHECK: = trunci %{{.*}} : i32 to i16
   %90 = "std.trunci"(%i) : (i32) -> i16

-  // CHECK: = trunci %arg2 : i32 to i16
+  // CHECK: = trunci %{{.*}} : i32 to i16
   %91 = trunci %i : i32 to i16

-  // CHECK: %{{[0-9]+}} = trunci %cst_5 : vector<42xi32>
+  // CHECK: trunci %{{.*}} : vector<42xi32>
   %92 = trunci %vci32 : vector<42 x i32> to vector<42 x i16>

-  // CHECK: %{{[0-9]+}} = trunci %cst_4 : tensor<42xi32>
+  // CHECK: trunci %{{.*}} : tensor<42xi32>
   %93 = trunci %tci32 : tensor<42 x i32> to tensor<42 x i16>

   // CHECK: = fpext {{.*}} : f16 to f32
@@ -383,145 +395,145 @@
   // CHECK: = fptrunc {{.*}} : f32 to f16
   %95 = fptrunc %f : f32 to f16

-  // CHECK: %{{[0-9]+}} = exp %arg1 : f32
+  // CHECK: exp %{{.*}} : f32
   %96 = "std.exp"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = exp %arg1 : f32
+  // CHECK: exp %{{.*}} : f32
   %97 = exp %f : f32

-  // CHECK: %{{[0-9]+}} = exp %cst_8 : vector<4xf32>
+  // CHECK: exp %{{.*}} : vector<4xf32>
   %98 = exp %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = exp %arg0 : tensor<4x4x?xf32>
+  // CHECK: exp %{{.*}} : tensor<4x4x?xf32>
   %99 = exp %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = absf %arg1 : f32
+  // CHECK: absf %{{.*}} : f32
   %100 = "std.absf"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = absf %arg1 : f32
+  // CHECK: absf %{{.*}} : f32
   %101 = absf %f : f32

-  // CHECK: %{{[0-9]+}} = absf %cst_8 : vector<4xf32>
+  // CHECK: absf %{{.*}} : vector<4xf32>
   %102 = absf %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = absf %arg0 : tensor<4x4x?xf32>
+  // CHECK: absf %{{.*}} : tensor<4x4x?xf32>
   %103 = absf %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = ceilf %arg1 : f32
+  // CHECK: ceilf %{{.*}} : f32
   %104 = "std.ceilf"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = ceilf %arg1 : f32
+  // CHECK: ceilf %{{.*}} : f32
   %105 = ceilf %f : f32

-  // CHECK: %{{[0-9]+}} = ceilf %cst_8 : vector<4xf32>
+  // CHECK: ceilf %{{.*}} : vector<4xf32>
   %106 = ceilf %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = ceilf %arg0 : tensor<4x4x?xf32>
+  // CHECK: ceilf %{{.*}} : tensor<4x4x?xf32>
   %107 = ceilf %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = cos %arg1 : f32
+  // CHECK: cos %{{.*}} : f32
   %108 = "std.cos"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = cos %arg1 : f32
+  // CHECK: cos %{{.*}} : f32
   %109 = cos %f : f32

-  // CHECK: %{{[0-9]+}} = cos %cst_8 : vector<4xf32>
+  // CHECK: cos %{{.*}} : vector<4xf32>
   %110 = cos %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = cos %arg0 : tensor<4x4x?xf32>
+  // CHECK: cos %{{.*}} : tensor<4x4x?xf32>
   %111 = cos %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = negf %arg1 : f32
+  // CHECK: negf %{{.*}} : f32
   %112 = "std.negf"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = negf %arg1 : f32
+  // CHECK: negf %{{.*}} : f32
   %113 = negf %f : f32

-  // CHECK: %{{[0-9]+}} = negf %cst_8 : vector<4xf32>
+  // CHECK: negf %{{.*}} : vector<4xf32>
   %114 = negf %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = negf %arg0 : tensor<4x4x?xf32>
+  // CHECK: negf %{{.*}} : tensor<4x4x?xf32>
   %115 = negf %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = copysign %arg1, %arg1 : f32
+  // CHECK: copysign %{{.*}} : f32
   %116 = "std.copysign"(%f, %f) : (f32, f32) -> f32

-  // CHECK: %{{[0-9]+}} = copysign %arg1, %arg1 : f32
+  // CHECK: copysign %{{.*}} : f32
   %117 = copysign %f, %f : f32

-  // CHECK: %{{[0-9]+}} = copysign %cst_8, %cst_8 : vector<4xf32>
+  // CHECK: copysign %{{.*}} : vector<4xf32>
   %118 = copysign %vcf32, %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = copysign %arg0, %arg0 : tensor<4x4x?xf32>
+  // CHECK: copysign %{{.*}} : tensor<4x4x?xf32>
   %119 = copysign %t, %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = tanh %arg1 : f32
+  // CHECK: tanh %{{.*}} : f32
   %120 = "std.tanh"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = tanh %arg1 : f32
+  // CHECK: tanh %{{.*}} : f32
   %121 = tanh %f : f32

-  // CHECK: %{{[0-9]+}} = tanh %cst_8 : vector<4xf32>
+  // CHECK: tanh %{{.*}} : vector<4xf32>
   %122 = tanh %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = tanh %arg0 : tensor<4x4x?xf32>
+  // CHECK: tanh %{{.*}} : tensor<4x4x?xf32>
   %123 = tanh %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = shift_left %arg2, %arg2 : i32
+  // CHECK: shift_left %{{.*}} : i32
   %124 = "std.shift_left"(%i, %i) : (i32, i32) -> i32

   // CHECK:%{{[0-9]+}} = shift_left %[[I2]], %[[I2]] : i32
   %125 = shift_left %i2, %i2 : i32

-  // CHECK: %{{[0-9]+}} = shift_left %arg3, %arg3 : index
+  // CHECK: shift_left %{{.*}} : index
   %126 = shift_left %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = shift_left %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: shift_left %{{.*}} : vector<42xi32>
   %127 = shift_left %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = shift_left %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: shift_left %{{.*}} : tensor<42xi32>
   %128 = shift_left %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = shift_right_signed %arg2, %arg2 : i32
+  // CHECK: shift_right_signed %{{.*}} : i32
   %129 = "std.shift_right_signed"(%i, %i) : (i32, i32) -> i32

   // CHECK:%{{[0-9]+}} = shift_right_signed %[[I2]], %[[I2]] : i32
   %130 = shift_right_signed %i2, %i2 : i32

-  // CHECK: %{{[0-9]+}} = shift_right_signed %arg3, %arg3 : index
+  // CHECK: shift_right_signed %{{.*}} : index
   %131 = shift_right_signed %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = shift_right_signed %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: shift_right_signed %{{.*}} : vector<42xi32>
   %132 = shift_right_signed %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = shift_right_signed %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: shift_right_signed %{{.*}} : tensor<42xi32>
   %133 = shift_right_signed %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = shift_right_unsigned %arg2, %arg2 : i32
+  // CHECK: shift_right_unsigned %{{.*}} : i32
   %134 = "std.shift_right_unsigned"(%i, %i) : (i32, i32) -> i32

   // CHECK:%{{[0-9]+}} = shift_right_unsigned %[[I2]], %[[I2]] : i32
   %135 = shift_right_unsigned %i2, %i2 : i32

-  // CHECK: %{{[0-9]+}} = shift_right_unsigned %arg3, %arg3 : index
+  // CHECK: shift_right_unsigned %{{.*}} : index
   %136 = shift_right_unsigned %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = shift_right_unsigned %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: shift_right_unsigned %{{.*}} : vector<42xi32>
   %137 = shift_right_unsigned %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = shift_right_unsigned %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: shift_right_unsigned %{{.*}} : tensor<42xi32>
   %138 = shift_right_unsigned %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = sqrt %arg1 : f32
+  // CHECK: sqrt %{{.*}} : f32
   %139 = "std.sqrt"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = sqrt %arg1 : f32
+  // CHECK: sqrt %{{.*}} : f32
   %140 = sqrt %f : f32

-  // CHECK: %{{[0-9]+}} = sqrt %cst_8 : vector<4xf32>
+  // CHECK: sqrt %{{.*}} : vector<4xf32>
   %141 = sqrt %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = sqrt %arg0 : tensor<4x4x?xf32>
+  // CHECK: sqrt %{{.*}} : tensor<4x4x?xf32>
   %142 = sqrt %t : tensor<4x4x?xf32>

   // CHECK: = fpext {{.*}} : vector<4xf32> to vector<4xf64>
@@ -530,19 +542,19 @@
   // CHECK: = fptrunc {{.*}} : vector<4xf32> to vector<4xf16>
   %144 = fptrunc %vcf32 : vector<4xf32> to vector<4xf16>

-  // CHECK: %{{[0-9]+}} = rsqrt %arg1 : f32
+  // CHECK: rsqrt %{{.*}} : f32
   %145 = rsqrt %f : f32

-  // CHECK: %{{[0-9]+}} = sin %arg1 : f32
+  // CHECK: sin %{{.*}} : f32
   %146 = "std.sin"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = sin %arg1 : f32
+  // CHECK: sin %{{.*}} : f32
   %147 = sin %f : f32

-  // CHECK: %{{[0-9]+}} = sin %cst_8 : vector<4xf32>
+  // CHECK: sin %{{.*}} : vector<4xf32>
   %148 = sin %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = sin %arg0 : tensor<4x4x?xf32>
+  // CHECK: sin %{{.*}} : tensor<4x4x?xf32>
   %149 = sin %t : tensor<4x4x?xf32>

   // CHECK: = fptosi {{.*}} : f32 to i32
@@ -557,40 +569,40 @@
   // CHECK: = fptosi {{.*}} : f16 to i64
   %162 = fptosi %half : f16 to i64

-  // CHECK: floorf %arg1 : f32
+  // CHECK: floorf %{{.*}} : f32
   %163 = "std.floorf"(%f) : (f32) -> f32

-  // CHECK: %{{[0-9]+}} = floorf %arg1 : f32
+  // CHECK: floorf %{{.*}} : f32
   %164 = floorf %f : f32

-  // CHECK: %{{[0-9]+}} = floorf %cst_8 : vector<4xf32>
+  // CHECK: floorf %{{.*}} : vector<4xf32>
   %165 = floorf %vcf32 : vector<4xf32>

-  // CHECK: %{{[0-9]+}} = floorf %arg0 : tensor<4x4x?xf32>
+  // CHECK: floorf %{{.*}} : tensor<4x4x?xf32>
   %166 = floorf %t : tensor<4x4x?xf32>

-  // CHECK: %{{[0-9]+}} = floordivi_signed %arg2, %arg2 : i32
+  // CHECK: floordivi_signed %{{.*}} : i32
   %167 = floordivi_signed %i, %i : i32

-  // CHECK: %{{[0-9]+}} = floordivi_signed %arg3, %arg3 : index
+  // CHECK: floordivi_signed %{{.*}} : index
   %168 = floordivi_signed %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = floordivi_signed %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: floordivi_signed %{{.*}} : vector<42xi32>
   %169 = floordivi_signed %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = floordivi_signed %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: floordivi_signed %{{.*}} : tensor<42xi32>
   %170 = floordivi_signed %tci32, %tci32 : tensor<42 x i32>

-  // CHECK: %{{[0-9]+}} = ceildivi_signed %arg2, %arg2 : i32
+  // CHECK: ceildivi_signed %{{.*}} : i32
   %171 = ceildivi_signed %i, %i : i32

-  // CHECK: %{{[0-9]+}} = ceildivi_signed %arg3, %arg3 : index
+  // CHECK: ceildivi_signed %{{.*}} : index
   %172 = ceildivi_signed %idx, %idx : index

-  // CHECK: %{{[0-9]+}} = ceildivi_signed %cst_5, %cst_5 : vector<42xi32>
+  // CHECK: ceildivi_signed %{{.*}} : vector<42xi32>
   %173 = ceildivi_signed %vci32, %vci32 : vector<42 x i32>

-  // CHECK: %{{[0-9]+}} = ceildivi_signed %cst_4, %cst_4 : tensor<42xi32>
+  // CHECK: ceildivi_signed %{{.*}} : tensor<42xi32>
   %174 = ceildivi_signed %tci32, %tci32 : tensor<42 x i32>

   return
@@ -601,11 +613,11 @@
   %i = "std.constant"() {value = 0: index} : () -> index
   %j = "std.constant"() {value = 1: index} : () -> index

-  // CHECK: affine.apply #map0(%c0)
+  // CHECK: affine.apply #map0(%{{.*}})
   %a = "affine.apply" (%i) { map = affine_map<(d0) -> (d0 + 1)> } : (index) -> (index)

-  // CHECK: affine.apply #map1()[%c0]
+  // CHECK: affine.apply #map1()[%{{.*}}]
   %b = affine.apply affine_map<()[x] -> (x+1)>()[%i]

   return
@@ -614,16 +626,16 @@
 // CHECK-LABEL: func @load_store_prefetch
 func @load_store_prefetch(memref<4x4xi32>, index) {
 ^bb0(%0: memref<4x4xi32>, %1: index):
-  // CHECK: %0 = load %arg0[%arg1, %arg1] : memref<4x4xi32>
+  // CHECK: load %{{.*}}[%{{.*}}] : memref<4x4xi32>
   %2 = "std.load"(%0, %1, %1) : (memref<4x4xi32>, index, index)->i32

-  // CHECK: %{{.*}} = load %arg0[%arg1, %arg1] : memref<4x4xi32>
+  // CHECK: load %{{.*}}[%{{.*}}] : memref<4x4xi32>
   %3 = load %0[%1, %1] : memref<4x4xi32>

-  // CHECK: prefetch %arg0[%arg1, %arg1], write, locality<1>, data : memref<4x4xi32>
+  // CHECK: prefetch %{{.*}}[%{{.*}}], write, locality<1>, data : memref<4x4xi32>
   prefetch %0[%1, %1], write, locality<1>, data : memref<4x4xi32>

-  // CHECK: prefetch %arg0[%arg1, %arg1], read, locality<3>, instr : memref<4x4xi32>
+  // CHECK: prefetch %{{.*}}[%{{.*}}], read, locality<3>, instr : memref<4x4xi32>
   prefetch %0[%1, %1], read, locality<3>, instr : memref<4x4xi32>

   return
@@ -635,38 +647,38 @@
   %0 = std.load %arg0[] : memref<i32>
   std.store %0, %arg1[] : memref<i32>
   return
-  // CHECK: %0 = load %{{.*}}[] : memref<i32>
-  // CHECK: store %{{.*}}, %{{.*}}[] : memref<i32>
+  // CHECK: load %{{.*}}[] : memref<i32>
+  // CHECK: store %{{.*}}[] : memref<i32>
 }

 // CHECK-LABEL: func @return_op(%arg0: i32) -> i32 {
 func @return_op(%a : i32) -> i32 {
-  // CHECK: return %arg0 : i32
+  // CHECK: return %{{.*}} : i32
   "std.return" (%a) : (i32)->()
 }

 // CHECK-LABEL: func @calls(%arg0: i32) {
 func @calls(%arg0: i32) {
-  // CHECK: %0 = call @return_op(%arg0) : (i32) -> i32
+  // CHECK: call @return_op(%{{.*}}) : (i32) -> i32
   %x = call @return_op(%arg0) : (i32) -> i32
-  // CHECK: %1 = call @return_op(%0) : (i32) -> i32
+  // CHECK: call @return_op(%{{.*}}) : (i32) -> i32
   %y = call @return_op(%x) : (i32) -> i32
-  // CHECK: %2 = call @return_op(%0) : (i32) -> i32
+  // CHECK: call @return_op(%{{.*}}) : (i32) -> i32
   %z = "std.call"(%x) {callee = @return_op} : (i32) -> i32

-  // CHECK: %f = constant @affine_apply : () -> ()
+  // CHECK: constant @affine_apply : () -> ()
   %f = constant @affine_apply : () -> ()

-  // CHECK: call_indirect %f() : () -> ()
+  // CHECK: call_indirect %{{.*}}() : () -> ()
   call_indirect %f() : () -> ()

-  // CHECK: %f_0 = constant @return_op : (i32) -> i32
+  // CHECK: constant @return_op : (i32) -> i32
   %f_0 = constant @return_op : (i32) -> i32

-  // CHECK: %3 = call_indirect %f_0(%arg0) : (i32) -> i32
+  // CHECK: call_indirect %{{.*}}(%{{.*}}) : (i32) -> i32
   %2 = call_indirect %f_0(%arg0) : (i32) -> i32

-  // CHECK: %4 = call_indirect %f_0(%arg0) : (i32) -> i32
+  // CHECK: call_indirect %{{.*}}(%{{.*}}) : (i32) -> i32
   %3 = "std.call_indirect"(%f_0, %arg0) : ((i32) -> i32, i32) -> i32

   return
@@ -676,10 +688,10 @@
 func @extract_element(%arg0: tensor<*xi32>, %arg1 : tensor<4x4xf32>) -> i32 {
   %c0 = "std.constant"() {value = 0: index} : () -> index

-  // CHECK: %0 = extract_element %arg0[%c0, %c0, %c0, %c0] : tensor<*xi32>
+  // CHECK: extract_element %{{.*}}[%{{.*}}] : tensor<*xi32>
   %0 = extract_element %arg0[%c0, %c0, %c0, %c0] : tensor<*xi32>

-  // CHECK: %1 = extract_element %arg1[%c0, %c0] : tensor<4x4xf32>
+  // CHECK: extract_element %{{.*}}[%{{.*}}] : tensor<4x4xf32>
   %1 = extract_element %arg1[%c0, %c0] : tensor<4x4xf32>

   return %0 : i32
@@ -688,16 +700,16 @@
 // CHECK-LABEL: func @tensor_from_elements() {
 func @tensor_from_elements() {
   %c0 = "std.constant"() {value = 0: index} : () -> index
-  // CHECK: %0 = tensor_from_elements %c0 : tensor<1xindex>
+  // CHECK: tensor_from_elements %{{.*}} : tensor<1xindex>
   %0 = tensor_from_elements %c0 : tensor<1xindex>

   %c1 = "std.constant"() {value = 1: index} : () -> index
-  // CHECK: %1 = tensor_from_elements %c0, %c1 : tensor<2xindex>
+  // CHECK: tensor_from_elements %{{.*}} : tensor<2xindex>
   %1 = tensor_from_elements %c0, %c1 : tensor<2xindex>

   %c0_f32 = "std.constant"() {value = 0.0: f32} : () -> f32
   // CHECK: [[C0_F32:%.*]] = constant
-  // CHECK: %2 = tensor_from_elements [[C0_F32]] : tensor<1xf32>
+  // CHECK: tensor_from_elements [[C0_F32]] : tensor<1xf32>
   %2 = tensor_from_elements %c0_f32 : tensor<1xf32>

   // CHECK: tensor_from_elements : tensor<0xindex>
@@ -708,16 +720,16 @@
 // CHECK-LABEL: func @tensor_cast(%arg0
 func @tensor_cast(%arg0: tensor<*xf32>, %arg1 : tensor<4x4xf32>, %arg2: tensor<?x?xf32>) {
-  // CHECK: %0 = tensor_cast %arg0 : tensor<*xf32> to tensor<?x?xf32>
+  // CHECK: tensor_cast %{{.*}} : tensor<*xf32> to tensor<?x?xf32>
   %0 = tensor_cast %arg0 : tensor<*xf32> to tensor<?x?xf32>

-  // CHECK: %1 = tensor_cast %arg1 : tensor<4x4xf32> to tensor<*xf32>
+  // CHECK: tensor_cast %{{.*}} : tensor<4x4xf32> to tensor<*xf32>
   %1 = tensor_cast %arg1 : tensor<4x4xf32> to tensor<*xf32>

-  // CHECK: %2 = tensor_cast %arg2 : tensor<?x?xf32> to tensor<4x?xf32>
+  // CHECK: tensor_cast %{{.*}} : tensor<?x?xf32> to tensor<4x?xf32>
   %2 = tensor_cast %arg2 : tensor<?x?xf32> to tensor<4x?xf32>

-  // CHECK: %3 = tensor_cast %2 : tensor<4x?xf32> to tensor<?x?xf32>
+  // CHECK: tensor_cast %{{.*}} : tensor<4x?xf32> to tensor<?x?xf32>
   %3 = tensor_cast %2 : tensor<4x?xf32> to tensor<?x?xf32>

   return
@@ -725,13 +737,13 @@
 // CHECK-LABEL: func @memref_cast(%arg0
 func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64x16x4xf32, offset: 0, strides: [64, 4, 1]>) {
-  // CHECK: %0 = memref_cast %arg0 : memref<4xf32> to memref<?xf32>
+  // CHECK: memref_cast %{{.*}} : memref<4xf32> to memref<?xf32>
   %0 = memref_cast %arg0 : memref<4xf32> to memref<?xf32>

-  // CHECK: %1 = memref_cast %arg1 : memref<?xf32> to memref<4xf32>
+  // CHECK: memref_cast %{{.*}} : memref<?xf32> to memref<4xf32>
   %1 = memref_cast %arg1 : memref<?xf32> to memref<4xf32>

-  // CHECK: {{%.*}} = memref_cast %arg2 : memref<64x16x4xf32, #[[$BASE_MAP0]]> to memref<64x16x4xf32, #[[$BASE_MAP3]]>
+  // CHECK: {{%.*}} = memref_cast %{{.*}} : memref<64x16x4xf32, #[[$BASE_MAP0]]> to memref<64x16x4xf32, #[[$BASE_MAP3]]>
   %2 = memref_cast %arg2 : memref<64x16x4xf32, offset: 0, strides: [64, 4, 1]> to memref<64x16x4xf32, offset: ?, strides: [?, ?, ?]>

   // CHECK: {{%.*}} = memref_cast {{%.*}} : memref<64x16x4xf32, #[[$BASE_MAP3]]> to memref<64x16x4xf32, #[[$BASE_MAP0]]>
@@ -754,15 +766,15 @@
 func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = alloc() : memref<2048xi8>
   // Test two dynamic sizes and dynamic offset.
-  // CHECK: %{{.*}} = std.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
+  // CHECK: std.view %{{.*}}[%{{.*}}][%{{.*}}] : memref<2048xi8> to memref<?x?xf32>
   %1 = view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>

   // Test one dynamic size and dynamic offset.
-  // CHECK: %{{.*}} = std.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
+  // CHECK: std.view %{{.*}}[%{{.*}}][%{{.*}}] : memref<2048xi8> to memref<4x?xf32>
   %3 = view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>

   // Test static sizes and static offset.
-  // CHECK: %{{.*}} = std.view %0[{{.*}}][] : memref<2048xi8> to memref<64x4xf32>
+  // CHECK: std.view %{{.*}}[{{.*}}][] : memref<2048xi8> to memref<64x4xf32>
   %c0 = constant 0: index
   %5 = view %0[%c0][] : memref<2048xi8> to memref<64x4xf32>
   return
@@ -774,94 +786,91 @@
   %c1 = constant 1 : index

   %0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
-  // CHECK: subview %0[%c0, %c0, %c0] [%arg0, %arg1, %arg2] [%c1, %c1, %c1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
-  // CHECK-SAME: to memref<?x?x?xf32, #[[$BASE_MAP3]]>
+  // CHECK: subview %{{.*}}: memref<8x16x4xf32, #[[$BASE_MAP0]]> to memref<?x?x?xf32, #[[$BASE_MAP3]]>
   %1 = subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1] : memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>

   %2 = alloc()[%arg2] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
-  // CHECK: subview %2[%c1] [%arg0] [%c1] :
-  // CHECK-SAME: memref<64xf32, #[[$BASE_MAP1]]>
-  // CHECK-SAME: to memref<?xf32, #[[$SUBVIEW_MAP1]]>
+  // CHECK: subview %{{.*}}: memref<64xf32, #[[$BASE_MAP1]]> to memref<?xf32, #[[$SUBVIEW_MAP1]]>
   %3 = subview %2[%c1][%arg0][%c1] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> to memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>

   %4 = alloc() : memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>>
-  // CHECK: subview %4[%c0, %c1] [%arg0, %arg1] [%c1, %c0] :
-  // CHECK-SAME: memref<64x22xf32, #[[$BASE_MAP2]]>
-  // CHECK-SAME: to memref<?x?xf32, #[[$SUBVIEW_MAP2]]>
+  // CHECK: subview %{{.*}}:
+  // CHECK-SAME: memref<64x22xf32, #[[$BASE_MAP2]]> to memref<?x?xf32, #[[$SUBVIEW_MAP2]]>
   %5 = subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0] : memref<64x22xf32, offset:0, strides: [22, 1]> to memref<?x?xf32, offset: ?, strides: [?, ?]>

-  // CHECK: subview %0[0, 2, 0] [4, 4, 4] [1, 1, 1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
-  // CHECK-SAME: to memref<4x4x4xf32, #[[$SUBVIEW_MAP3]]>
+  // CHECK: subview %{{.*}}[0, 2, 0] [4, 4, 4] [1, 1, 1] :
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to memref<4x4x4xf32, #[[$SUBVIEW_MAP3]]>
   %6 = subview %0[0, 2, 0][4, 4, 4][1, 1, 1] : memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to memref<4x4x4xf32, offset:8, strides: [64, 4, 1]>

   %7 = alloc(%arg1, %arg2) : memref<?x?xf32>
   // CHECK: subview {{%.*}}[0, 0] [4, 4] [1, 1] :
-  // CHECK-SAME: memref<?x?xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP4]]>
+  // CHECK-SAME: memref<?x?xf32> to memref<4x4xf32, #[[$SUBVIEW_MAP4]]>
   %8 = subview %7[0, 0][4, 4][1, 1] : memref<?x?xf32> to memref<4x4xf32, offset: ?, strides:[?, 1]>

   %9 = alloc() : memref<16x4xf32>
-  // CHECK: subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [{{%.*}}, {{%.*}}] :
-  // CHECK-SAME: memref<16x4xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP2]]
+  // CHECK: subview {{%.*}}[{{%.*}}] [4, 4] [{{%.*}}] :
+  // CHECK-SAME: memref<16x4xf32> to memref<4x4xf32, #[[$SUBVIEW_MAP2]]
   %10 = subview %9[%arg1, %arg1][4, 4][%arg2, %arg2] : memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[?, ?]>

-  // CHECK: subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [2, 2] :
-  // CHECK-SAME: memref<16x4xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP5]]
+  // CHECK: subview {{%.*}}[{{%.*}}] [4, 4] [2, 2] :
+  // CHECK-SAME: memref<16x4xf32> to memref<4x4xf32, #[[$SUBVIEW_MAP5]]
   %11 = subview %9[%arg1, %arg2][4, 4][2, 2] : memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[8, 2]>

+  // CHECK: alloc() : memref<1x9x1x4x1xf32
   %12 = alloc() : memref<1x9x1x4x1xf32, affine_map<(d0, d1, d2, d3, d4) -> (36 * d0 + 36 * d1 + 4 * d2 + 4 * d3 + d4)>>
-  // CHECK: subview %12[%arg1, %arg1, %arg1, %arg1, %arg1]
-  // CHECK-SAME: [1, 9, 1, 4, 1] [%arg2, %arg2, %arg2, %arg2, %arg2] :
+  // CHECK: subview %{{.*}}[%{{.*}}] [1, 9, 1, 4, 1] [%{{.*}}] :
   // CHECK-SAME: memref<1x9x1x4x1xf32, #[[$SUBVIEW_MAP6]]> to memref<9x4xf32, #[[$SUBVIEW_MAP2]]>
-  %13 = subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] : memref<1x9x1x4x1xf32, offset: 0, strides: [36, 36, 4, 4, 1]> to memref<9x4xf32, offset: ?, strides: [?, ?]>
-  // CHECK: subview %12[%arg1, %arg1, %arg1, %arg1, %arg1]
-  // CHECK-SAME: [1, 9, 1, 4, 1] [%arg2, %arg2, %arg2, %arg2, %arg2] :
-  // CHECK-SAME: memref<1x9x1x4x1xf32, #[[$SUBVIEW_MAP6]]> to memref<1x9x4xf32, #[[$BASE_MAP3]]>
-  %14 = subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] : memref<1x9x1x4x1xf32, offset: 0, strides: [36, 36, 4, 4, 1]> to memref<1x9x4xf32, offset: ?, strides: [?, ?, ?]>
-
-  %15 = alloc(%arg1, %arg2)[%c0, %c1, %arg1, %arg0, %arg0, %arg2, %arg2] : memref<1x?x5x1x?x1xf32, affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5, s6] -> (s0 + s1 * d0 + s2 * d1 + s3 * d2 + s4 * d3 + s5 * d4 + s6 * d5)>>
-  // CHECK: subview %15[0, 0, 0, 0, 0, 0] [1, %arg1, 5, 1, %arg2, 1] [1, 1, 1, 1, 1, 1] :
+  %13 = subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 4, 1][%arg2, %arg2, %arg2, %arg2, %arg2] :
+    memref<1x9x1x4x1xf32, offset: 0, strides: [36, 36, 4, 4, 1]> to memref<9x4xf32, offset: ?, strides: [?, ?]>
+  // CHECK: subview %{{.*}}[%{{.*}}] [1, 9, 1, 2, 1] [%{{.*}}] :
+  // CHECK-SAME: memref<1x9x1x4x1xf32, #[[$SUBVIEW_MAP6]]> to memref<1x9x2xf32, #[[$BASE_MAP3]]>
+  %14 = subview %12[%arg1, %arg1, %arg1, %arg1, %arg1][1, 9, 1, 2, 1][%arg2, %arg2, %arg2, %arg2, %arg2] :
+    memref<1x9x1x4x1xf32, offset: 0, strides: [36, 36, 4, 4, 1]> to memref<1x9x2xf32, offset: ?, strides: [?, ?, ?]>
+
+  %15 = alloc(%arg1, %arg2)[%c0, %c1, %arg1, %arg0, %arg0, %arg2, %arg2] :
+    memref<1x?x5x1x?x1xf32, affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5, s6] -> (s0 + s1 * d0 + s2 * d1 + s3 * d2 + s4 * d3 + s5 * d4 + s6 * d5)>>
+  // CHECK: subview %{{.*}}[0, 0, 0, 0, 0, 0] [1, %{{.*}}, 5, 1, %{{.*}}, 1] [1, 1, 1, 1, 1, 1] :
   // CHECK-SAME: memref<1x?x5x1x?x1xf32, #[[$SUBVIEW_MAP7]]> to memref<?x5x?xf32, #[[$BASE_MAP3]]>
-  %16 = subview %15[0, 0, 0, 0, 0, 0][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] : memref<1x?x5x1x?x1xf32, offset: ?, strides: [?, ?, ?, ?, ?, ?]> to memref<?x5x?xf32, offset: ?, strides: [?, ?, ?]>
-  // CHECK: subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1] [1, %arg1, 5, 1, %arg2, 1] [1, 1, 1, 1, 1, 1] :
+  %16 = subview %15[0, 0, 0, 0, 0, 0][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] :
+    memref<1x?x5x1x?x1xf32, offset: ?, strides: [?, ?, ?, ?, ?, ?]> to memref<?x5x?xf32, offset: ?, strides: [?, ?, ?]>
+  // CHECK: subview %{{.*}}[%{{.*}}] [1, %{{.*}}, 5, 1, %{{.*}}, 1] [1, 1, 1, 1, 1, 1] :
   // CHECK-SAME: memref<1x?x5x1x?x1xf32, #[[$SUBVIEW_MAP7]]> to memref<?x5x?xf32, #[[$BASE_MAP3]]>
-  %17 = subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] : memref<1x?x5x1x?x1xf32, offset: ?, strides: [?, ?, ?, ?, ?, ?]> to memref<?x5x?xf32, offset: ?, strides: [?, ?, ?]>
+  %17 = subview %15[%arg1, %arg1, %arg1, %arg1, %arg1, %arg1][1, %arg1, 5, 1, %arg2, 1][1, 1, 1, 1, 1, 1] :
+    memref<1x?x5x1x?x1xf32, offset: ?, strides: [?, ?, ?, ?, ?, ?]> to memref<?x5x?xf32, offset: ?, strides: [?, ?, ?]>

   %18 = alloc() : memref<1x8xf32>
-  // CHECK: subview %18[0, 0] [1, 8] [1, 1] : memref<1x8xf32> to memref<8xf32>
+  // CHECK: subview %{{.*}}[0, 0] [1, 8] [1, 1] : memref<1x8xf32> to memref<8xf32>
   %19 = subview %18[0, 0][1, 8][1, 1] : memref<1x8xf32> to memref<8xf32>

   %20 = alloc() : memref<8x16x4xf32>
-  // CHECK: subview %20[0, 0, 0] [1, 16, 4] [1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
+  // CHECK: subview %{{.*}}[0, 0, 0] [1, 16, 4] [1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>
   %21 = subview %20[0, 0, 0][1, 16, 4][1, 1, 1] : memref<8x16x4xf32> to memref<16x4xf32>

-  %22 = subview %20[3, 4, 2][1, 6, 3][1, 1, 1] : memref<8x16x4xf32> to memref<6x3xf32, offset: 210, strides: [4, 1]>
+  %22 = subview %20[3, 4, 2][1, 6, 3][1, 1, 1] :
+    memref<8x16x4xf32> to memref<6x3xf32, offset: 210, strides: [4, 1]>

   %23 = alloc() : memref<f32>
   %78 = subview %23[] [] [] : memref<f32> to memref<f32>
+
+  return
 }

 // CHECK-LABEL: func @test_dimop
-// CHECK-SAME: %[[ARG:.*]]: tensor<4x4x?xf32>
+// CHECK-SAME: %[[ARG:[0-9a-z]*]]: tensor<4x4x?xf32>
 func @test_dimop(%arg0: tensor<4x4x?xf32>) {
   // CHECK: %[[C2:.*]] = constant 2 : index
-  // CHECK: %{{.*}} = dim %[[ARG]], %[[C2]] : tensor<4x4x?xf32>
+  // CHECK: dim %[[ARG]], %[[C2]] : tensor<4x4x?xf32>
   %c2 = constant 2 : index
   %0 = dim %arg0, %c2 : tensor<4x4x?xf32>
   // use dim as an index to ensure type correctness
@@ -870,7 +879,7 @@
 }

 // CHECK-LABEL: func @test_splat_op
-// CHECK-SAME: [[S:%arg[0-9]+]]: f32
+// CHECK-SAME: [[S:%[0-9a-z]+]]: f32
 func @test_splat_op(%s : f32) {
   %v = splat %s : vector<8xf32>
   // CHECK: splat [[S]] : vector<8xf32>
@@ -900,7 +909,7 @@
 }

 // CHECK-LABEL: func @atomic_rmw
-// CHECK-SAME: ([[BUF:%.*]]: memref<10xf32>, [[VAL:%.*]]: f32, [[I:%.*]]: index)
+// CHECK-SAME: ([[BUF:%[0-9a-z]*]]: memref<10xf32>, [[VAL:%[0-9a-z]*]]: f32, [[I:%[0-9a-z]*]]: index)
 func @atomic_rmw(%I: memref<10xf32>, %val: f32, %i : index) {
   %x = atomic_rmw "addf" %val, %I[%i] : (f32, memref<10xf32>) -> f32
   // CHECK: atomic_rmw "addf" [[VAL]], [[BUF]]{{\[}}[[I]]]
@@ -908,7 +917,7 @@
 }

 // CHECK-LABEL: func @generic_atomic_rmw
-// CHECK-SAME: ([[BUF:%.*]]: memref<1x2xf32>, [[I:%.*]]: index, [[J:%.*]]: index)
+// CHECK-SAME: ([[BUF:%[0-9a-z]*]]: memref<1x2xf32>, [[I:%[0-9a-z]*]]: index, [[J:%[0-9a-z]*]]: index)
 func @generic_atomic_rmw(%I: memref<1x2xf32>, %i : index, %j : index) {
   %x = generic_atomic_rmw %I[%i, %j] : memref<1x2xf32> {
   // CHECK-NEXT: generic_atomic_rmw [[BUF]]{{\[}}[[I]], [[J]]] : memref
@@ -922,7 +931,7 @@
 }

 // CHECK-LABEL: func @assume_alignment
-// CHECK-SAME: %[[MEMREF:.*]]: memref<4x4xf16>
+// CHECK-SAME: %[[MEMREF:[0-9a-z]*]]: memref<4x4xf16>
 func @assume_alignment(%0: memref<4x4xf16>) {
   // CHECK: assume_alignment %[[MEMREF]], 16 : memref<4x4xf16>
   assume_alignment %0, 16 : memref<4x4xf16>