diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
@@ -92,6 +92,216 @@
         - !ScalarExpression
           scalar_arg: I
 --- !LinalgOpConfig
+metadata: !LinalgOpMetadata
+  name: exp
+  cpp_class_name: ExpOp
+  doc: |-
+    Applies exp(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+structured_op: !LinalgStructuredOpConfig
+  args:
+  - !LinalgOperandDefConfig
+    name: I
+    kind: input_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  - !LinalgOperandDefConfig
+    name: O
+    kind: output_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  indexing_maps: !LinalgIndexingMapsConfig
+    static_indexing_maps:
+    - affine_map<() -> ()>
+    - affine_map<() -> ()>
+  iterator_types: []
+  assignments:
+  - !ScalarAssign
+    arg: O
+    value: !ScalarExpression
+      scalar_fn:
+        kind: unary
+        fn_name: exp
+        operands:
+        - !ScalarExpression
+          scalar_arg: I
+--- !LinalgOpConfig
+metadata: !LinalgOpMetadata
+  name: log
+  cpp_class_name: LogOp
+  doc: |-
+    Applies log(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+structured_op: !LinalgStructuredOpConfig
+  args:
+  - !LinalgOperandDefConfig
+    name: I
+    kind: input_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  - !LinalgOperandDefConfig
+    name: O
+    kind: output_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  indexing_maps: !LinalgIndexingMapsConfig
+    static_indexing_maps:
+    - affine_map<() -> ()>
+    - affine_map<() -> ()>
+  iterator_types: []
+  assignments:
+  - !ScalarAssign
+    arg: O
+    value: !ScalarExpression
+      scalar_fn:
+        kind: unary
+        fn_name: log
+        operands:
+        - !ScalarExpression
+          scalar_arg: I
+--- !LinalgOpConfig
+metadata: !LinalgOpMetadata
+  name: abs
+  cpp_class_name: AbsOp
+  doc: |-
+    Applies abs(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+structured_op: !LinalgStructuredOpConfig
+  args:
+  - !LinalgOperandDefConfig
+    name: I
+    kind: input_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  - !LinalgOperandDefConfig
+    name: O
+    kind: output_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  indexing_maps: !LinalgIndexingMapsConfig
+    static_indexing_maps:
+    - affine_map<() -> ()>
+    - affine_map<() -> ()>
+  iterator_types: []
+  assignments:
+  - !ScalarAssign
+    arg: O
+    value: !ScalarExpression
+      scalar_fn:
+        kind: unary
+        fn_name: abs
+        operands:
+        - !ScalarExpression
+          scalar_arg: I
+--- !LinalgOpConfig
+metadata: !LinalgOpMetadata
+  name: ceil
+  cpp_class_name: CeilOp
+  doc: |-
+    Applies ceil(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+structured_op: !LinalgStructuredOpConfig
+  args:
+  - !LinalgOperandDefConfig
+    name: I
+    kind: input_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  - !LinalgOperandDefConfig
+    name: O
+    kind: output_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  indexing_maps: !LinalgIndexingMapsConfig
+    static_indexing_maps:
+    - affine_map<() -> ()>
+    - affine_map<() -> ()>
+  iterator_types: []
+  assignments:
+  - !ScalarAssign
+    arg: O
+    value: !ScalarExpression
+      scalar_fn:
+        kind: unary
+        fn_name: ceil
+        operands:
+        - !ScalarExpression
+          scalar_arg: I
+--- !LinalgOpConfig
+metadata: !LinalgOpMetadata
+  name: floor
+  cpp_class_name: FloorOp
+  doc: |-
+    Applies floor(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+structured_op: !LinalgStructuredOpConfig
+  args:
+  - !LinalgOperandDefConfig
+    name: I
+    kind: input_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  - !LinalgOperandDefConfig
+    name: O
+    kind: output_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  indexing_maps: !LinalgIndexingMapsConfig
+    static_indexing_maps:
+    - affine_map<() -> ()>
+    - affine_map<() -> ()>
+  iterator_types: []
+  assignments:
+  - !ScalarAssign
+    arg: O
+    value: !ScalarExpression
+      scalar_fn:
+        kind: unary
+        fn_name: floor
+        operands:
+        - !ScalarExpression
+          scalar_arg: I
+--- !LinalgOpConfig
+metadata: !LinalgOpMetadata
+  name: negf
+  cpp_class_name: NegFOp
+  doc: |-
+    Applies negf(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+structured_op: !LinalgStructuredOpConfig
+  args:
+  - !LinalgOperandDefConfig
+    name: I
+    kind: input_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  - !LinalgOperandDefConfig
+    name: O
+    kind: output_tensor
+    type_var: T1
+    shape_map: affine_map<() -> ()>
+  indexing_maps: !LinalgIndexingMapsConfig
+    static_indexing_maps:
+    - affine_map<() -> ()>
+    - affine_map<() -> ()>
+  iterator_types: []
+  assignments:
+  - !ScalarAssign
+    arg: O
+    value: !ScalarExpression
+      scalar_fn:
+        kind: unary
+        fn_name: negf
+        operands:
+        - !ScalarExpression
+          scalar_arg: I
+--- !LinalgOpConfig
 metadata: !LinalgOpMetadata
   name: elemwise_binary
   cpp_class_name: ElemwiseBinaryOp
diff --git a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
--- a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
@@ -35,6 +35,78 @@
     O[None] = fun(cast(U, I[None]))
 
 
+@linalg_structured_op
+def exp(
+    I=TensorDef(T1),
+    O=TensorDef(T1, output=True),
+):
+    """Applies exp(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+    """
+    O[None] = UnaryFn.exp(I[None])
+
+
+@linalg_structured_op
+def log(
+    I=TensorDef(T1),
+    O=TensorDef(T1, output=True),
+):
+    """Applies log(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+    """
+    O[None] = UnaryFn.log(I[None])
+
+
+@linalg_structured_op
+def abs(
+    I=TensorDef(T1),
+    O=TensorDef(T1, output=True),
+):
+    """Applies abs(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+    """
+    O[None] = UnaryFn.abs(I[None])
+
+
+@linalg_structured_op
+def ceil(
+    I=TensorDef(T1),
+    O=TensorDef(T1, output=True),
+):
+    """Applies ceil(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+    """
+    O[None] = UnaryFn.ceil(I[None])
+
+
+@linalg_structured_op
+def floor(
+    I=TensorDef(T1),
+    O=TensorDef(T1, output=True),
+):
+    """Applies floor(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+    """
+    O[None] = UnaryFn.floor(I[None])
+
+
+@linalg_structured_op
+def negf(
+    I=TensorDef(T1),
+    O=TensorDef(T1, output=True),
+):
+    """Applies negf(x) elementwise.
+
+    No numeric casting is performed on the input operand.
+    """
+    O[None] = UnaryFn.negf(I[None])
+
+
 @linalg_structured_op
 def elemwise_binary(
     lhs=TensorDef(T1),
@@ -67,7 +139,7 @@
     a `linalg.broadcast` + `linalg.add` sequence can be lowered to a
     `linalg.generic` with different affine maps for the two operands.
""" - O[None] = lhs[None] + rhs[None] + O[None] = BinaryFn.add(lhs[None], rhs[None]) @linalg_structured_op @@ -86,7 +158,7 @@ a `linalg.broadcast` + `linalg.sub` sequence can be lowered to a `linalg.generic` with different affine maps for the two operands. """ - O[None] = lhs[None] - rhs[None] + O[None] = BinaryFn.sub(lhs[None], rhs[None]) @linalg_structured_op @@ -105,7 +177,7 @@ a `linalg.broadcast` + `linalg.mul` sequence can be lowered to a `linalg.generic` with different affine maps for the two operands. """ - O[None] = lhs[None] * rhs[None] + O[None] = BinaryFn.mul(lhs[None], rhs[None]) @linalg_structured_op @@ -124,7 +196,7 @@ a `linalg.broadcast` + `linalg.div` sequence can be lowered to a `linalg.generic` with different affine maps for the two operands. """ - O[None] = lhs[None] / rhs[None] + O[None] = BinaryFn.div(lhs[None], rhs[None]) @linalg_structured_op diff --git a/mlir/test/Dialect/Linalg/generalize-named-ops.mlir b/mlir/test/Dialect/Linalg/generalize-named-ops.mlir --- a/mlir/test/Dialect/Linalg/generalize-named-ops.mlir +++ b/mlir/test/Dialect/Linalg/generalize-named-ops.mlir @@ -296,7 +296,7 @@ return } -// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> +// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> // CHECK: func @generalize_add // CHECK-SAME: (%[[LHS:.+]]: memref<7x14x21xf32>, %[[RHS:.+]]: memref<7x14x21xf32>, @@ -321,7 +321,7 @@ return } -// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> +// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> // CHECK: func @generalize_sub // CHECK-SAME: (%[[LHS:.+]]: memref<7x14x21xf32>, %[[RHS:.+]]: memref<7x14x21xf32>, @@ -334,8 +334,8 @@ // CHECK-SAME: outs(%[[OUT]] : memref<7x14x21xf32>) // CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32, %[[BBARG2:.+]]: f32) -// CHECK-NEXT: %[[SUM:.+]] = arith.subf %[[BBARG0]], %[[BBARG1]] : f32 -// CHECK-NEXT: linalg.yield %[[SUM]] : f32 +// CHECK-NEXT: %[[SUB:.+]] = arith.subf %[[BBARG0]], %[[BBARG1]] : f32 +// CHECK-NEXT: linalg.yield %[[SUB]] : f32 // ----- @@ -346,7 +346,7 @@ return } -// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> +// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> // CHECK: func @generalize_mul // CHECK-SAME: (%[[LHS:.+]]: memref<7x14x21xf32>, %[[RHS:.+]]: memref<7x14x21xf32>, @@ -359,8 +359,8 @@ // CHECK-SAME: outs(%[[OUT]] : memref<7x14x21xf32>) // CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32, %[[BBARG2:.+]]: f32) -// CHECK-NEXT: %[[SUM:.+]] = arith.mulf %[[BBARG0]], %[[BBARG1]] : f32 -// CHECK-NEXT: linalg.yield %[[SUM]] : f32 +// CHECK-NEXT: %[[MUL:.+]] = arith.mulf %[[BBARG0]], %[[BBARG1]] : f32 +// CHECK-NEXT: linalg.yield %[[MUL]] : f32 // ----- @@ -371,7 +371,7 @@ return } -// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> +// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> // CHECK: func @generalize_div // CHECK-SAME: (%[[LHS:.+]]: memref<7x14x21xf32>, %[[RHS:.+]]: memref<7x14x21xf32>, @@ -384,8 +384,8 @@ // CHECK-SAME: outs(%[[OUT]] : memref<7x14x21xf32>) // CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32, %[[BBARG2:.+]]: f32) -// CHECK-NEXT: %[[SUM:.+]] = arith.divf %[[BBARG0]], %[[BBARG1]] : f32 -// CHECK-NEXT: linalg.yield %[[SUM]] : f32 +// CHECK-NEXT: %[[DIV:.+]] = arith.divf %[[BBARG0]], %[[BBARG1]] : f32 +// CHECK-NEXT: linalg.yield %[[DIV]] : f32 // ----- @@ -396,7 +396,7 @@ return } -// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> +// CHECK: #[[MAP:.+]] = affine_map<(d0, 
 
 // CHECK: func @generalize_divu
 // CHECK-SAME: (%[[LHS:.+]]: memref<7x14x21xi32>, %[[RHS:.+]]: memref<7x14x21xi32>,
@@ -409,5 +409,131 @@
 // CHECK-SAME: outs(%[[OUT]] : memref<7x14x21xi32>)
 
 // CHECK: ^{{.+}}(%[[BBARG0:.+]]: i32, %[[BBARG1:.+]]: i32, %[[BBARG2:.+]]: i32)
-// CHECK-NEXT: %[[SUM:.+]] = arith.divui %[[BBARG0]], %[[BBARG1]] : i32
-// CHECK-NEXT: linalg.yield %[[SUM]] : i32
+// CHECK-NEXT: %[[DIVU:.+]] = arith.divui %[[BBARG0]], %[[BBARG1]] : i32
+// CHECK-NEXT: linalg.yield %[[DIVU]] : i32
+
+// -----
+
+func.func @generalize_exp(%arg: memref<7x14x21xf32>, %out: memref<7x14x21xf32>) {
+  linalg.exp ins(%arg : memref<7x14x21xf32>) outs(%out : memref<7x14x21xf32>)
+  return
+}
+
+// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+
+// CHECK: func @generalize_exp
+// CHECK-SAME: (%[[ARG:.+]]: memref<7x14x21xf32>, %[[OUT:.+]]: memref<7x14x21xf32>)
+
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[MAP]], #[[MAP]]]
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]}
+// CHECK-SAME: ins(%[[ARG]] : memref<7x14x21xf32>) outs(%[[OUT]] : memref<7x14x21xf32>)
+
+// CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32)
+// CHECK-NEXT: %[[EXP:.+]] = math.exp %[[BBARG0]] : f32
+// CHECK-NEXT: linalg.yield %[[EXP]] : f32
+
+// -----
+
+func.func @generalize_log(%arg: memref<7x14x21xf32>, %out: memref<7x14x21xf32>) {
+  linalg.log ins(%arg : memref<7x14x21xf32>) outs(%out : memref<7x14x21xf32>)
+  return
+}
+
+// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+
+// CHECK: func @generalize_log
+// CHECK-SAME: (%[[ARG:.+]]: memref<7x14x21xf32>, %[[OUT:.+]]: memref<7x14x21xf32>)
+
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[MAP]], #[[MAP]]]
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]}
+// CHECK-SAME: ins(%[[ARG]] : memref<7x14x21xf32>) outs(%[[OUT]] : memref<7x14x21xf32>)
+
+// CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32)
+// CHECK-NEXT: %[[LOG:.+]] = math.log %[[BBARG0]] : f32
+// CHECK-NEXT: linalg.yield %[[LOG]] : f32
+
+// -----
+
+func.func @generalize_abs(%arg: memref<7x14x21xf32>, %out: memref<7x14x21xf32>) {
+  linalg.abs ins(%arg : memref<7x14x21xf32>) outs(%out : memref<7x14x21xf32>)
+  return
+}
+
+// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+
+// CHECK: func @generalize_abs
+// CHECK-SAME: (%[[ARG:.+]]: memref<7x14x21xf32>, %[[OUT:.+]]: memref<7x14x21xf32>)
+
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[MAP]], #[[MAP]]]
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]}
+// CHECK-SAME: ins(%[[ARG]] : memref<7x14x21xf32>) outs(%[[OUT]] : memref<7x14x21xf32>)
+
+// CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32)
+// CHECK-NEXT: %[[ABS:.+]] = math.absf %[[BBARG0]] : f32
+// CHECK-NEXT: linalg.yield %[[ABS]] : f32
+
+// -----
+
+func.func @generalize_ceil(%arg: memref<7x14x21xf32>, %out: memref<7x14x21xf32>) {
+  linalg.ceil ins(%arg : memref<7x14x21xf32>) outs(%out : memref<7x14x21xf32>)
+  return
+}
+
+// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+
+// CHECK: func @generalize_ceil
+// CHECK-SAME: (%[[ARG:.+]]: memref<7x14x21xf32>, %[[OUT:.+]]: memref<7x14x21xf32>)
+
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[MAP]], #[[MAP]]]
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]}
+// CHECK-SAME: ins(%[[ARG]] : memref<7x14x21xf32>) outs(%[[OUT]] : memref<7x14x21xf32>)
+
+// CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32)
+// CHECK-NEXT: %[[CEIL:.+]] = math.ceil %[[BBARG0]] : f32
+// CHECK-NEXT: linalg.yield %[[CEIL]] : f32
+
+// -----
+
+func.func @generalize_floor(%arg: memref<7x14x21xf32>, %out: memref<7x14x21xf32>) {
+  linalg.floor ins(%arg : memref<7x14x21xf32>) outs(%out : memref<7x14x21xf32>)
+  return
+}
+
+// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+
+// CHECK: func @generalize_floor
+// CHECK-SAME: (%[[ARG:.+]]: memref<7x14x21xf32>, %[[OUT:.+]]: memref<7x14x21xf32>)
+
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[MAP]], #[[MAP]]]
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]}
+// CHECK-SAME: ins(%[[ARG]] : memref<7x14x21xf32>) outs(%[[OUT]] : memref<7x14x21xf32>)
+
+// CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32)
+// CHECK-NEXT: %[[FLOOR:.+]] = math.floor %[[BBARG0]] : f32
+// CHECK-NEXT: linalg.yield %[[FLOOR]] : f32
+
+// -----
+
+func.func @generalize_negf(%arg: memref<7x14x21xf32>, %out: memref<7x14x21xf32>) {
+  linalg.negf ins(%arg : memref<7x14x21xf32>) outs(%out : memref<7x14x21xf32>)
+  return
+}
+
+// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+
+// CHECK: func @generalize_negf
+// CHECK-SAME: (%[[ARG:.+]]: memref<7x14x21xf32>, %[[OUT:.+]]: memref<7x14x21xf32>)
+
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[MAP]], #[[MAP]]]
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]}
+// CHECK-SAME: ins(%[[ARG]] : memref<7x14x21xf32>) outs(%[[OUT]] : memref<7x14x21xf32>)
+
+// CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32)
+// CHECK-NEXT: %[[NEGF:.+]] = arith.negf %[[BBARG0]] : f32
+// CHECK-NEXT: linalg.yield %[[NEGF]] : f32
diff --git a/mlir/test/Dialect/Linalg/named-ops-fail.mlir b/mlir/test/Dialect/Linalg/named-ops-fail.mlir
--- a/mlir/test/Dialect/Linalg/named-ops-fail.mlir
+++ b/mlir/test/Dialect/Linalg/named-ops-fail.mlir
@@ -77,3 +77,99 @@
   linalg.div_unsigned ins(%arg0, %arg1 : memref<8x16xi32>, memref<4x8x16xi32>) outs(%arg2: memref<4x8x16xi32>)
   return
 }
+
+// -----
+
+func.func @exp_type_cast(%arg: memref<4x8x16xf16>, %out: memref<4x8x16xf32>) {
+  // CHECK: operand 1 ('f16') doesn't match the element type of the enclosing linalg.generic op ('f32')
+  linalg.exp ins(%arg : memref<4x8x16xf16>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @exp_broadcast(%arg: memref<8x16xf32>, %out: memref<4x8x16xf32>) {
+  // CHECK: op expected operand rank (2) to match the result rank of indexing_map #0 (3)
+  linalg.exp ins(%arg : memref<8x16xf32>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @log_type_cast(%arg: memref<4x8x16xf16>, %out: memref<4x8x16xf32>) {
+  // CHECK: operand 1 ('f16') doesn't match the element type of the enclosing linalg.generic op ('f32')
+  linalg.log ins(%arg : memref<4x8x16xf16>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @log_broadcast(%arg: memref<8x16xf32>, %out: memref<4x8x16xf32>) {
+  // CHECK: op expected operand rank (2) to match the result rank of indexing_map #0 (3)
+  linalg.log ins(%arg : memref<8x16xf32>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @abs_type_cast(%arg: memref<4x8x16xf16>, %out: memref<4x8x16xf32>) {
+  // CHECK: operand 1 ('f16') doesn't match the element type of the enclosing linalg.generic op ('f32')
+  linalg.abs ins(%arg : memref<4x8x16xf16>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @abs_broadcast(%arg: memref<8x16xf32>, %out: memref<4x8x16xf32>) {
+  // CHECK: op expected operand rank (2) to match the result rank of indexing_map #0 (3)
+  linalg.abs ins(%arg : memref<8x16xf32>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @ceil_type_cast(%arg: memref<4x8x16xf16>, %out: memref<4x8x16xf32>) {
+  // CHECK: operand 1 ('f16') doesn't match the element type of the enclosing linalg.generic op ('f32')
+  linalg.ceil ins(%arg : memref<4x8x16xf16>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @ceil_broadcast(%arg: memref<8x16xf32>, %out: memref<4x8x16xf32>) {
+  // CHECK: op expected operand rank (2) to match the result rank of indexing_map #0 (3)
+  linalg.ceil ins(%arg : memref<8x16xf32>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @floor_type_cast(%arg: memref<4x8x16xf16>, %out: memref<4x8x16xf32>) {
+  // CHECK: operand 1 ('f16') doesn't match the element type of the enclosing linalg.generic op ('f32')
+  linalg.floor ins(%arg : memref<4x8x16xf16>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @floor_broadcast(%arg: memref<8x16xf32>, %out: memref<4x8x16xf32>) {
+  // CHECK: op expected operand rank (2) to match the result rank of indexing_map #0 (3)
+  linalg.floor ins(%arg : memref<8x16xf32>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @negf_type_cast(%arg: memref<4x8x16xf16>, %out: memref<4x8x16xf32>) {
+  // CHECK: operand 1 ('f16') doesn't match the element type of the enclosing linalg.generic op ('f32')
+  linalg.negf ins(%arg : memref<4x8x16xf16>) outs(%out: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+func.func @negf_broadcast(%arg: memref<8x16xf32>, %out: memref<4x8x16xf32>) {
+  // CHECK: op expected operand rank (2) to match the result rank of indexing_map #0 (3)
+  linalg.negf ins(%arg : memref<8x16xf32>) outs(%out: memref<4x8x16xf32>)
+  return
+}
diff --git a/mlir/test/Dialect/Linalg/named-ops.mlir b/mlir/test/Dialect/Linalg/named-ops.mlir
--- a/mlir/test/Dialect/Linalg/named-ops.mlir
+++ b/mlir/test/Dialect/Linalg/named-ops.mlir
@@ -1354,3 +1354,189 @@
   %1 = linalg.div_unsigned ins(%arg0, %arg1 : tensor<4x8x16xi32>, tensor<4x8x16xi32>) outs(%0: tensor<4x8x16xi32>) -> tensor<4x8x16xi32>
   return %1 : tensor<4x8x16xi32>
 }
+
+// -----
+
+// CHECK-LABEL: func @exp_dynamic
+func.func @exp_dynamic(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>) {
+  // CHECK: linalg.exp
+  // CHECK-SAME: ins(%{{.+}} : memref<?x?x?xf32>) outs(%{{.+}} : memref<?x?x?xf32>)
+  linalg.exp ins(%arg0 : memref<?x?x?xf32>) outs(%arg1: memref<?x?x?xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @exp_static
+func.func @exp_static(%arg0: memref<4x8x16xf32>, %arg1: memref<4x8x16xf32>) {
+  // CHECK: linalg.exp
+  // CHECK-SAME: ins(%{{.+}} : memref<4x8x16xf32>) outs(%{{.+}} : memref<4x8x16xf32>)
+  linalg.exp ins(%arg0 : memref<4x8x16xf32>) outs(%arg1: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @exp_tensor
+func.func @exp_tensor(%arg0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32> {
+  %0 = tensor.empty() : tensor<4x8x16xf32>
+  // CHECK: linalg.exp
+  // CHECK-SAME: ins(%{{.+}} : tensor<4x8x16xf32>) outs(%{{.+}} : tensor<4x8x16xf32>)
+  %1 = linalg.exp ins(%arg0 : tensor<4x8x16xf32>) outs(%0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32>
+  return %1 : tensor<4x8x16xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @log_dynamic
+func.func @log_dynamic(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>) {
+  // CHECK: linalg.log
+  // CHECK-SAME: ins(%{{.+}} : memref<?x?x?xf32>) outs(%{{.+}} : memref<?x?x?xf32>)
+  linalg.log ins(%arg0 : memref<?x?x?xf32>) outs(%arg1: memref<?x?x?xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @log_static
+func.func @log_static(%arg0: memref<4x8x16xf32>, %arg1: memref<4x8x16xf32>) {
+  // CHECK: linalg.log
+  // CHECK-SAME: ins(%{{.+}} : memref<4x8x16xf32>) outs(%{{.+}} : memref<4x8x16xf32>)
+  linalg.log ins(%arg0 : memref<4x8x16xf32>) outs(%arg1: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @log_tensor
+func.func @log_tensor(%arg0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32> {
+  %0 = tensor.empty() : tensor<4x8x16xf32>
+  // CHECK: linalg.log
+  // CHECK-SAME: ins(%{{.+}} : tensor<4x8x16xf32>) outs(%{{.+}} : tensor<4x8x16xf32>)
+  %1 = linalg.log ins(%arg0 : tensor<4x8x16xf32>) outs(%0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32>
+  return %1 : tensor<4x8x16xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @abs_dynamic
+func.func @abs_dynamic(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>) {
+  // CHECK: linalg.abs
+  // CHECK-SAME: ins(%{{.+}} : memref<?x?x?xf32>) outs(%{{.+}} : memref<?x?x?xf32>)
+  linalg.abs ins(%arg0 : memref<?x?x?xf32>) outs(%arg1: memref<?x?x?xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @abs_static
+func.func @abs_static(%arg0: memref<4x8x16xf32>, %arg1: memref<4x8x16xf32>) {
+  // CHECK: linalg.abs
+  // CHECK-SAME: ins(%{{.+}} : memref<4x8x16xf32>) outs(%{{.+}} : memref<4x8x16xf32>)
+  linalg.abs ins(%arg0 : memref<4x8x16xf32>) outs(%arg1: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @abs_tensor
+func.func @abs_tensor(%arg0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32> {
+  %0 = tensor.empty() : tensor<4x8x16xf32>
+  // CHECK: linalg.abs
+  // CHECK-SAME: ins(%{{.+}} : tensor<4x8x16xf32>) outs(%{{.+}} : tensor<4x8x16xf32>)
+  %1 = linalg.abs ins(%arg0 : tensor<4x8x16xf32>) outs(%0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32>
+  return %1 : tensor<4x8x16xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @ceil_dynamic
+func.func @ceil_dynamic(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>) {
+  // CHECK: linalg.ceil
+  // CHECK-SAME: ins(%{{.+}} : memref<?x?x?xf32>) outs(%{{.+}} : memref<?x?x?xf32>)
+  linalg.ceil ins(%arg0 : memref<?x?x?xf32>) outs(%arg1: memref<?x?x?xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @ceil_static
+func.func @ceil_static(%arg0: memref<4x8x16xf32>, %arg1: memref<4x8x16xf32>) {
+  // CHECK: linalg.ceil
+  // CHECK-SAME: ins(%{{.+}} : memref<4x8x16xf32>) outs(%{{.+}} : memref<4x8x16xf32>)
+  linalg.ceil ins(%arg0 : memref<4x8x16xf32>) outs(%arg1: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @ceil_tensor
+func.func @ceil_tensor(%arg0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32> {
+  %0 = tensor.empty() : tensor<4x8x16xf32>
+  // CHECK: linalg.ceil
+  // CHECK-SAME: ins(%{{.+}} : tensor<4x8x16xf32>) outs(%{{.+}} : tensor<4x8x16xf32>)
+  %1 = linalg.ceil ins(%arg0 : tensor<4x8x16xf32>) outs(%0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32>
+  return %1 : tensor<4x8x16xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @floor_dynamic
+func.func @floor_dynamic(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>) {
+  // CHECK: linalg.floor
+  // CHECK-SAME: ins(%{{.+}} : memref<?x?x?xf32>) outs(%{{.+}} : memref<?x?x?xf32>)
+  linalg.floor ins(%arg0 : memref<?x?x?xf32>) outs(%arg1: memref<?x?x?xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @floor_static
+func.func @floor_static(%arg0: memref<4x8x16xf32>, %arg1: memref<4x8x16xf32>) {
+  // CHECK: linalg.floor
+  // CHECK-SAME: ins(%{{.+}} : memref<4x8x16xf32>) outs(%{{.+}} : memref<4x8x16xf32>)
+  linalg.floor ins(%arg0 : memref<4x8x16xf32>) outs(%arg1: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @floor_tensor
+func.func @floor_tensor(%arg0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32> {
+  %0 = tensor.empty() : tensor<4x8x16xf32>
+  // CHECK: linalg.floor
+  // CHECK-SAME: ins(%{{.+}} : tensor<4x8x16xf32>) outs(%{{.+}} : tensor<4x8x16xf32>)
+  %1 = linalg.floor ins(%arg0 : tensor<4x8x16xf32>) outs(%0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32>
+  return %1 : tensor<4x8x16xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @negf_dynamic
+func.func @negf_dynamic(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>) {
+  // CHECK: linalg.negf
+  // CHECK-SAME: ins(%{{.+}} : memref<?x?x?xf32>) outs(%{{.+}} : memref<?x?x?xf32>)
+  linalg.negf ins(%arg0 : memref<?x?x?xf32>) outs(%arg1: memref<?x?x?xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @negf_static
+func.func @negf_static(%arg0: memref<4x8x16xf32>, %arg1: memref<4x8x16xf32>) {
+  // CHECK: linalg.negf
+  // CHECK-SAME: ins(%{{.+}} : memref<4x8x16xf32>) outs(%{{.+}} : memref<4x8x16xf32>)
+  linalg.negf ins(%arg0 : memref<4x8x16xf32>) outs(%arg1: memref<4x8x16xf32>)
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @negf_tensor
+func.func @negf_tensor(%arg0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32> {
+  %0 = tensor.empty() : tensor<4x8x16xf32>
+  // CHECK: linalg.negf
+  // CHECK-SAME: ins(%{{.+}} : tensor<4x8x16xf32>) outs(%{{.+}} : tensor<4x8x16xf32>)
+  %1 = linalg.negf ins(%arg0 : tensor<4x8x16xf32>) outs(%0: tensor<4x8x16xf32>) -> tensor<4x8x16xf32>
+  return %1 : tensor<4x8x16xf32>
+}
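
Usage sketch (not part of the patch; function names below are illustrative): the new unary ops follow the same ins/outs convention as the existing binary named ops, and `mlir-opt --linalg-generalize-named-ops` rewrites each one into a `linalg.generic` whose scalar payload is the corresponding `math`/`arith` op, as checked by the generalize tests above.

// Named form accepted after this patch:
func.func @apply_exp(%x: tensor<8x16xf32>) -> tensor<8x16xf32> {
  %init = tensor.empty() : tensor<8x16xf32>
  %y = linalg.exp ins(%x : tensor<8x16xf32>) outs(%init : tensor<8x16xf32>) -> tensor<8x16xf32>
  return %y : tensor<8x16xf32>
}

// Roughly what --linalg-generalize-named-ops produces for it:
#map = affine_map<(d0, d1) -> (d0, d1)>
func.func @apply_exp_generalized(%x: tensor<8x16xf32>) -> tensor<8x16xf32> {
  %init = tensor.empty() : tensor<8x16xf32>
  // Identity indexing maps on input and output; all loops are parallel.
  %y = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]}
      ins(%x : tensor<8x16xf32>) outs(%init : tensor<8x16xf32>) {
  ^bb0(%in: f32, %out: f32):
    // Scalar payload: math.exp for linalg.exp (arith.negf for linalg.negf, etc.).
    %e = math.exp %in : f32
    linalg.yield %e : f32
  } -> tensor<8x16xf32>
  return %y : tensor<8x16xf32>
}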