diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml
@@ -1383,6 +1383,83 @@
                   scalar_arg: K
                 is_unsigned_cast: false
 --- !LinalgOpConfig
+metadata: !LinalgOpMetadata
+  name: depthwise_conv1D_nw
+  cpp_class_name: DepthwiseConv1DNwOp
+  doc: |-
+    Performs depth-wise 1-D convolution.
+
+    Numeric casting is performed on the operands to the inner multiply, promoting
+    them to the same data type as the accumulator/output. Multiplier is set to 1,
+    which is a special case for most depthwise convolutions.
+  implements:
+  - LinalgConvolutionOpInterface
+structured_op: !LinalgStructuredOpConfig
+  args:
+  - !LinalgOperandDefConfig
+    name: I
+    usage: InputOperand
+    type_var: T1
+    shape_map: affine_map<()[s0, s1, s2, s3, s4, s5] -> (s0, s1 * s2 + s3 * s4, s5)>
+  - !LinalgOperandDefConfig
+    name: K
+    usage: InputOperand
+    type_var: T2
+    shape_map: affine_map<()[s0, s1, s2, s3, s4, s5] -> (s3, s5)>
+  - !LinalgOperandDefConfig
+    name: O
+    usage: OutputOperand
+    type_var: U
+    shape_map: affine_map<()[s0, s1, s2, s3, s4, s5] -> (s0, s1, s5)>
+  - !LinalgOperandDefConfig
+    name: strides
+    usage: IndexAttribute
+    type_var: I64
+    attribute_map: affine_map<()[s0, s1, s2, s3, s4, s5] -> (s2)>
+  - !LinalgOperandDefConfig
+    name: dilations
+    usage: IndexAttribute
+    type_var: I64
+    attribute_map: affine_map<()[s0, s1, s2, s3, s4, s5] -> (s4)>
+  indexing_maps: !LinalgIndexingMapsConfig
+    static_indexing_maps:
+    - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4, s5] -> (d0, d1 * s2 + d3 * s4,
+      d2)>
+    - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4, s5] -> (d3, d2)>
+    - affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4, s5] -> (d0, d1, d2)>
+  iterator_types:
+  - parallel
+  - parallel
+  - parallel
+  - reduction
+  assignments:
+  - !ScalarAssign
+    arg: O
+    value: !ScalarExpression
+      scalar_apply:
+        fn_name: add
+        operands:
+        - !ScalarExpression
+          scalar_arg: O
+        - !ScalarExpression
+          scalar_apply:
+            fn_name: mul
+            operands:
+            - !ScalarExpression
+              symbolic_cast:
+                type_var: U
+                operands:
+                - !ScalarExpression
+                  scalar_arg: I
+                is_unsigned_cast: false
+            - !ScalarExpression
+              symbolic_cast:
+                type_var: U
+                operands:
+                - !ScalarExpression
+                  scalar_arg: K
+                is_unsigned_cast: false
+--- !LinalgOpConfig
 metadata: !LinalgOpMetadata
   name: depthwise_conv2D_nhw
   cpp_class_name: DepthwiseConv2DNhwOp
diff --git a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
--- a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py
@@ -309,6 +309,25 @@
       U, I[D.n, D.od * S.SD + D.kd * S.DD, D.oh * S.SH + D.kh * S.DH,
            D.ow * S.SW + D.kw * S.DW, D.c
           ]) * cast(U, K[D.kd, D.kh, D.kw, D.c, D.f])
+@linalg_structured_op
+def depthwise_conv1D_nw(
+    I=TensorDef(T1, S.N, S.OW * S.SW + S.KW * S.DW, S.IC),
+    K=TensorDef(T2, S.KW, S.IC),
+    O=TensorDef(U, S.N, S.OW, S.IC, output=True),
+    strides=AttributeDef(S.SW),
+    dilations=AttributeDef(S.DW)):
+  """Performs depth-wise 1-D convolution.
+
+  Numeric casting is performed on the operands to the inner multiply, promoting
+  them to the same data type as the accumulator/output. Multiplier is set to 1,
+  which is a special case for most depthwise convolutions.
+ """ + implements(ConvolutionOpInterface) + domain(D.n, D.ow, D.ic, D.kw) + O[D.n, D.ow, D.ic] += \ + cast(U, I[D.n, D.ow * S.SW + D.kw * S.DW, D.ic]) * \ + cast(U, K[D.kw, D.ic]) + @linalg_structured_op def depthwise_conv2D_nhw( I=TensorDef(T1, S.N, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW, S.IC), diff --git a/mlir/test/Dialect/Linalg/named-ops.mlir b/mlir/test/Dialect/Linalg/named-ops.mlir --- a/mlir/test/Dialect/Linalg/named-ops.mlir +++ b/mlir/test/Dialect/Linalg/named-ops.mlir @@ -29,6 +29,19 @@ return } +// CHECK-LABEL: func @depthwise_conv1D_nw_tensor +func @depthwise_conv1D_nw_tensor(%input: tensor<1x113x96xf32>, %filter: tensor<3x96xf32>) -> tensor<1x56x96xf32> { + %init = linalg.init_tensor [1, 56, 96] : tensor<1x56x96xf32> + // CHECK: %{{.+}} = linalg.depthwise_conv1D_nw + // CHECK-SAME: {dilations = dense<1> : vector<1xi64>, strides = dense<2> : vector<1xi64>} + // CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x113x96xf32>, tensor<3x96xf32>) + // CHECK-SAME: outs(%{{.+}} : tensor<1x56x96xf32>) -> tensor<1x56x96xf32> + %0 = linalg.depthwise_conv1D_nw {dilations = dense<1> : vector<1xi64>, strides = dense<2> : vector<1xi64>} + ins(%input, %filter: tensor<1x113x96xf32>, tensor<3x96xf32>) + outs(%init: tensor<1x56x96xf32>) -> tensor<1x56x96xf32> + return %0: tensor<1x56x96xf32> +} + // CHECK-LABEL: func @depthwise_conv2D_nhw_tensor func @depthwise_conv2D_nhw_tensor(%input: tensor<1x113x113x96xf32>, %filter: tensor<3x3x96xf32>) -> tensor<1x56x56x96xf32> { %init = linalg.init_tensor [1, 56, 56, 96] : tensor<1x56x56x96xf32>