diff --git a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td
@@ -84,8 +84,12 @@
   let options = [
       Option<"profileName", "profile", "std::string",
-             /*default=*/"\"undefined\"",
-             "Validation if ops match for given profile">];
+             /*default=*/"\"undefined\"",
+             "Validate if operations match for the given profile">,
+      Option<"StrictOperationSpecAlignment", "strict-op-spec-alignment", "bool",
+             /*default=*/"false",
+             "Verify if the properties of certain operations align with the spec requirement">,
+  ];
 }
 
 #endif // MLIR_DIALECT_TOSA_TRANSFORMS_PASSES
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -42,13 +42,45 @@
 struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
 public:
   explicit TosaValidation() = default;
+  void runOnOperation() override;
 
 private:
-  void runOnOperation() override;
+  // Primary template: operations without a constant-operand requirement
+  // trivially pass the check. Specialized below for ops whose spec requires
+  // compile-time-constant operands.
+  template <typename TosaOp>
+  LogicalResult checkConstantOperand(TosaOp op) {
+    return success();
+  }
 
   std::optional<TosaProfileEnum> profileType;
 };
 
+template <>
+LogicalResult TosaValidation::checkConstantOperand(PadOp op) {
+  DenseElementsAttr paddings;
+  if (!matchPattern(op.getPadding(), m_Constant(&paddings))) {
+    return op.emitOpError("padding of pad isn't constant");
+  }
+
+  DenseElementsAttr pad_const;
+  // Assume this op is zero-padding if pad_const isn't present.
+  if (op.getPadConst() &&
+      !matchPattern(op.getPadConst(), m_Constant(&pad_const))) {
+    return op.emitOpError("pad_const of pad isn't constant");
+  }
+
+  return success();
+}
+
+template <>
+LogicalResult
+TosaValidation::checkConstantOperand(TransposeOp op) {
+  DenseElementsAttr perms;
+  if (!matchPattern(op.getPerms(), m_Constant(&perms))) {
+    return op.emitOpError("perms of transpose isn't constant");
+  }
+
+  return success();
+}
+
 void TosaValidation::runOnOperation() {
   profileType = symbolizeEnum<TosaProfileEnum>(profileName);
@@ -62,6 +94,18 @@
       return signalPassFailure();
     }
   }
+
+  if (StrictOperationSpecAlignment) {
+    // Some uses of TOSA rely on the constant operands of particular
+    // operations.
+    if (auto pad_op = dyn_cast<PadOp>(op))
+      if (failed(checkConstantOperand(pad_op)))
+        signalPassFailure();
+
+    if (auto transpose_op = dyn_cast<TransposeOp>(op))
+      if (failed(checkConstantOperand(transpose_op)))
+        signalPassFailure();
+  }
 });
 }
 } // namespace
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics --tosa-validate=strict-op-spec-alignment
 
 
 func.func @test_conv2d(%arg0: tensor<1x29x29x4xf32>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
@@ -37,3 +37,28 @@
 }
 
+// -----
+
+func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> {
+  // expected-error@+1 {{'tosa.pad' op padding of pad isn't constant}}
+  %0 = "tosa.pad"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32>
+  return %0 : tensor<13x21x3xf32>
+}
+
+// -----
+
+func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor<i8>) -> tensor<13x21x3xi8> {
+  %0 = "tosa.const"() {value = dense<[[0, 0], [0, 1], [0, 1]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
+  // expected-error@+1 {{'tosa.pad' op pad_const of pad isn't constant}}
+  %1 = "tosa.pad"(%arg0, %0, %arg1) : (tensor<13x21x3xi8>, tensor<3x2xi32>, tensor<i8>) -> tensor<13x21x3xi8>
+  return %1 : tensor<13x21x3xi8>
+}
+
+// -----
+
+func.func @test_transpose_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3xi32>) -> tensor<3x13x21xf32> {
+  // expected-error@+1 {{'tosa.transpose' op perms of transpose isn't constant}}
+  %0 = "tosa.transpose"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<3xi32>) -> tensor<3x13x21xf32>
+  return %0 : tensor<3x13x21xf32>
+}