diff --git a/mlir/docs/OpDefinitions.md b/mlir/docs/OpDefinitions.md --- a/mlir/docs/OpDefinitions.md +++ b/mlir/docs/OpDefinitions.md @@ -106,7 +106,7 @@ for all the fields it requires. For example, `tf.AvgPool` is defined as ```tablegen -def TF_AvgPoolOp : TF_Op<"AvgPool", [NoSideEffect]> { +def TF_AvgPoolOp : TF_Op<"AvgPool", [NoMemoryEffect]> { let summary = "Performs average pooling on the input."; let description = [{ diff --git a/mlir/docs/Tutorials/QuickstartRewrites.md b/mlir/docs/Tutorials/QuickstartRewrites.md --- a/mlir/docs/Tutorials/QuickstartRewrites.md +++ b/mlir/docs/Tutorials/QuickstartRewrites.md @@ -45,7 +45,7 @@ ```tablegen def TFL_LeakyReluOp: TFL_Op, + [NoMemoryEffect, SameValueType]>, Results<(outs Tensor)> { let arguments = (ins F32Tensor:$x, diff --git a/mlir/docs/Tutorials/Toy/Ch-3.md b/mlir/docs/Tutorials/Toy/Ch-3.md --- a/mlir/docs/Tutorials/Toy/Ch-3.md +++ b/mlir/docs/Tutorials/Toy/Ch-3.md @@ -144,10 +144,10 @@ last transform with the function input and left behind the now dead transpose input. The Canonicalizer knows to clean up dead operations; however, MLIR conservatively assumes that operations may have side-effects. 
We can fix this by -adding a new trait, `NoSideEffect`, to our `TransposeOp`: +adding a new trait, `NoMemoryEffect`, to our `TransposeOp`: ```tablegen -def TransposeOp : Toy_Op<"transpose", [NoSideEffect]> {...} +def TransposeOp : Toy_Op<"transpose", [NoMemoryEffect]> {...} ``` Let's retry now `toyc-ch3 test/transpose_transpose.toy -emit=mlir -opt`: diff --git a/mlir/docs/Tutorials/Toy/Ch-4.md b/mlir/docs/Tutorials/Toy/Ch-4.md --- a/mlir/docs/Tutorials/Toy/Ch-4.md +++ b/mlir/docs/Tutorials/Toy/Ch-4.md @@ -222,7 +222,7 @@ ```tablegen def CastOp : Toy_Op<"cast", [ DeclareOpInterfaceMethods, - NoSideEffect, + NoMemoryEffect, SameOperandsAndResultShape] > { let summary = "shape cast operation"; diff --git a/mlir/examples/standalone/include/Standalone/StandaloneOps.td b/mlir/examples/standalone/include/Standalone/StandaloneOps.td --- a/mlir/examples/standalone/include/Standalone/StandaloneOps.td +++ b/mlir/examples/standalone/include/Standalone/StandaloneOps.td @@ -13,7 +13,7 @@ include "mlir/Interfaces/InferTypeOpInterface.td" include "mlir/Interfaces/SideEffectInterfaces.td" -def Standalone_FooOp : Standalone_Op<"foo", [NoSideEffect, +def Standalone_FooOp : Standalone_Op<"foo", [Pure, SameOperandsAndResultType]> { let summary = "Illustrates how to define an operation."; let description = [{ diff --git a/mlir/examples/toy/Ch2/include/toy/Ops.td b/mlir/examples/toy/Ch2/include/toy/Ops.td --- a/mlir/examples/toy/Ch2/include/toy/Ops.td +++ b/mlir/examples/toy/Ch2/include/toy/Ops.td @@ -43,9 +43,9 @@ // We define a toy operation by inheriting from our base 'Toy_Op' class above. // Here we provide the mnemonic and a list of traits for the operation. The -// constant operation is marked as 'NoSideEffect' as it is a pure operation +// constant operation is marked as 'Pure' as it is a pure operation // and may be removed if dead. 
-def ConstantOp : Toy_Op<"constant", [NoSideEffect]> { +def ConstantOp : Toy_Op<"constant", [Pure]> { // Provide a summary and description for this operation. This can be used to // auto-generate documentation of the operations within our dialect. let summary = "constant"; @@ -265,7 +265,7 @@ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : Toy_Op<"return", [NoSideEffect, HasParent<"FuncOp">, +def ReturnOp : Toy_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { let summary = "return operation"; let description = [{ diff --git a/mlir/examples/toy/Ch3/include/toy/Ops.td b/mlir/examples/toy/Ch3/include/toy/Ops.td --- a/mlir/examples/toy/Ch3/include/toy/Ops.td +++ b/mlir/examples/toy/Ch3/include/toy/Ops.td @@ -42,9 +42,9 @@ // We define a toy operation by inheriting from our base 'Toy_Op' class above. // Here we provide the mnemonic and a list of traits for the operation. The -// constant operation is marked as 'NoSideEffect' as it is a pure operation +// constant operation is marked as 'Pure' as it is a pure operation // and may be removed if dead. -def ConstantOp : Toy_Op<"constant", [NoSideEffect]> { +def ConstantOp : Toy_Op<"constant", [Pure]> { // Provide a summary and description for this operation. This can be used to // auto-generate documentation of the operations within our dialect. let summary = "constant"; @@ -88,7 +88,7 @@ // AddOp //===----------------------------------------------------------------------===// -def AddOp : Toy_Op<"add", [NoSideEffect]> { +def AddOp : Toy_Op<"add", [Pure]> { let summary = "element-wise addition operation"; let description = [{ The "add" operation performs element-wise addition between two tensors. 
@@ -199,7 +199,7 @@ // MulOp //===----------------------------------------------------------------------===// -def MulOp : Toy_Op<"mul", [NoSideEffect]> { +def MulOp : Toy_Op<"mul", [Pure]> { let summary = "element-wise multiplication operation"; let description = [{ The "mul" operation performs element-wise multiplication between two @@ -239,7 +239,7 @@ // ReshapeOp //===----------------------------------------------------------------------===// -def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> { +def ReshapeOp : Toy_Op<"reshape", [Pure]> { let summary = "tensor reshape operation"; let description = [{ Reshape operation is transforming its input tensor into a new tensor with @@ -267,7 +267,7 @@ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : Toy_Op<"return", [NoSideEffect, HasParent<"FuncOp">, +def ReturnOp : Toy_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { let summary = "return operation"; let description = [{ @@ -309,7 +309,7 @@ // TransposeOp //===----------------------------------------------------------------------===// -def TransposeOp : Toy_Op<"transpose", [NoSideEffect]> { +def TransposeOp : Toy_Op<"transpose", [Pure]> { let summary = "transpose operation"; let arguments = (ins F64Tensor:$input); diff --git a/mlir/examples/toy/Ch4/include/toy/Ops.td b/mlir/examples/toy/Ch4/include/toy/Ops.td --- a/mlir/examples/toy/Ch4/include/toy/Ops.td +++ b/mlir/examples/toy/Ch4/include/toy/Ops.td @@ -45,9 +45,9 @@ // We define a toy operation by inheriting from our base 'Toy_Op' class above. // Here we provide the mnemonic and a list of traits for the operation. The -// constant operation is marked as 'NoSideEffect' as it is a pure operation +// constant operation is marked as 'Pure' as it is a pure operation // and may be removed if dead. -def ConstantOp : Toy_Op<"constant", [NoSideEffect]> { +def ConstantOp : Toy_Op<"constant", [Pure]> { // Provide a summary and description for this operation. 
This can be used to // auto-generate documentation of the operations within our dialect. let summary = "constant"; @@ -92,7 +92,7 @@ //===----------------------------------------------------------------------===// def AddOp : Toy_Op<"add", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "element-wise addition operation"; let description = [{ The "add" operation performs element-wise addition between two tensors. @@ -118,7 +118,7 @@ def CastOp : Toy_Op<"cast", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, - NoSideEffect, + Pure, SameOperandsAndResultShape ]> { let summary = "shape cast operation"; @@ -231,7 +231,7 @@ //===----------------------------------------------------------------------===// def MulOp : Toy_Op<"mul", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "element-wise multiplication operation"; let description = [{ The "mul" operation performs element-wise multiplication between two @@ -271,7 +271,7 @@ // ReshapeOp //===----------------------------------------------------------------------===// -def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> { +def ReshapeOp : Toy_Op<"reshape", [Pure]> { let summary = "tensor reshape operation"; let description = [{ Reshape operation is transforming its input tensor into a new tensor with @@ -299,7 +299,7 @@ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : Toy_Op<"return", [NoSideEffect, HasParent<"FuncOp">, +def ReturnOp : Toy_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { let summary = "return operation"; let description = [{ @@ -342,7 +342,7 @@ //===----------------------------------------------------------------------===// def TransposeOp : Toy_Op<"transpose", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "transpose operation"; let arguments = (ins F64Tensor:$input); diff --git 
a/mlir/examples/toy/Ch5/include/toy/Ops.td b/mlir/examples/toy/Ch5/include/toy/Ops.td --- a/mlir/examples/toy/Ch5/include/toy/Ops.td +++ b/mlir/examples/toy/Ch5/include/toy/Ops.td @@ -45,9 +45,9 @@ // We define a toy operation by inheriting from our base 'Toy_Op' class above. // Here we provide the mnemonic and a list of traits for the operation. The -// constant operation is marked as 'NoSideEffect' as it is a pure operation +// constant operation is marked as 'Pure' as it is a pure operation // and may be removed if dead. -def ConstantOp : Toy_Op<"constant", [NoSideEffect]> { +def ConstantOp : Toy_Op<"constant", [Pure]> { // Provide a summary and description for this operation. This can be used to // auto-generate documentation of the operations within our dialect. let summary = "constant"; @@ -92,7 +92,7 @@ //===----------------------------------------------------------------------===// def AddOp : Toy_Op<"add", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "element-wise addition operation"; let description = [{ The "add" operation performs element-wise addition between two tensors. 
@@ -118,7 +118,7 @@ def CastOp : Toy_Op<"cast", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, - NoSideEffect, + Pure, SameOperandsAndResultShape ]> { let summary = "shape cast operation"; @@ -231,7 +231,7 @@ //===----------------------------------------------------------------------===// def MulOp : Toy_Op<"mul", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "element-wise multiplication operation"; let description = [{ The "mul" operation performs element-wise multiplication between two @@ -272,7 +272,7 @@ // ReshapeOp //===----------------------------------------------------------------------===// -def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> { +def ReshapeOp : Toy_Op<"reshape", [Pure]> { let summary = "tensor reshape operation"; let description = [{ Reshape operation is transforming its input tensor into a new tensor with @@ -300,7 +300,7 @@ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : Toy_Op<"return", [NoSideEffect, HasParent<"FuncOp">, +def ReturnOp : Toy_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { let summary = "return operation"; let description = [{ @@ -343,7 +343,7 @@ //===----------------------------------------------------------------------===// def TransposeOp : Toy_Op<"transpose", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "transpose operation"; let arguments = (ins F64Tensor:$input); diff --git a/mlir/examples/toy/Ch6/include/toy/Ops.td b/mlir/examples/toy/Ch6/include/toy/Ops.td --- a/mlir/examples/toy/Ch6/include/toy/Ops.td +++ b/mlir/examples/toy/Ch6/include/toy/Ops.td @@ -45,9 +45,9 @@ // We define a toy operation by inheriting from our base 'Toy_Op' class above. // Here we provide the mnemonic and a list of traits for the operation. 
The -// constant operation is marked as 'NoSideEffect' as it is a pure operation +// constant operation is marked as 'Pure' as it is a pure operation // and may be removed if dead. -def ConstantOp : Toy_Op<"constant", [NoSideEffect]> { +def ConstantOp : Toy_Op<"constant", [Pure]> { // Provide a summary and description for this operation. This can be used to // auto-generate documentation of the operations within our dialect. let summary = "constant"; @@ -92,7 +92,7 @@ //===----------------------------------------------------------------------===// def AddOp : Toy_Op<"add", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "element-wise addition operation"; let description = [{ The "add" operation performs element-wise addition between two tensors. @@ -118,7 +118,7 @@ def CastOp : Toy_Op<"cast", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, - NoSideEffect, + Pure, SameOperandsAndResultShape ]> { let summary = "shape cast operation"; @@ -231,7 +231,7 @@ //===----------------------------------------------------------------------===// def MulOp : Toy_Op<"mul", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "element-wise multiplication operation"; let description = [{ The "mul" operation performs element-wise multiplication between two @@ -272,7 +272,7 @@ // ReshapeOp //===----------------------------------------------------------------------===// -def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> { +def ReshapeOp : Toy_Op<"reshape", [Pure]> { let summary = "tensor reshape operation"; let description = [{ Reshape operation is transforming its input tensor into a new tensor with @@ -300,7 +300,7 @@ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : Toy_Op<"return", [NoSideEffect, HasParent<"FuncOp">, +def ReturnOp : Toy_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { let summary = "return 
operation"; let description = [{ @@ -343,7 +343,7 @@ //===----------------------------------------------------------------------===// def TransposeOp : Toy_Op<"transpose", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "transpose operation"; let arguments = (ins F64Tensor:$input); diff --git a/mlir/examples/toy/Ch7/include/toy/Ops.td b/mlir/examples/toy/Ch7/include/toy/Ops.td --- a/mlir/examples/toy/Ch7/include/toy/Ops.td +++ b/mlir/examples/toy/Ch7/include/toy/Ops.td @@ -63,10 +63,10 @@ // We define a toy operation by inheriting from our base 'Toy_Op' class above. // Here we provide the mnemonic and a list of traits for the operation. The -// constant operation is marked as 'NoSideEffect' as it is a pure operation +// constant operation is marked as 'Pure' as it is a pure operation // and may be removed if dead. def ConstantOp : Toy_Op<"constant", - [ConstantLike, NoSideEffect, + [ConstantLike, Pure, DeclareOpInterfaceMethods]> { // Provide a summary and description for this operation. This can be used to // auto-generate documentation of the operations within our dialect. @@ -115,7 +115,7 @@ //===----------------------------------------------------------------------===// def AddOp : Toy_Op<"add", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "element-wise addition operation"; let description = [{ The "add" operation performs element-wise addition between two tensors. 
@@ -141,7 +141,7 @@ def CastOp : Toy_Op<"cast", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, - NoSideEffect, + Pure, SameOperandsAndResultShape ]> { let summary = "shape cast operation"; @@ -255,7 +255,7 @@ //===----------------------------------------------------------------------===// def MulOp : Toy_Op<"mul", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "element-wise multiplication operation"; let description = [{ The "mul" operation performs element-wise multiplication between two @@ -296,7 +296,7 @@ // ReshapeOp //===----------------------------------------------------------------------===// -def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> { +def ReshapeOp : Toy_Op<"reshape", [Pure]> { let summary = "tensor reshape operation"; let description = [{ Reshape operation is transforming its input tensor into a new tensor with @@ -324,7 +324,7 @@ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : Toy_Op<"return", [NoSideEffect, HasParent<"FuncOp">, +def ReturnOp : Toy_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { let summary = "return operation"; let description = [{ @@ -366,7 +366,7 @@ // StructAccessOp //===----------------------------------------------------------------------===// -def StructAccessOp : Toy_Op<"struct_access", [NoSideEffect]> { +def StructAccessOp : Toy_Op<"struct_access", [Pure]> { let summary = "struct access"; let description = [{ Access the Nth element of a value returning a struct type. @@ -395,7 +395,7 @@ // StructConstantOp //===----------------------------------------------------------------------===// -def StructConstantOp : Toy_Op<"struct_constant", [ConstantLike, NoSideEffect]> { +def StructConstantOp : Toy_Op<"struct_constant", [ConstantLike, Pure]> { let summary = "struct constant"; let description = [{ Constant operation turns a literal struct value into an SSA value. 
The data @@ -424,7 +424,7 @@ //===----------------------------------------------------------------------===// def TransposeOp : Toy_Op<"transpose", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "transpose operation"; let arguments = (ins F64Tensor:$input); diff --git a/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td --- a/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td +++ b/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td @@ -216,7 +216,7 @@ def AMDGPU_MFMAOp : AMDGPU_Op<"mfma", [AllTypesMatch<["sourceA", "sourceB"]>, AllTypesMatch<["destC", "destD"]>, - NoSideEffect]>, + Pure]>, Arguments<(ins I32Attr:$m, I32Attr:$n, diff --git a/mlir/include/mlir/Dialect/AMX/AMX.td b/mlir/include/mlir/Dialect/AMX/AMX.td --- a/mlir/include/mlir/Dialect/AMX/AMX.td +++ b/mlir/include/mlir/Dialect/AMX/AMX.td @@ -78,7 +78,7 @@ // Tile reset. // -def TileZeroOp : AMX_Op<"tile_zero", [NoSideEffect]> { +def TileZeroOp : AMX_Op<"tile_zero", [Pure]> { let summary = "tile zero operation"; let description = [{ Zeroes the destination tile, with the shape defined by the 2-dim @@ -106,7 +106,7 @@ // Tile memory operations. // -def TileLoadOp : AMX_Op<"tile_load", [NoSideEffect]> { +def TileLoadOp : AMX_Op<"tile_load", [Pure]> { let summary = "tile load operation"; let description = [{ Loads a tile from memory defined by a base and indices, with the @@ -171,7 +171,8 @@ // Tile arithmetic operations. 
// -def TileMulFOp : AMX_Op<"tile_mulf", [NoSideEffect, AllTypesMatch<["acc", "res"]>]> { +def TileMulFOp : AMX_Op<"tile_mulf", [ + Pure, AllTypesMatch<["acc", "res"]>]> { let summary = "tile multiplication operation (floating-point)"; let description = [{ Multiplies a "m x k" tile with a "k x n" tile and accumulates the results @@ -206,7 +207,8 @@ let hasVerifier = 1; } -def TileMulIOp : AMX_Op<"tile_muli", [NoSideEffect, AllTypesMatch<["acc", "res"]>]> { +def TileMulIOp : AMX_Op<"tile_muli", [ + Pure, AllTypesMatch<["acc", "res"]>]> { let summary = "tile multiplication operation (integer)"; let description = [{ Multiplies a "m x k" tile with a "k x n" tile and accumulates the results diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td --- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td +++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td @@ -34,7 +34,7 @@ def ImplicitAffineTerminator : SingleBlockImplicitTerminator<"AffineYieldOp">; -def AffineApplyOp : Affine_Op<"apply", [NoSideEffect]> { +def AffineApplyOp : Affine_Op<"apply", [Pure]> { let summary = "affine apply operation"; let description = [{ The affine.apply operation applies an [affine mapping](#affine-expressions) @@ -105,8 +105,8 @@ } def AffineForOp : Affine_Op<"for", - [AutomaticAllocationScope, ImplicitAffineTerminator, RecursiveSideEffects, - DeclareOpInterfaceMethods, DeclareOpInterfaceMethods { + [ImplicitAffineTerminator, RecursivelySpeculatable, + RecursiveMemoryEffects, NoRegionArguments]> { let summary = "if-then-else operation"; let description = [{ Syntax: @@ -571,7 +571,7 @@ let hasVerifier = 1; } -def AffineMinOp : AffineMinMaxOpBase<"min", [NoSideEffect]> { +def AffineMinOp : AffineMinMaxOpBase<"min", [Pure]> { let summary = "min operation"; let description = [{ Syntax: @@ -595,7 +595,7 @@ }]; } -def AffineMaxOp : AffineMinMaxOpBase<"max", [NoSideEffect]> { +def AffineMaxOp : AffineMinMaxOpBase<"max", [Pure]> { let summary 
= "max operation"; let description = [{ The "max" operation computes the maximum value result from a multi-result @@ -610,8 +610,9 @@ } def AffineParallelOp : Affine_Op<"parallel", - [AutomaticAllocationScope, ImplicitAffineTerminator, RecursiveSideEffects, - DeclareOpInterfaceMethods, MemRefsNormalizable]> { + [AutomaticAllocationScope, ImplicitAffineTerminator, RecursivelySpeculatable, + RecursiveMemoryEffects, DeclareOpInterfaceMethods, + MemRefsNormalizable]> { let summary = "multi-index parallel band operation"; let description = [{ The "affine.parallel" operation represents a hyper-rectangular affine @@ -902,7 +903,7 @@ let hasVerifier = 1; } -def AffineYieldOp : Affine_Op<"yield", [NoSideEffect, Terminator, ReturnLike, +def AffineYieldOp : Affine_Op<"yield", [Pure, Terminator, ReturnLike, MemRefsNormalizable]> { let summary = "Yield values to parent operation"; let description = [{ @@ -1064,7 +1065,7 @@ //===----------------------------------------------------------------------===// def AffineDelinearizeIndexOp : Affine_Op<"delinearize_index", - [NoSideEffect]> { + [Pure]> { let summary = "delinearize an index"; let description = [{ The `affine.delinearize_index` operation takes a single index value and diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td --- a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td +++ b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td @@ -21,7 +21,7 @@ // Base class for Arith dialect ops. Ops in this dialect have no side // effects and can be applied element-wise to vectors and tensors. 
class Arith_Op traits = []> : - Op] # ElementwiseMappable.traits>; @@ -127,7 +127,7 @@ //===----------------------------------------------------------------------===// def Arith_ConstantOp : Op, AllTypesMatch<["value", "result"]>, DeclareOpInterfaceMethods]> { diff --git a/mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td b/mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td --- a/mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td +++ b/mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td @@ -64,7 +64,7 @@ : ArmNeon_IntrOp; def SMullOp : ArmNeon_OverloadedOneResultIntrOp<"smull", [ - NoSideEffect, + Pure, AllTypesMatch<["a", "b"]>, TypesMatchWith< "res has same vector shape and element bitwidth scaled by 2 as a", @@ -93,7 +93,7 @@ } def SdotOp : ArmNeon_OverloadedOperandsWithOneResultIntrOp<"sdot",[1], [ - NoSideEffect, + Pure, AllTypesMatch<["b", "c"]>, AllTypesMatch<["a", "res"]>, TypesMatchWith<"res has the same number of elements as operand b", @@ -126,7 +126,7 @@ /*traits=*/traits>; def Sdot2dOp : ArmNeon_2dOp<"sdot", [ - NoSideEffect, + Pure, AllTypesMatch<["b", "c"]>, AllTypesMatch<["a", "res"]>, PredOpTrait< diff --git a/mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td b/mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td --- a/mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td +++ b/mlir/include/mlir/Dialect/ArmSVE/ArmSVE.td @@ -94,7 +94,7 @@ } def SdotOp : ArmSVE_Op<"sdot", - [NoSideEffect, + [Pure, AllTypesMatch<["src1", "src2"]>, AllTypesMatch<["acc", "dst"]>, ]> { @@ -125,7 +125,7 @@ } def SmmlaOp : ArmSVE_Op<"smmla", - [NoSideEffect, + [Pure, AllTypesMatch<["src1", "src2"]>, AllTypesMatch<["acc", "dst"]>, ]> { @@ -157,7 +157,7 @@ } def UdotOp : ArmSVE_Op<"udot", - [NoSideEffect, + [Pure, AllTypesMatch<["src1", "src2"]>, AllTypesMatch<["acc", "dst"]>, ]> { @@ -188,7 +188,7 @@ } def UmmlaOp : ArmSVE_Op<"ummla", - [NoSideEffect, + [Pure, AllTypesMatch<["src1", "src2"]>, AllTypesMatch<["acc", "dst"]>, ]> { diff --git a/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td 
b/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td --- a/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td +++ b/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td @@ -101,7 +101,7 @@ def Async_YieldOp : Async_Op<"yield", [ - HasParent<"ExecuteOp">, NoSideEffect, Terminator, + HasParent<"ExecuteOp">, Pure, Terminator, DeclareOpInterfaceMethods]> { let summary = "terminator for Async execute operation"; let description = [{ @@ -156,7 +156,7 @@ }]; } -def Async_CreateGroupOp : Async_Op<"create_group", [NoSideEffect]> { +def Async_CreateGroupOp : Async_Op<"create_group", [Pure]> { let summary = "creates an empty async group"; let description = [{ The `async.create_group` allocates an empty async group. Async tokens or diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td @@ -345,7 +345,7 @@ BufferizableOpInterface, SameOperandsAndResultShape, SameOperandsAndResultElementType, - NoSideEffect, + Pure, TypesMatchWith<"type of 'tensor' is the tensor equivalent of 'memref'", "memref", "tensor", "memref::getTensorTypeFromMemRefType($_self)"> diff --git a/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td b/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td --- a/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td +++ b/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td @@ -21,7 +21,7 @@ // floating-point element type. These operations take two operands and return // one result, all of which must be complex numbers of the same type. class ComplexArithmeticOp traits = []> : - Complex_Op { let arguments = (ins Complex:$lhs, Complex:$rhs); let results = (outs Complex:$result); @@ -32,7 +32,7 @@ // floating-point element type. These operations take one operand and return // one result; the operand must be a complex number. 
class ComplexUnaryOp traits = []> : - Complex_Op { + Complex_Op { let arguments = (ins Complex:$complex); let assemblyFormat = "$complex attr-dict `:` type($complex)"; } @@ -100,7 +100,7 @@ //===----------------------------------------------------------------------===// def ConstantOp : Complex_Op<"constant", [ - ConstantLike, NoSideEffect, + ConstantLike, Pure, DeclareOpInterfaceMethods ]> { let summary = "complex number constant operation"; @@ -154,7 +154,7 @@ //===----------------------------------------------------------------------===// def CreateOp : Complex_Op<"create", - [NoSideEffect, + [Pure, AllTypesMatch<["real", "imaginary"]>, TypesMatchWith<"complex element type matches real operand type", "complex", "real", @@ -203,7 +203,7 @@ //===----------------------------------------------------------------------===// def EqualOp : Complex_Op<"eq", - [NoSideEffect, AllTypesMatch<["lhs", "rhs"]>, Elementwise]> { + [Pure, AllTypesMatch<["lhs", "rhs"]>, Elementwise]> { let summary = "computes whether two complex values are equal"; let description = [{ The `eq` op takes two complex numbers and returns whether they are equal. 
@@ -378,7 +378,7 @@ //===----------------------------------------------------------------------===// def NotEqualOp : Complex_Op<"neq", - [NoSideEffect, AllTypesMatch<["lhs", "rhs"]>, Elementwise]> { + [Pure, AllTypesMatch<["lhs", "rhs"]>, Elementwise]> { let summary = "computes whether two complex values are not equal"; let description = [{ The `neq` op takes two complex numbers and returns whether they are not diff --git a/mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td b/mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td --- a/mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td +++ b/mlir/include/mlir/Dialect/ControlFlow/IR/ControlFlowOps.td @@ -64,7 +64,7 @@ def BranchOp : CF_Op<"br", [ DeclareOpInterfaceMethods, - NoSideEffect, Terminator + Pure, Terminator ]> { let summary = "branch operation"; let description = [{ @@ -113,7 +113,7 @@ def CondBranchOp : CF_Op<"cond_br", [AttrSizedOperandSegments, DeclareOpInterfaceMethods, - NoSideEffect, Terminator]> { + Pure, Terminator]> { let summary = "conditional branch operation"; let description = [{ The `cond_br` terminator operation represents a conditional branch on a @@ -228,7 +228,7 @@ def SwitchOp : CF_Op<"switch", [AttrSizedOperandSegments, DeclareOpInterfaceMethods, - NoSideEffect, Terminator]> { + Pure, Terminator]> { let summary = "switch operation"; let description = [{ The `switch` terminator operation represents a switch on a signless integer diff --git a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td --- a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td +++ b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td @@ -148,7 +148,7 @@ } def EmitC_IncludeOp - : EmitC_Op<"include", [NoSideEffect, HasParent<"ModuleOp">]> { + : EmitC_Op<"include", [Pure, HasParent<"ModuleOp">]> { let summary = "Include operation"; let description = [{ The `include` operation allows to define a source file inclusion via the diff --git 
a/mlir/include/mlir/Dialect/Func/IR/FuncOps.td b/mlir/include/mlir/Dialect/Func/IR/FuncOps.td --- a/mlir/include/mlir/Dialect/Func/IR/FuncOps.td +++ b/mlir/include/mlir/Dialect/Func/IR/FuncOps.td @@ -165,7 +165,7 @@ //===----------------------------------------------------------------------===// def ConstantOp : Func_Op<"constant", - [ConstantLike, NoSideEffect, + [ConstantLike, Pure, DeclareOpInterfaceMethods]> { let summary = "constant"; let description = [{ @@ -327,7 +327,7 @@ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : Func_Op<"return", [NoSideEffect, HasParent<"FuncOp">, +def ReturnOp : Func_Op<"return", [Pure, HasParent<"FuncOp">, MemRefsNormalizable, ReturnLike, Terminator]> { let summary = "Function return operation"; let description = [{ diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td --- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td +++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td @@ -45,7 +45,7 @@ class GPU_IndexOp traits = []> : GPU_Op])>, + Pure, DeclareOpInterfaceMethods])>, Arguments<(ins GPU_DimensionAttr:$dimension)>, Results<(outs Index)> { let assemblyFormat = "$dimension attr-dict"; } @@ -100,7 +100,7 @@ } def GPU_LaneIdOp : GPU_Op<"lane_id", [ - NoSideEffect, DeclareOpInterfaceMethods]> { + Pure, DeclareOpInterfaceMethods]> { let description = [{ Returns the lane id within the subgroup (warp/wave). @@ -114,7 +114,7 @@ } def GPU_SubgroupIdOp : GPU_Op<"subgroup_id", [ - NoSideEffect, DeclareOpInterfaceMethods]>, + Pure, DeclareOpInterfaceMethods]>, Arguments<(ins)>, Results<(outs Index:$result)> { let description = [{ Returns the subgroup id, i.e. 
the index of the current subgroup within the @@ -146,7 +146,7 @@ def GPU_NumSubgroupsOp : GPU_Op<"num_subgroups", [ - NoSideEffect, DeclareOpInterfaceMethods]>, + Pure, DeclareOpInterfaceMethods]>, Arguments<(ins)>, Results<(outs Index:$result)> { let description = [{ Returns the number of subgroups within a workgroup. @@ -162,7 +162,7 @@ } def GPU_SubgroupSizeOp : GPU_Op<"subgroup_size", [ - NoSideEffect, DeclareOpInterfaceMethods]>, + Pure, DeclareOpInterfaceMethods]>, Arguments<(ins)>, Results<(outs Index:$result)> { let description = [{ Returns the number of threads within a subgroup. @@ -612,7 +612,7 @@ }]; } -def GPU_ReturnOp : GPU_Op<"return", [HasParent<"GPUFuncOp">, NoSideEffect, +def GPU_ReturnOp : GPU_Op<"return", [HasParent<"GPUFuncOp">, Pure, Terminator]>, Arguments<(ins Variadic:$operands)>, Results<(outs)> { let summary = "Terminator for GPU functions."; @@ -629,7 +629,7 @@ } def GPU_TerminatorOp : GPU_Op<"terminator", [HasParent<"LaunchOp">, - NoSideEffect, Terminator]>, + Pure, Terminator]>, Arguments<(ins)>, Results<(outs)> { let summary = "Terminator for GPU launch regions."; let description = [{ @@ -641,7 +641,7 @@ let assemblyFormat = "attr-dict"; } -def GPU_YieldOp : GPU_Op<"yield", [NoSideEffect, Terminator]>, +def GPU_YieldOp : GPU_Op<"yield", [Pure, Terminator]>, Arguments<(ins Variadic:$values)> { let summary = "GPU yield operation"; let description = [{ @@ -761,7 +761,7 @@ "i32 or f32">; def GPU_ShuffleOp : GPU_Op< - "shuffle", [NoSideEffect, AllTypesMatch<["value", "shuffleResult"]>]>, + "shuffle", [Pure, AllTypesMatch<["value", "shuffleResult"]>]>, Arguments<(ins I32OrF32:$value, I32:$offset, I32:$width, GPU_ShuffleModeAttr:$mode)>, Results<(outs I32OrF32:$shuffleResult, I1:$valid)> { @@ -1164,7 +1164,7 @@ } def GPU_SubgroupMmaComputeOp : GPU_Op<"subgroup_mma_compute", - [NoSideEffect, AllTypesMatch<["opC", "res"]>]>{ + [Pure, AllTypesMatch<["opC", "res"]>]>{ let summary = "GPU warp synchronous matrix multiply accumulate"; @@ -1202,7 
+1202,7 @@ } def GPU_SubgroupMmaConstantMatrixOp : GPU_Op<"subgroup_mma_constant_matrix", - [NoSideEffect, + [Pure, TypesMatchWith<"value type matches element type of mma_matrix", "res", "value", "$_self.cast().getElementType()">]>{ @@ -1267,7 +1267,7 @@ "mma_element_wise">; def GPU_SubgroupMmaElementwiseOp : GPU_Op<"subgroup_mma_elementwise", - [NoSideEffect, + [Pure, AllTypesMatch<["args"]>]>{ let summary = "GPU warp elementwise operation on a matrix"; diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td @@ -12,35 +12,35 @@ class LLVM_UnaryIntrinsicOp traits = []> : LLVM_OneResultIntrOp { + !listconcat([Pure, SameOperandsAndResultType], traits)> { let arguments = (ins LLVM_Type:$in); } class LLVM_BinarySameArgsIntrinsicOp traits = []> : LLVM_OneResultIntrOp { + !listconcat([Pure, SameOperandsAndResultType], traits)> { let arguments = (ins LLVM_Type:$a, LLVM_Type:$b); } class LLVM_BinaryIntrinsicOp traits = []> : LLVM_OneResultIntrOp { + !listconcat([Pure], traits)> { let arguments = (ins LLVM_Type:$a, LLVM_Type:$b); } class LLVM_TernarySameArgsIntrinsicOp traits = []> : LLVM_OneResultIntrOp { + !listconcat([Pure, SameOperandsAndResultType], traits)> { let arguments = (ins LLVM_Type:$a, LLVM_Type:$b, LLVM_Type:$c); } class LLVM_CountZerosIntrinsicOp traits = []> : LLVM_OneResultIntrOp { + !listconcat([Pure], traits)> { let arguments = (ins LLVM_Type:$in, I1:$zero_undefined); } -def LLVM_AbsOp : LLVM_OneResultIntrOp<"abs", [], [0], [NoSideEffect]> { +def LLVM_AbsOp : LLVM_OneResultIntrOp<"abs", [], [0], [Pure]> { let arguments = (ins LLVM_Type:$in, I1:$is_int_min_poison); } @@ -389,7 +389,7 @@ /// Create a llvm.get.active.lane.mask to set a mask up to a given position. 
def LLVM_GetActiveLaneMaskOp - : LLVM_OneResultIntrOp<"get.active.lane.mask", [0], [0], [NoSideEffect]> { + : LLVM_OneResultIntrOp<"get.active.lane.mask", [0], [0], [Pure]> { let arguments = (ins LLVM_Type:$base, LLVM_Type:$n); let assemblyFormat = "$base `,` $n attr-dict `:` " "type($base) `,` type($n) `to` type($res)"; @@ -468,7 +468,7 @@ /// Create a call to stepvector intrinsic. def LLVM_StepVectorOp - : LLVM_IntrOp<"experimental.stepvector", [0], [], [NoSideEffect], 1> { + : LLVM_IntrOp<"experimental.stepvector", [0], [], [Pure], 1> { let arguments = (ins); let results = (outs LLVM_Type:$res); let assemblyFormat = "attr-dict `:` type($res)"; @@ -477,7 +477,7 @@ /// Create a call to vector.insert intrinsic def LLVM_vector_insert : LLVM_Op<"intr.vector.insert", - [NoSideEffect, AllTypesMatch<["dstvec", "res"]>, + [Pure, AllTypesMatch<["dstvec", "res"]>, PredOpTrait<"vectors are not bigger than 2^17 bits.", And<[ CPred<"getSrcVectorBitWidth() <= 131072">, CPred<"getDstVectorBitWidth() <= 131072"> @@ -512,7 +512,7 @@ /// Create a call to vector.extract intrinsic def LLVM_vector_extract : LLVM_Op<"intr.vector.extract", - [NoSideEffect, + [Pure, PredOpTrait<"vectors are not bigger than 2^17 bits.", And<[ CPred<"getSrcVectorBitWidth() <= 131072">, CPred<"getResVectorBitWidth() <= 131072"> @@ -548,7 +548,7 @@ // class LLVM_VPBinaryBase - : LLVM_OneResultIntrOp<"vp." # mnem, [0], [], [NoSideEffect]>, + : LLVM_OneResultIntrOp<"vp." # mnem, [0], [], [Pure]>, Arguments<(ins LLVM_VectorOf:$lhs, LLVM_VectorOf:$rhs, LLVM_VectorOf:$mask, I32:$evl)>; @@ -557,14 +557,14 @@ class LLVM_VPBinaryF : LLVM_VPBinaryBase; class LLVM_VPUnaryBase - : LLVM_OneResultIntrOp<"vp." # mnem, [0], [], [NoSideEffect]>, + : LLVM_OneResultIntrOp<"vp." # mnem, [0], [], [Pure]>, Arguments<(ins LLVM_VectorOf:$op, LLVM_VectorOf:$mask, I32:$evl)>; class LLVM_VPUnaryF : LLVM_VPUnaryBase; class LLVM_VPTernaryBase - : LLVM_OneResultIntrOp<"vp." 
# mnem, [0], [], [NoSideEffect]>, + : LLVM_OneResultIntrOp<"vp." # mnem, [0], [], [Pure]>, Arguments<(ins LLVM_VectorOf:$op1, LLVM_VectorOf:$op2, LLVM_VectorOf:$op3, LLVM_VectorOf:$mask, I32:$evl)>; @@ -572,7 +572,7 @@ class LLVM_VPTernaryF : LLVM_VPTernaryBase; class LLVM_VPReductionBase - : LLVM_OneResultIntrOp<"vp.reduce." # mnem, [], [1], [NoSideEffect]>, + : LLVM_OneResultIntrOp<"vp.reduce." # mnem, [], [1], [Pure]>, Arguments<(ins element:$satrt_value, LLVM_VectorOf:$val, LLVM_VectorOf:$mask, I32:$evl)>; @@ -581,12 +581,12 @@ class LLVM_VPReductionF : LLVM_VPReductionBase; class LLVM_VPSelectBase - : LLVM_OneResultIntrOp<"vp." # mnem, [], [1], [NoSideEffect]>, + : LLVM_OneResultIntrOp<"vp." # mnem, [], [1], [Pure]>, Arguments<(ins LLVM_VectorOf:$cond, LLVM_AnyVector:$true_val, LLVM_AnyVector:$false_val, I32:$evl)>; class LLVM_VPCastBase - : LLVM_OneResultIntrOp<"vp." # mnem, [0], [0], [NoSideEffect]>, + : LLVM_OneResultIntrOp<"vp." # mnem, [0], [0], [Pure]>, Arguments<(ins LLVM_VectorOf:$src, LLVM_VectorOf:$mask, I32:$evl)>; diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td @@ -378,7 +378,7 @@ // // Sample use: derive an entry from this class and populate the fields. // -// def LLVM_Name : LLVM_ZeroResultIntrOp<"name", [0], [NoSideEffect]>, +// def LLVM_Name : LLVM_ZeroResultIntrOp<"name", [0], [Pure]>, // Arguments<(ins LLVM_Type, LLVM_Type)>; // // The mnemonic will be prefixed with "llvm.intr.", where the "llvm." part comes @@ -405,14 +405,14 @@ // LLVM vector reduction over a single vector. class LLVM_VectorReduction : LLVM_OneResultIntrOp<"vector.reduce." # mnem, - [], [0], [NoSideEffect]>, + [], [0], [Pure]>, Arguments<(ins LLVM_Type)>; // LLVM vector reduction over a single vector, with an initial value, // and with permission to reassociate the reduction operations. 
class LLVM_VectorReductionAcc : LLVM_OpBase, + [Pure]>, Results<(outs LLVM_Type:$res)>, Arguments<(ins LLVM_Type, LLVM_Type, DefaultValuedAttr:$reassoc)> { diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td @@ -208,7 +208,7 @@ class LLVM_ArithmeticOpBase traits = []> : LLVM_Op, + !listconcat([Pure, SameOperandsAndResultType], traits)>, LLVM_Builder<"$res = builder.Create" # instName # "($lhs, $rhs);"> { dag commonArgs = (ins LLVM_ScalarOrVectorOf:$lhs, LLVM_ScalarOrVectorOf:$rhs); @@ -237,7 +237,7 @@ class LLVM_UnaryFloatArithmeticOp traits = []> : LLVM_Op], traits)>, + !listconcat([Pure, SameOperandsAndResultType, DeclareOpInterfaceMethods], traits)>, LLVM_Builder<"$res = builder.Create" # instName # "($operand);"> { let arguments = (ins type:$operand, DefaultValuedAttr:$fastmathFlags); let results = (outs type:$res); @@ -285,7 +285,7 @@ } // Other integer operations. -def LLVM_ICmpOp : LLVM_Op<"icmp", [NoSideEffect]> { +def LLVM_ICmpOp : LLVM_Op<"icmp", [Pure]> { let arguments = (ins ICmpPredicate:$predicate, AnyTypeOf<[LLVM_ScalarOrVectorOf, LLVM_ScalarOrVectorOf]>:$lhs, AnyTypeOf<[LLVM_ScalarOrVectorOf, LLVM_ScalarOrVectorOf]>:$rhs); @@ -330,7 +330,7 @@ // Other floating-point operations. def LLVM_FCmpOp : LLVM_Op<"fcmp", [ - NoSideEffect, DeclareOpInterfaceMethods]> { + Pure, DeclareOpInterfaceMethods]> { let arguments = (ins FCmpPredicate:$predicate, LLVM_ScalarOrVectorOf:$lhs, LLVM_ScalarOrVectorOf:$rhs, @@ -438,7 +438,7 @@ let hasVerifier = 1; } -def LLVM_GEPOp : LLVM_Op<"getelementptr", [NoSideEffect]> { +def LLVM_GEPOp : LLVM_Op<"getelementptr", [Pure]> { let arguments = (ins LLVM_ScalarOrVectorOf:$base, Variadic>:$dynamicIndices, DenseI32ArrayAttr:$rawConstantIndices, @@ -583,7 +583,7 @@ // Casts. 
class LLVM_CastOp traits = []> : - LLVM_Op, + LLVM_Op, LLVM_Builder<"$res = builder.Create" # instName # "($arg, $_resultType);"> { let arguments = (ins type:$arg); let results = (outs resultType:$res); @@ -733,7 +733,7 @@ // ExtractElementOp //===----------------------------------------------------------------------===// -def LLVM_ExtractElementOp : LLVM_Op<"extractelement", [NoSideEffect, +def LLVM_ExtractElementOp : LLVM_Op<"extractelement", [Pure, TypesMatchWith<"result type matches vector element type", "vector", "res", "LLVM::getVectorElementType($_self)">]> { let summary = "Extract an element from an LLVM vector."; @@ -764,7 +764,7 @@ // ExtractValueOp //===----------------------------------------------------------------------===// -def LLVM_ExtractValueOp : LLVM_Op<"extractvalue", [NoSideEffect]> { +def LLVM_ExtractValueOp : LLVM_Op<"extractvalue", [Pure]> { let summary = "Extract a value from an LLVM struct."; let arguments = (ins LLVM_AnyAggregate:$container, DenseI64ArrayAttr:$position); @@ -792,7 +792,7 @@ // InsertElementOp //===----------------------------------------------------------------------===// -def LLVM_InsertElementOp : LLVM_Op<"insertelement", [NoSideEffect, +def LLVM_InsertElementOp : LLVM_Op<"insertelement", [Pure, TypesMatchWith<"argument type matches vector element type", "vector", "value", "LLVM::getVectorElementType($_self)">, AllTypesMatch<["res", "vector"]>]> { @@ -824,7 +824,7 @@ //===----------------------------------------------------------------------===// def LLVM_InsertValueOp : LLVM_Op< - "insertvalue", [NoSideEffect, AllTypesMatch<["container", "res"]>]> { + "insertvalue", [Pure, AllTypesMatch<["container", "res"]>]> { let summary = "Insert a value into an LLVM struct."; let arguments = (ins LLVM_AnyAggregate:$container, LLVM_PrimitiveType:$value, @@ -850,7 +850,7 @@ //===----------------------------------------------------------------------===// def LLVM_ShuffleVectorOp : LLVM_Op<"shufflevector", - [NoSideEffect, 
AllTypesMatch<["v1", "v2"]>]> { + [Pure, AllTypesMatch<["v1", "v2"]>]> { let summary = "Construct a permutation of two vectors."; let arguments = (ins LLVM_AnyVector:$v1, LLVM_AnyVector:$v2, @@ -878,7 +878,7 @@ // Misc operations. def LLVM_SelectOp : LLVM_Op<"select", - [NoSideEffect, AllTypesMatch<["trueValue", "falseValue", "res"]>]>, + [Pure, AllTypesMatch<["trueValue", "falseValue", "res"]>]>, LLVM_Builder< "$res = builder.CreateSelect($condition, $trueValue, $falseValue);"> { let arguments = (ins LLVM_ScalarOrVectorOf:$condition, @@ -905,7 +905,7 @@ // Terminators. def LLVM_BrOp : LLVM_TerminatorOp<"br", - [DeclareOpInterfaceMethods, NoSideEffect]> { + [DeclareOpInterfaceMethods, Pure]> { let arguments = (ins Variadic:$destOperands); let successors = (successor AnySuccessor:$dest); let assemblyFormat = [{ @@ -915,7 +915,7 @@ } def LLVM_CondBrOp : LLVM_TerminatorOp<"cond_br", [AttrSizedOperandSegments, DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let arguments = (ins I1:$condition, Variadic:$trueDestOperands, Variadic:$falseDestOperands, @@ -955,7 +955,7 @@ // ReturnOp //===----------------------------------------------------------------------===// -def LLVM_ReturnOp : LLVM_TerminatorOp<"return", [NoSideEffect]> { +def LLVM_ReturnOp : LLVM_TerminatorOp<"return", [Pure]> { let arguments = (ins Optional:$arg); let assemblyFormat = "attr-dict ($arg^ `:` type($arg))?"; @@ -1000,7 +1000,7 @@ def LLVM_SwitchOp : LLVM_TerminatorOp<"switch", [AttrSizedOperandSegments, DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let arguments = (ins AnyInteger:$value, Variadic:$defaultOperands, @@ -1116,7 +1116,7 @@ } def LLVM_AddressOfOp : LLVM_Op<"mlir.addressof", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let arguments = (ins FlatSymbolRefAttr:$global_name); let results = (outs LLVM_AnyPointer:$res); @@ -1578,7 +1578,7 @@ } def LLVM_NullOp - : LLVM_Op<"mlir.null", [NoSideEffect]>, + : LLVM_Op<"mlir.null", [Pure]>, 
LLVM_Builder<"$res = llvm::ConstantPointerNull::get(" " cast($_resultType));"> { let summary = "Defines a value containing a null pointer to LLVM type."; @@ -1604,7 +1604,7 @@ let assemblyFormat = "attr-dict `:` type($res)"; } -def LLVM_UndefOp : LLVM_Op<"mlir.undef", [NoSideEffect]>, +def LLVM_UndefOp : LLVM_Op<"mlir.undef", [Pure]>, LLVM_Builder<"$res = llvm::UndefValue::get($_resultType);"> { let summary = "Creates an undefined value of LLVM dialect type."; let description = [{ @@ -1626,7 +1626,7 @@ } def LLVM_ConstantOp - : LLVM_Op<"mlir.constant", [NoSideEffect, ConstantLike]>, + : LLVM_Op<"mlir.constant", [Pure, ConstantLike]>, LLVM_Builder<[{$res = getLLVMConstant($_resultType, $value, $_location, moduleTranslation);}]> { diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td --- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td @@ -73,7 +73,7 @@ //===----------------------------------------------------------------------===// class NVVM_SpecialRegisterOp traits = []> : - NVVM_IntrOp { + NVVM_IntrOp { let arguments = (ins); let assemblyFormat = "attr-dict `:` type($res)"; } @@ -105,7 +105,7 @@ // NVVM approximate op definitions //===----------------------------------------------------------------------===// -def NVVM_RcpApproxFtzF32Op : NVVM_IntrOp<"rcp.approx.ftz.f", [NoSideEffect], 1> { +def NVVM_RcpApproxFtzF32Op : NVVM_IntrOp<"rcp.approx.ftz.f", [Pure], 1> { let arguments = (ins F32:$arg); let results = (outs F32:$res); let assemblyFormat = "$arg attr-dict `:` type($res)"; diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td --- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td @@ -47,7 +47,7 @@ class ROCDL_SpecialRegisterOp traits = []> : - ROCDL_Op, + ROCDL_Op, Results<(outs LLVM_Type:$res)>, Arguments<(ins)> { string llvmBuilder = "$res = 
createIntrinsicCall(builder," # "llvm::Intrinsic::amdgcn_" # !subst(".","_", mnemonic) # ");"; @@ -56,7 +56,7 @@ class ROCDL_DeviceFunctionOp traits = []> : - ROCDL_Op, + ROCDL_Op, Results<(outs LLVM_Type:$res)>, Arguments<(ins)> { string llvmBuilder = "$res = createDeviceFunctionCall(builder, \"" # device_function # "\", " # parameter # ");"; diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td @@ -24,7 +24,7 @@ class Linalg_Op traits = []> : Op; -def Linalg_YieldOp : Linalg_Op<"yield", [NoSideEffect, ReturnLike, Terminator]>, +def Linalg_YieldOp : Linalg_Op<"yield", [Pure, ReturnLike, Terminator]>, Arguments<(ins Variadic:$values)> { let summary = "Linalg yield operation"; let description = [{ @@ -43,7 +43,7 @@ let hasVerifier = 1; } -def Linalg_IndexOp : Linalg_Op<"index", [NoSideEffect]>, +def Linalg_IndexOp : Linalg_Op<"index", [Pure]>, Arguments<(ins ConfinedAttr]>:$dim)>, Results<(outs Index:$result)> { let summary = "linalg index operation"; diff --git a/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramOps.td b/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramOps.td --- a/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramOps.td +++ b/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramOps.td @@ -203,7 +203,7 @@ //===----------------------------------------------------------------------===// def MLProgram_GlobalLoadConstOp : MLProgram_Op<"global_load_const", [ - NoSideEffect, + Pure, DeclareOpInterfaceMethods ]> { let summary = "Direct load a constant value from a global"; @@ -443,7 +443,7 @@ //===----------------------------------------------------------------------===// def MLProgram_OutputOp : MLProgram_Op<"output", [ - NoSideEffect, HasParent<"SubgraphOp">, ReturnLike, Terminator + Pure, HasParent<"SubgraphOp">, ReturnLike, Terminator ]> { let summary = "Outputs values from a subgraph function"; 
let description = [{ @@ -469,7 +469,7 @@ //===----------------------------------------------------------------------===// def MLProgram_ReturnOp : MLProgram_Op<"return", [ - NoSideEffect, HasParent<"FuncOp">, ReturnLike, Terminator + Pure, HasParent<"FuncOp">, ReturnLike, Terminator ]> { let summary = "Returns values from a `func` function"; let description = [{ @@ -495,7 +495,7 @@ //===----------------------------------------------------------------------===// def MLProgram_TokenOp : MLProgram_Op<"token", [ - NoSideEffect + Pure ]> { let summary = "Produces a new token value"; let description = [{ diff --git a/mlir/include/mlir/Dialect/Math/IR/MathOps.td b/mlir/include/mlir/Dialect/Math/IR/MathOps.td --- a/mlir/include/mlir/Dialect/Math/IR/MathOps.td +++ b/mlir/include/mlir/Dialect/Math/IR/MathOps.td @@ -17,7 +17,7 @@ // Base class for math dialect ops. Ops in this dialect have no side effects and // can be applied element-wise to vectors and tensors. class Math_Op traits = []> : - Op] # ElementwiseMappable.traits>; diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td --- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td +++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td @@ -327,7 +327,7 @@ [AutomaticAllocationScope, DeclareOpInterfaceMethods, SingleBlockImplicitTerminator<"AllocaScopeReturnOp">, - RecursiveSideEffects, + RecursiveMemoryEffects, NoRegionArguments]> { let summary = "explicitly delimited scope for stack allocation"; let description = [{ @@ -375,7 +375,7 @@ def MemRef_AllocaScopeReturnOp : MemRef_Op<"alloca_scope.return", [HasParent<"AllocaScopeOp">, - NoSideEffect, + Pure, ReturnLike, Terminator]> { let summary = "terminator for alloca_scope operation"; @@ -404,7 +404,7 @@ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, MemRefsNormalizable, - NoSideEffect, + Pure, SameOperandsAndResultShape, ViewLikeOpInterface ]> { @@ -544,7 +544,7 @@ def MemRef_DimOp : MemRef_Op<"dim", [ 
DeclareOpInterfaceMethods, MemRefsNormalizable, - NoSideEffect, + Pure, ShapedDimOpInterface]> { let summary = "dimension index operation"; let description = [{ @@ -809,7 +809,7 @@ def MemRef_ExtractAlignedPointerAsIndexOp : MemRef_Op<"extract_aligned_pointer_as_index", [ DeclareOpInterfaceMethods, - NoSideEffect, + Pure, SameVariadicResultSize]> { let summary = "Extracts a memref's underlying aligned pointer as an index"; let description = [{ @@ -849,7 +849,7 @@ def MemRef_ExtractStridedMetadataOp : MemRef_Op<"extract_strided_metadata", [ DeclareOpInterfaceMethods, - NoSideEffect, + Pure, SameVariadicResultSize]> { let summary = "Extracts a buffer base with offset and strides"; let description = [{ @@ -974,7 +974,7 @@ def AtomicYieldOp : MemRef_Op<"atomic_yield", [ HasParent<"GenericAtomicRMWOp">, - NoSideEffect, + Pure, Terminator ]> { let summary = "yield operation for GenericAtomicRMWOp"; @@ -993,7 +993,7 @@ //===----------------------------------------------------------------------===// def MemRef_GetGlobalOp : MemRef_Op<"get_global", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "get the memref pointing to a global variable"; let description = [{ The `memref.get_global` operation retrieves the memref pointing to a @@ -1209,7 +1209,7 @@ DeclareOpInterfaceMethods, AttrSizedOperandSegments, MemRefsNormalizable, - NoSideEffect, + Pure, OffsetSizeAndStrideOpInterface, ViewLikeOpInterface ]> { @@ -1305,7 +1305,7 @@ // RankOp //===----------------------------------------------------------------------===// -def MemRef_RankOp : MemRef_Op<"rank", [NoSideEffect]> { +def MemRef_RankOp : MemRef_Op<"rank", [Pure]> { let summary = "rank operation"; let description = [{ The `memref.rank` operation takes a memref operand and returns its rank. 
@@ -1331,7 +1331,7 @@ def MemRef_ReshapeOp: MemRef_Op<"reshape", [ DeclareOpInterfaceMethods, - NoSideEffect, + Pure, ViewLikeOpInterface]> { let summary = "memref reshape operation"; let description = [{ @@ -1396,7 +1396,7 @@ class MemRef_ReassociativeReshapeOp traits = []> : MemRef_Op, + [Pure, ViewLikeOpInterface])>, Arguments<(ins AnyStridedMemRef:$src, IndexListArrayAttr:$reassociation)>, Results<(outs AnyStridedMemRef:$result)>{ @@ -1681,7 +1681,7 @@ DeclareOpInterfaceMethods, AttrSizedOperandSegments, OffsetSizeAndStrideOpInterface, - NoSideEffect + Pure ]> { let summary = "memref subview operation"; let description = [{ @@ -1966,7 +1966,7 @@ def MemRef_TransposeOp : MemRef_Op<"transpose", [ DeclareOpInterfaceMethods, - NoSideEffect]>, + Pure]>, Arguments<(ins AnyStridedMemRef:$in, AffineMapAttr:$permutation)>, Results<(outs AnyStridedMemRef)> { let summary = "`transpose` produces a new strided memref (metadata-only)"; @@ -2003,7 +2003,7 @@ def MemRef_ViewOp : MemRef_Op<"view", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "memref view operation"; let description = [{ The "view" operation extracts an N-D contiguous memref with empty layout map diff --git a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td --- a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td +++ b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td @@ -98,7 +98,7 @@ } def NVGPU_MmaSyncOp : NVGPU_Op<"mma.sync", [ - NoSideEffect, + Pure, PredOpTrait<"matrixA and matrixB have same element type", TCopVTEtIsSameAs<0, 1>>]> { let description = [{ diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td @@ -49,7 +49,7 @@ def ParallelOp : OpenMP_Op<"parallel", [ AutomaticAllocationScope, AttrSizedOperandSegments, DeclareOpInterfaceMethods, - RecursiveSideEffects, 
ReductionClauseInterface]> { + RecursiveMemoryEffects, ReductionClauseInterface]> { let summary = "parallel construct"; let description = [{ The parallel construct includes a region of code which is to be executed @@ -113,7 +113,7 @@ let hasVerifier = 1; } -def TerminatorOp : OpenMP_Op<"terminator", [Terminator, NoSideEffect]> { +def TerminatorOp : OpenMP_Op<"terminator", [Terminator, Pure]> { let summary = "terminator for OpenMP regions"; let description = [{ A terminator operation for regions that appear in the body of OpenMP @@ -249,7 +249,7 @@ def WsLoopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments, AllTypesMatch<["lowerBound", "upperBound", "step"]>, - RecursiveSideEffects, ReductionClauseInterface]> { + RecursiveMemoryEffects, ReductionClauseInterface]> { let summary = "worksharing-loop construct"; let description = [{ The worksharing-loop construct specifies that the iterations of the loop(s) @@ -435,7 +435,7 @@ def YieldOp : OpenMP_Op<"yield", - [NoSideEffect, ReturnLike, Terminator, + [Pure, ReturnLike, Terminator, ParentOneOf<["WsLoopOp", "ReductionDeclareOp", "AtomicUpdateOp", "SimdLoopOp"]>]> { let summary = "loop yield and termination operation"; @@ -543,7 +543,7 @@ } def TaskLoopOp : OpenMP_Op<"taskloop", [AttrSizedOperandSegments, - AutomaticAllocationScope, RecursiveSideEffects, + AutomaticAllocationScope, RecursiveMemoryEffects, AllTypesMatch<["lowerBound", "upperBound", "step"]>, ReductionClauseInterface]> { let summary = "taskloop construct"; @@ -1097,7 +1097,7 @@ def AtomicUpdateOp : OpenMP_Op<"atomic.update", [SingleBlockImplicitTerminator<"YieldOp">, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let summary = "performs an atomic update"; diff --git a/mlir/include/mlir/Dialect/PDL/IR/PDLOps.td b/mlir/include/mlir/Dialect/PDL/IR/PDLOps.td --- a/mlir/include/mlir/Dialect/PDL/IR/PDLOps.td +++ b/mlir/include/mlir/Dialect/PDL/IR/PDLOps.td @@ -481,7 +481,7 @@ // pdl::ResultOp 
//===----------------------------------------------------------------------===// -def PDL_ResultOp : PDL_Op<"result", [NoSideEffect]> { +def PDL_ResultOp : PDL_Op<"result", [Pure]> { let summary = "Extract a result from an operation"; let description = [{ `pdl.result` operations extract result edges from an operation node within @@ -513,7 +513,7 @@ // pdl::ResultsOp //===----------------------------------------------------------------------===// -def PDL_ResultsOp : PDL_Op<"results", [NoSideEffect]> { +def PDL_ResultsOp : PDL_Op<"results", [Pure]> { let summary = "Extract a result group from an operation"; let description = [{ `pdl.results` operations extract a result group from an operation within a diff --git a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td --- a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td +++ b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td @@ -144,7 +144,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_AreEqualOp - : PDLInterp_PredicateOp<"are_equal", [NoSideEffect, SameTypeOperands]> { + : PDLInterp_PredicateOp<"are_equal", [Pure, SameTypeOperands]> { let summary = "Check if two positional values or ranges are equivalent"; let description = [{ `pdl_interp.are_equal` operations compare two positional values for @@ -166,7 +166,7 @@ // pdl_interp::BranchOp //===----------------------------------------------------------------------===// -def PDLInterp_BranchOp : PDLInterp_Op<"branch", [NoSideEffect, Terminator]> { +def PDLInterp_BranchOp : PDLInterp_Op<"branch", [Pure, Terminator]> { let summary = "General branch operation"; let description = [{ `pdl_interp.branch` operations expose general branch functionality to the @@ -189,7 +189,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_CheckAttributeOp - : PDLInterp_PredicateOp<"check_attribute", [NoSideEffect]> { + : 
PDLInterp_PredicateOp<"check_attribute", [Pure]> { let summary = "Check the value of an `Attribute`"; let description = [{ `pdl_interp.check_attribute` operations compare the value of a given @@ -214,7 +214,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_CheckOperandCountOp - : PDLInterp_PredicateOp<"check_operand_count", [NoSideEffect]> { + : PDLInterp_PredicateOp<"check_operand_count", [Pure]> { let summary = "Check the number of operands of an `Operation`"; let description = [{ `pdl_interp.check_operand_count` operations compare the number of operands @@ -248,7 +248,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_CheckOperationNameOp - : PDLInterp_PredicateOp<"check_operation_name", [NoSideEffect]> { + : PDLInterp_PredicateOp<"check_operation_name", [Pure]> { let summary = "Check the OperationName of an `Operation`"; let description = [{ `pdl_interp.check_operation_name` operations compare the name of a given @@ -271,7 +271,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_CheckResultCountOp - : PDLInterp_PredicateOp<"check_result_count", [NoSideEffect]> { + : PDLInterp_PredicateOp<"check_result_count", [Pure]> { let summary = "Check the number of results of an `Operation`"; let description = [{ `pdl_interp.check_result_count` operations compare the number of results @@ -305,7 +305,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_CheckTypeOp - : PDLInterp_PredicateOp<"check_type", [NoSideEffect]> { + : PDLInterp_PredicateOp<"check_type", [Pure]> { let summary = "Compare a type to a known value"; let description = [{ `pdl_interp.check_type` operations compare a type with a statically known @@ -328,7 +328,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_CheckTypesOp - : PDLInterp_PredicateOp<"check_types", 
[NoSideEffect]> { + : PDLInterp_PredicateOp<"check_types", [Pure]> { let summary = "Compare a range of types to a range of known values"; let description = [{ `pdl_interp.check_types` operations compare a range of types with a @@ -352,7 +352,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_ContinueOp - : PDLInterp_Op<"continue", [NoSideEffect, HasParent<"ForEachOp">, + : PDLInterp_Op<"continue", [Pure, HasParent<"ForEachOp">, Terminator]> { let summary = "Breaks the current iteration"; let description = [{ @@ -375,7 +375,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_CreateAttributeOp - : PDLInterp_Op<"create_attribute", [NoSideEffect]> { + : PDLInterp_Op<"create_attribute", [Pure]> { let summary = "Create an interpreter handle to a constant `Attribute`"; let description = [{ `pdl_interp.create_attribute` operations generate a handle within the @@ -453,7 +453,7 @@ // pdl_interp::CreateTypeOp //===----------------------------------------------------------------------===// -def PDLInterp_CreateTypeOp : PDLInterp_Op<"create_type", [NoSideEffect]> { +def PDLInterp_CreateTypeOp : PDLInterp_Op<"create_type", [Pure]> { let summary = "Create an interpreter handle to a constant `Type`"; let description = [{ `pdl_interp.create_type` operations generate a handle within the interpreter @@ -481,7 +481,7 @@ // pdl_interp::CreateTypesOp //===----------------------------------------------------------------------===// -def PDLInterp_CreateTypesOp : PDLInterp_Op<"create_types", [NoSideEffect]> { +def PDLInterp_CreateTypesOp : PDLInterp_Op<"create_types", [Pure]> { let summary = "Create an interpreter handle to a range of constant `Type`s"; let description = [{ `pdl_interp.create_types` operations generate a handle within the @@ -533,7 +533,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_ExtractOp - : PDLInterp_Op<"extract", 
[NoSideEffect, + : PDLInterp_Op<"extract", [Pure, TypesMatchWith< "`range` is a PDL range whose element type matches type of `result`", "result", "range", "pdl::RangeType::get($_self)">]> { @@ -569,7 +569,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_FinalizeOp - : PDLInterp_Op<"finalize", [NoSideEffect, Terminator]> { + : PDLInterp_Op<"finalize", [Pure, Terminator]> { let summary = "Finalize a pattern match or rewrite sequence"; let description = [{ `pdl_interp.finalize` is used to denote the termination of a match or @@ -681,7 +681,7 @@ // pdl_interp::GetAttributeOp //===----------------------------------------------------------------------===// -def PDLInterp_GetAttributeOp : PDLInterp_Op<"get_attribute", [NoSideEffect]> { +def PDLInterp_GetAttributeOp : PDLInterp_Op<"get_attribute", [Pure]> { let summary = "Get a specified attribute value from an `Operation`"; let description = [{ `pdl_interp.get_attribute` operations try to get a specific attribute from @@ -705,7 +705,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_GetAttributeTypeOp - : PDLInterp_Op<"get_attribute_type", [NoSideEffect]> { + : PDLInterp_Op<"get_attribute_type", [Pure]> { let summary = "Get the result type of a specified `Attribute`"; let description = [{ `pdl_interp.get_attribute_type` operations get the resulting type of a @@ -734,7 +734,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_GetDefiningOpOp - : PDLInterp_Op<"get_defining_op", [NoSideEffect]> { + : PDLInterp_Op<"get_defining_op", [Pure]> { let summary = "Get the defining operation of a `Value`"; let description = [{ `pdl_interp.get_defining_op` operations try to get the defining operation @@ -758,7 +758,7 @@ // pdl_interp::GetOperandOp //===----------------------------------------------------------------------===// -def PDLInterp_GetOperandOp : PDLInterp_Op<"get_operand", 
[NoSideEffect]> { +def PDLInterp_GetOperandOp : PDLInterp_Op<"get_operand", [Pure]> { let summary = "Get a specified operand from an `Operation`"; let description = [{ `pdl_interp.get_operand` operations try to get a specific operand from an @@ -782,7 +782,7 @@ // pdl_interp::GetOperandsOp //===----------------------------------------------------------------------===// -def PDLInterp_GetOperandsOp : PDLInterp_Op<"get_operands", [NoSideEffect]> { +def PDLInterp_GetOperandsOp : PDLInterp_Op<"get_operands", [Pure]> { let summary = "Get a specified operand group from an `Operation`"; let description = [{ `pdl_interp.get_operands` operations try to get a specific operand @@ -825,7 +825,7 @@ // pdl_interp::GetResultOp //===----------------------------------------------------------------------===// -def PDLInterp_GetResultOp : PDLInterp_Op<"get_result", [NoSideEffect]> { +def PDLInterp_GetResultOp : PDLInterp_Op<"get_result", [Pure]> { let summary = "Get a specified result from an `Operation`"; let description = [{ `pdl_interp.get_result` operations try to get a specific result from an @@ -849,7 +849,7 @@ // pdl_interp::GetResultsOp //===----------------------------------------------------------------------===// -def PDLInterp_GetResultsOp : PDLInterp_Op<"get_results", [NoSideEffect]> { +def PDLInterp_GetResultsOp : PDLInterp_Op<"get_results", [Pure]> { let summary = "Get a specified result group from an `Operation`"; let description = [{ `pdl_interp.get_results` operations try to get a specific result group @@ -898,7 +898,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_GetUsersOp - : PDLInterp_Op<"get_users", [NoSideEffect]> { + : PDLInterp_Op<"get_users", [Pure]> { let summary = "Get the users of a `Value`"; let description = [{ `pdl_interp.get_users` extracts the users that accept this value. 
In the @@ -933,7 +933,7 @@ // pdl_interp::GetValueTypeOp //===----------------------------------------------------------------------===// -def PDLInterp_GetValueTypeOp : PDLInterp_Op<"get_value_type", [NoSideEffect, +def PDLInterp_GetValueTypeOp : PDLInterp_Op<"get_value_type", [Pure, TypesMatchWith<"`value` type matches arity of `result`", "result", "value", "getGetValueTypeOpValueType($_self)">]> { let summary = "Get the result type of a specified `Value`"; @@ -973,7 +973,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_IsNotNullOp - : PDLInterp_PredicateOp<"is_not_null", [NoSideEffect]> { + : PDLInterp_PredicateOp<"is_not_null", [Pure]> { let summary = "Check if a positional value is non-null"; let description = [{ `pdl_interp.is_not_null` operations check that a positional value or range @@ -1061,7 +1061,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_SwitchAttributeOp - : PDLInterp_SwitchOp<"switch_attribute", [NoSideEffect]> { + : PDLInterp_SwitchOp<"switch_attribute", [Pure]> { let summary = "Switch on the value of an `Attribute`"; let description = [{ `pdl_interp.switch_attribute` operations compare the value of a given @@ -1094,7 +1094,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_SwitchOperandCountOp - : PDLInterp_SwitchOp<"switch_operand_count", [NoSideEffect]> { + : PDLInterp_SwitchOp<"switch_operand_count", [Pure]> { let summary = "Switch on the operand count of an `Operation`"; let description = [{ `pdl_interp.switch_operand_count` operations compare the operand count of a @@ -1128,7 +1128,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_SwitchOperationNameOp - : PDLInterp_SwitchOp<"switch_operation_name", [NoSideEffect]> { + : PDLInterp_SwitchOp<"switch_operation_name", [Pure]> { let summary = "Switch on the OperationName of an `Operation`"; let 
description = [{ `pdl_interp.switch_operation_name` operations compare the name of a given @@ -1166,7 +1166,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_SwitchResultCountOp - : PDLInterp_SwitchOp<"switch_result_count", [NoSideEffect]> { + : PDLInterp_SwitchOp<"switch_result_count", [Pure]> { let summary = "Switch on the result count of an `Operation`"; let description = [{ `pdl_interp.switch_result_count` operations compare the result count of a @@ -1199,7 +1199,7 @@ // pdl_interp::SwitchTypeOp //===----------------------------------------------------------------------===// -def PDLInterp_SwitchTypeOp : PDLInterp_SwitchOp<"switch_type", [NoSideEffect]> { +def PDLInterp_SwitchTypeOp : PDLInterp_SwitchOp<"switch_type", [Pure]> { let summary = "Switch on a `Type` value"; let description = [{ `pdl_interp.switch_type` operations compare a type with a set of statically @@ -1238,7 +1238,7 @@ //===----------------------------------------------------------------------===// def PDLInterp_SwitchTypesOp : PDLInterp_SwitchOp<"switch_types", - [NoSideEffect]> { + [Pure]> { let summary = "Switch on a range of `Type` values"; let description = [{ `pdl_interp.switch_types` operations compare a range of types with a set of diff --git a/mlir/include/mlir/Dialect/Quant/QuantOps.td b/mlir/include/mlir/Dialect/Quant/QuantOps.td --- a/mlir/include/mlir/Dialect/Quant/QuantOps.td +++ b/mlir/include/mlir/Dialect/Quant/QuantOps.td @@ -47,7 +47,7 @@ // (where the operand and result type are not quantized) at all points where // it is legal to use a quantized representation (but is not known to be // acceptable). 
-def quant_QuantizeCastOp : quant_Op<"qcast", [NoSideEffect]> { +def quant_QuantizeCastOp : quant_Op<"qcast", [Pure]> { let arguments = (ins quant_RealValueType:$arg); let results = (outs quant_RealValueType); } @@ -62,7 +62,7 @@ // Especially early in transformation, it is common to have dcasts on // all operands to ops that must operate with the expressed type (typically // math ops prior to lowering to target-specific, quantized kernels). -def quant_DequantizeCastOp : quant_Op<"dcast", [NoSideEffect]> { +def quant_DequantizeCastOp : quant_Op<"dcast", [Pure]> { let arguments = (ins quant_RealValueType:$arg); let results = (outs quant_RealValueType); } @@ -78,7 +78,7 @@ // i8 -> !quant<"uniform[i8:f32]{1.0}"> // tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">> // vector<4xi8> -> vector<4x!quant<"uniform[i8:f32]{1.0}">> -def quant_StorageCastOp : quant_Op<"scast", [NoSideEffect]> { +def quant_StorageCastOp : quant_Op<"scast", [Pure]> { let arguments = (ins quant_RealOrStorageValueType:$arg); let results = (outs quant_RealOrStorageValueType); let hasFolder = 1; diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td --- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td +++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td @@ -37,7 +37,7 @@ def ConditionOp : SCF_Op<"condition", [ HasParent<"WhileOp">, DeclareOpInterfaceMethods, - NoSideEffect, + Pure, Terminator ]> { let summary = "loop continuation condition"; @@ -121,7 +121,7 @@ "getSingleUpperBound"]>, DeclareOpInterfaceMethods, SingleBlockImplicitTerminator<"scf::YieldOp">, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let summary = "for operation"; let description = [{ The "scf.for" operation represents a loop taking 3 SSA value as operands @@ -345,7 +345,7 @@ def ForeachThreadOp : SCF_Op<"foreach_thread", [ AttrSizedOperandSegments, SingleBlockImplicitTerminator<"scf::PerformConcurrentlyOp">, - RecursiveSideEffects, + RecursiveMemoryEffects, 
AutomaticAllocationScope, ]> { let summary = "evaluate a block multiple times in parallel"; @@ -545,7 +545,7 @@ //===----------------------------------------------------------------------===// def PerformConcurrentlyOp : SCF_Op<"foreach_thread.perform_concurrently", [ - NoSideEffect, + Pure, Terminator, DeclareOpInterfaceMethods, HasParent<"ForeachThreadOp">, @@ -589,7 +589,7 @@ [DeclareOpInterfaceMethods, - SingleBlockImplicitTerminator<"scf::YieldOp">, RecursiveSideEffects, + SingleBlockImplicitTerminator<"scf::YieldOp">, RecursiveMemoryEffects, NoRegionArguments]> { let summary = "if-then-else operation"; let description = [{ @@ -687,7 +687,7 @@ [AutomaticAllocationScope, AttrSizedOperandSegments, DeclareOpInterfaceMethods, - RecursiveSideEffects, + RecursiveMemoryEffects, SingleBlockImplicitTerminator<"scf::YieldOp">]> { let summary = "parallel for operation"; let description = [{ @@ -826,7 +826,7 @@ //===----------------------------------------------------------------------===// def ReduceReturnOp : - SCF_Op<"reduce.return", [HasParent<"ReduceOp">, NoSideEffect, + SCF_Op<"reduce.return", [HasParent<"ReduceOp">, Pure, Terminator]> { let summary = "terminator for reduce operation"; let description = [{ @@ -850,7 +850,7 @@ def WhileOp : SCF_Op<"while", [DeclareOpInterfaceMethods, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let summary = "a generic 'while' loop"; let description = [{ This operation represents a generic "while"/"do-while" loop that keeps @@ -986,7 +986,7 @@ // YieldOp //===----------------------------------------------------------------------===// -def YieldOp : SCF_Op<"yield", [NoSideEffect, ReturnLike, Terminator, +def YieldOp : SCF_Op<"yield", [Pure, ReturnLike, Terminator, ParentOneOf<["ExecuteRegionOp, ForOp", "IfOp, ParallelOp, WhileOp"]>]> { let summary = "loop yield and termination operation"; diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td --- 
a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td @@ -23,7 +23,7 @@ // Operands type same as result type. SPIRV_BinaryOp { + [Pure, SameOperandsAndResultType])> { // In addition to normal types arithmetic instructions can support cooperative // matrix. let arguments = (ins @@ -42,7 +42,7 @@ // Operand type same as result type. SPIRV_UnaryOp; + [Pure, SameOperandsAndResultType])>; // ----- @@ -312,7 +312,7 @@ def SPIRV_IAddCarryOp : SPIRV_BinaryOp<"IAddCarry", SPIRV_AnyStruct, SPIRV_Integer, - [Commutative, NoSideEffect]> { + [Commutative, Pure]> { let summary = [{ Integer addition of Operand 1 and Operand 2, including the carry. }]; @@ -448,7 +448,7 @@ // ----- def SPIRV_ISubBorrowOp : SPIRV_BinaryOp<"ISubBorrow", SPIRV_AnyStruct, SPIRV_Integer, - [NoSideEffect]> { + [Pure]> { let summary = [{ Result is the unsigned integer subtraction of Operand 2 from Operand 1, and what it needed to borrow. @@ -680,7 +680,7 @@ // ----- -def SPIRV_VectorTimesScalarOp : SPIRV_Op<"VectorTimesScalar", [NoSideEffect]> { +def SPIRV_VectorTimesScalarOp : SPIRV_Op<"VectorTimesScalar", [Pure]> { let summary = "Scale a floating-point vector."; let description = [{ diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td @@ -21,13 +21,13 @@ // All the operands type used in bit instructions are SPIRV_Integer. 
SPIRV_BinaryOp { + [Pure, SameOperandsAndResultType])> { let assemblyFormat = "operands attr-dict `:` type($result)"; } class SPIRV_BitFieldExtractOp traits = []> : SPIRV_Op])> { + [Pure, AllTypesMatch<["base", "result"]>])> { let arguments = (ins SPIRV_ScalarOrVectorOf:$base, SPIRV_Integer:$offset, @@ -48,12 +48,12 @@ class SPIRV_BitUnaryOp traits = []> : SPIRV_UnaryOp; + [Pure, SameOperandsAndResultType])>; class SPIRV_ShiftOp traits = []> : SPIRV_BinaryOp])> { let assemblyFormat = [{ operands attr-dict `:` type($operand1) `,` type($operand2) @@ -101,7 +101,7 @@ // ----- def SPIRV_BitFieldInsertOp : SPIRV_Op<"BitFieldInsert", - [NoSideEffect, AllTypesMatch<["base", "insert", "result"]>]> { + [Pure, AllTypesMatch<["base", "insert", "result"]>]> { let summary = [{ Make a copy of an object, with a modified bit field that comes from another object. diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td @@ -34,7 +34,7 @@ // Base class for OpenCL unary ops. class SPIRV_CLUnaryOp traits = []> : - SPIRV_CLOp { + SPIRV_CLOp { let arguments = (ins SPIRV_ScalarOrVectorOf:$operand @@ -59,7 +59,7 @@ // Base class for OpenCL binary ops. class SPIRV_CLBinaryOp traits = []> : - SPIRV_CLOp { + SPIRV_CLOp { let arguments = (ins SPIRV_ScalarOrVectorOf:$lhs, @@ -85,7 +85,7 @@ // Base class for OpenCL binary ops. 
class SPIRV_CLTernaryOp traits = []> : - SPIRV_CLOp { + SPIRV_CLOp { let arguments = (ins SPIRV_ScalarOrVectorOf:$x, diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td @@ -21,7 +21,7 @@ list traits = []> : SPIRV_Op { + [Pure, SameOperandsAndResultShape])> { let arguments = (ins SPIRV_ScalarOrVectorOrCoopMatrixOf:$operand ); @@ -36,7 +36,7 @@ // ----- -def SPIRV_BitcastOp : SPIRV_Op<"Bitcast", [NoSideEffect]> { +def SPIRV_BitcastOp : SPIRV_Op<"Bitcast", [Pure]> { let summary = "Bit pattern-preserving type conversion."; let description = [{ @@ -332,7 +332,7 @@ } // ----- -def SPIRV_PtrCastToGenericOp : SPIRV_Op<"PtrCastToGeneric", [NoSideEffect]> { +def SPIRV_PtrCastToGenericOp : SPIRV_Op<"PtrCastToGeneric", [Pure]> { let summary = "Convert a pointer’s Storage Class to Generic."; let description = [{ @@ -375,7 +375,7 @@ // ----- -def SPIRV_GenericCastToPtrOp : SPIRV_Op<"GenericCastToPtr", [NoSideEffect]> { +def SPIRV_GenericCastToPtrOp : SPIRV_Op<"GenericCastToPtr", [Pure]> { let summary = "Convert a pointer’s Storage Class to a non-Generic class."; let description = [{ @@ -418,7 +418,7 @@ // ----- -def SPIRV_GenericCastToPtrExplicitOp : SPIRV_Op<"GenericCastToPtrExplicit", [NoSideEffect]> { +def SPIRV_GenericCastToPtrExplicitOp : SPIRV_Op<"GenericCastToPtrExplicit", [Pure]> { let summary = [{ Attempts to explicitly convert Pointer to Storage storage-class pointer value. 
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td @@ -19,7 +19,7 @@ // ----- -def SPIRV_CompositeConstructOp : SPIRV_Op<"CompositeConstruct", [NoSideEffect]> { +def SPIRV_CompositeConstructOp : SPIRV_Op<"CompositeConstruct", [Pure]> { let summary = [{ Construct a new composite object from a set of constituent objects. }]; @@ -73,7 +73,7 @@ // ----- def SPIRV_CompositeExtractOp : SPIRV_Op<"CompositeExtract", - [NoSideEffect, UsableInSpecConstantOp]> { + [Pure, UsableInSpecConstantOp]> { let summary = "Extract a part of a composite object."; let description = [{ @@ -124,7 +124,7 @@ // ----- def SPIRV_CompositeInsertOp : SPIRV_Op<"CompositeInsert", - [NoSideEffect, UsableInSpecConstantOp]> { + [Pure, UsableInSpecConstantOp]> { let summary = [{ Make a copy of a composite object, while modifying one part of it. }]; @@ -176,7 +176,7 @@ // ----- def SPIRV_VectorExtractDynamicOp : SPIRV_Op<"VectorExtractDynamic", [ - NoSideEffect, + Pure, TypesMatchWith<"type of 'result' matches element type of 'vector'", "vector", "result", "$_self.cast().getElementType()">]> { @@ -224,7 +224,7 @@ // ----- def SPIRV_VectorInsertDynamicOp : SPIRV_Op<"VectorInsertDynamic", [ - NoSideEffect, + Pure, TypesMatchWith< "type of 'component' matches element type of 'vector'", "vector", "component", @@ -288,7 +288,7 @@ // ----- def SPIRV_VectorShuffleOp : SPIRV_Op<"VectorShuffle", [ - NoSideEffect, AllElementTypesMatch<["vector1", "vector2", "result"]>]> { + Pure, AllElementTypesMatch<["vector1", "vector2", "result"]>]> { let summary = [{ Select arbitrary components from two vectors to make a new vector. 
}]; diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td @@ -22,7 +22,7 @@ // ----- def SPIRV_BranchOp : SPIRV_Op<"Branch", [ - DeclareOpInterfaceMethods, InFunctionScope, NoSideEffect, + DeclareOpInterfaceMethods, InFunctionScope, Pure, Terminator]> { let summary = "Unconditional branch to target block."; @@ -79,7 +79,7 @@ def SPIRV_BranchConditionalOp : SPIRV_Op<"BranchConditional", [ AttrSizedOperandSegments, DeclareOpInterfaceMethods, - InFunctionScope, NoSideEffect, Terminator]> { + InFunctionScope, Pure, Terminator]> { let summary = [{ If Condition is true, branch to true block, otherwise branch to false block. @@ -313,7 +313,7 @@ // ----- -def SPIRV_MergeOp : SPIRV_Op<"mlir.merge", [NoSideEffect, Terminator]> { +def SPIRV_MergeOp : SPIRV_Op<"mlir.merge", [Pure, Terminator]> { let summary = "A special terminator for merging a structured selection/loop."; let description = [{ @@ -337,7 +337,7 @@ // ----- -def SPIRV_ReturnOp : SPIRV_Op<"Return", [InFunctionScope, NoSideEffect, +def SPIRV_ReturnOp : SPIRV_Op<"Return", [InFunctionScope, Pure, Terminator]> { let summary = "Return with no value from a function with void return type."; @@ -382,7 +382,7 @@ // ----- -def SPIRV_ReturnValueOp : SPIRV_Op<"ReturnValue", [InFunctionScope, NoSideEffect, +def SPIRV_ReturnValueOp : SPIRV_Op<"ReturnValue", [InFunctionScope, Pure, Terminator]> { let summary = "Return a value from a function."; diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td @@ -16,7 +16,7 @@ // ----- def SPIRV_NVCooperativeMatrixLengthOp : 
SPIRV_NvVendorOp<"CooperativeMatrixLength", - [NoSideEffect]> { + [Pure]> { let summary = "See extension SPV_NV_cooperative_matrix"; let description = [{ @@ -137,7 +137,7 @@ // ----- def SPIRV_NVCooperativeMatrixMulAddOp : SPIRV_NvVendorOp<"CooperativeMatrixMulAdd", - [NoSideEffect, AllTypesMatch<["c", "result"]>]> { + [Pure, AllTypesMatch<["c", "result"]>]> { let summary = "See extension SPV_NV_cooperative_matrix"; let description = [{ diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td @@ -35,7 +35,7 @@ // Base class for GL unary ops. class SPIRV_GLUnaryOp traits = []> : - SPIRV_GLOp { + SPIRV_GLOp { let arguments = (ins SPIRV_ScalarOrVectorOf:$operand @@ -60,7 +60,7 @@ // Base class for GL binary ops. class SPIRV_GLBinaryOp traits = []> : - SPIRV_GLOp { + SPIRV_GLOp { let arguments = (ins SPIRV_ScalarOrVectorOf:$lhs, @@ -86,7 +86,7 @@ // Base class for GL ternary ops. 
class SPIRV_GLTernaryArithmeticOp traits = []> : - SPIRV_GLOp { + SPIRV_GLOp { let arguments = (ins SPIRV_ScalarOrVectorOf:$x, @@ -1077,7 +1077,7 @@ // ---- -def SPIRV_GLFrexpStructOp : SPIRV_GLOp<"FrexpStruct", 52, [NoSideEffect]> { +def SPIRV_GLFrexpStructOp : SPIRV_GLOp<"FrexpStruct", 52, [Pure]> { let summary = "Splits x into two components such that x = significand * 2^exponent"; let description = [{ @@ -1132,7 +1132,7 @@ def SPIRV_GLLdexpOp : SPIRV_GLOp<"Ldexp", 53, [ - NoSideEffect, AllTypesMatch<["x", "y"]>]> { + Pure, AllTypesMatch<["x", "y"]>]> { let summary = "Builds y such that y = significand * 2^exponent"; let description = [{ @@ -1184,7 +1184,7 @@ def SPIRV_GLFMixOp : SPIRV_GLOp<"FMix", 46, [ - NoSideEffect, AllTypesMatch<["x", "y", "a", "result"]>]> { + Pure, AllTypesMatch<["x", "y", "a", "result"]>]> { let summary = "Builds the linear blend of x and y"; let description = [{ diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td @@ -18,7 +18,7 @@ // ----- def SPIRV_GroupBroadcastOp : SPIRV_Op<"GroupBroadcast", - [NoSideEffect, + [Pure, AllTypesMatch<["value", "result"]>]> { let summary = [{ Broadcast the Value of the invocation identified by the local id LocalId diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td @@ -19,7 +19,7 @@ // ----- -def SPIRV_ImageDrefGatherOp : SPIRV_Op<"ImageDrefGather", [NoSideEffect]> { +def SPIRV_ImageDrefGatherOp : SPIRV_Op<"ImageDrefGather", [Pure]> { let summary = "Gathers the requested depth-comparison from four texels."; let description = [{ @@ -86,7 +86,7 @@ // ----- -def SPIRV_ImageQuerySizeOp : SPIRV_Op<"ImageQuerySize", [NoSideEffect]> { +def 
SPIRV_ImageQuerySizeOp : SPIRV_Op<"ImageQuerySize", [Pure]> { let summary = "Query the dimensions of Image, with no level of detail."; let description = [{ @@ -144,7 +144,7 @@ // ----- def SPIRV_ImageOp : SPIRV_Op<"Image", - [NoSideEffect, + [Pure, TypesMatchWith<"type of 'result' matches image type of 'sampledimage'", "sampledimage", "result", "$_self.cast().getImageType()">]> { diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td @@ -16,7 +16,7 @@ // ----- def SPIRV_INTELJointMatrixWorkItemLengthOp : SPIRV_IntelVendorOp<"JointMatrixWorkItemLength", - [NoSideEffect]> { + [Pure]> { let summary = "See extension SPV_INTEL_joint_matrix"; let description = [{ @@ -120,7 +120,7 @@ // ----- def SPIRV_INTELJointMatrixMadOp : SPIRV_IntelVendorOp<"JointMatrixMad", - [NoSideEffect, AllTypesMatch<["c", "result"]>]> { + [Pure, AllTypesMatch<["c", "result"]>]> { let summary = "See extension SPV_INTEL_joint_matrix"; let description = [{ diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td @@ -22,7 +22,7 @@ // Result type is SPIRV_Bool. 
SPIRV_BinaryOp, UsableInSpecConstantOp]> { let summary = [{ diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td @@ -16,7 +16,7 @@ // ----- -def SPIRV_MatrixTimesMatrixOp : SPIRV_Op<"MatrixTimesMatrix", [NoSideEffect]> { +def SPIRV_MatrixTimesMatrixOp : SPIRV_Op<"MatrixTimesMatrix", [Pure]> { let summary = "Linear-algebraic multiply of LeftMatrix X RightMatrix."; let description = [{ @@ -70,7 +70,7 @@ // ----- -def SPIRV_MatrixTimesScalarOp : SPIRV_Op<"MatrixTimesScalar", [NoSideEffect]> { +def SPIRV_MatrixTimesScalarOp : SPIRV_Op<"MatrixTimesScalar", [Pure]> { let summary = "Scale a floating-point matrix."; let description = [{ @@ -132,7 +132,7 @@ // ----- -def SPIRV_TransposeOp : SPIRV_Op<"Transpose", [NoSideEffect]> { +def SPIRV_TransposeOp : SPIRV_Op<"Transpose", [Pure]> { let summary = "Transpose a matrix."; let description = [{ diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td @@ -18,7 +18,7 @@ // ----- -def SPIRV_AccessChainOp : SPIRV_Op<"AccessChain", [NoSideEffect]> { +def SPIRV_AccessChainOp : SPIRV_Op<"AccessChain", [Pure]> { let summary = "Create a pointer into a composite object."; let description = [{ @@ -132,7 +132,7 @@ // ----- -def SPIRV_InBoundsPtrAccessChainOp : SPIRV_Op<"InBoundsPtrAccessChain", [NoSideEffect]> { +def SPIRV_InBoundsPtrAccessChainOp : SPIRV_Op<"InBoundsPtrAccessChain", [Pure]> { let summary = [{ Has the same semantics as OpPtrAccessChain, with the addition that the resulting pointer is known to point within the base object. 
@@ -235,7 +235,7 @@ // ----- -def SPIRV_PtrAccessChainOp : SPIRV_Op<"PtrAccessChain", [NoSideEffect]> { +def SPIRV_PtrAccessChainOp : SPIRV_Op<"PtrAccessChain", [Pure]> { let summary = [{ Has the same semantics as OpAccessChain, with the addition of the Element operand. diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td @@ -56,7 +56,7 @@ // ----- -def SPIRV_UndefOp : SPIRV_Op<"Undef", [NoSideEffect]> { +def SPIRV_UndefOp : SPIRV_Op<"Undef", [Pure]> { let summary = "Make an intermediate object whose value is undefined."; let description = [{ diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td @@ -92,7 +92,7 @@ // ----- def SPIRV_GroupNonUniformBroadcastOp : SPIRV_Op<"GroupNonUniformBroadcast", - [NoSideEffect, AllTypesMatch<["value", "result"]>]> { + [Pure, AllTypesMatch<["value", "result"]>]> { let summary = [{ Result is the Value of the invocation identified by the id Id to all active invocations in the group. @@ -667,7 +667,7 @@ // ----- def SPIRV_GroupNonUniformShuffleOp : SPIRV_Op<"GroupNonUniformShuffle", - [NoSideEffect, AllTypesMatch<["value", "result"]>]> { + [Pure, AllTypesMatch<["value", "result"]>]> { let summary = [{ Result is the Value of the invocation identified by the id Id. }]; @@ -719,7 +719,7 @@ // ----- def SPIRV_GroupNonUniformShuffleDownOp : SPIRV_Op<"GroupNonUniformShuffleDown", - [NoSideEffect, AllTypesMatch<["value", "result"]>]> { + [Pure, AllTypesMatch<["value", "result"]>]> { let summary = [{ Result is the Value of the invocation identified by the current invocation’s id within the group + Delta. 
@@ -774,7 +774,7 @@ // ----- def SPIRV_GroupNonUniformShuffleUpOp : SPIRV_Op<"GroupNonUniformShuffleUp", - [NoSideEffect, AllTypesMatch<["value", "result"]>]> { + [Pure, AllTypesMatch<["value", "result"]>]> { let summary = [{ Result is the Value of the invocation identified by the current invocation’s id within the group - Delta. @@ -828,7 +828,7 @@ // ----- def SPIRV_GroupNonUniformShuffleXorOp : SPIRV_Op<"GroupNonUniformShuffleXor", - [NoSideEffect, AllTypesMatch<["value", "result"]>]> { + [Pure, AllTypesMatch<["value", "result"]>]> { let summary = [{ Result is the Value of the invocation identified by the current invocation’s id within the group xor’ed with Mask. diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td @@ -27,7 +27,7 @@ def SPIRV_AddressOfOp : SPIRV_Op<"mlir.addressof", [DeclareOpInterfaceMethods, - InFunctionScope, NoSideEffect]> { + InFunctionScope, Pure]> { let summary = "Get the address of a global variable."; let description = [{ @@ -75,7 +75,7 @@ def SPIRV_ConstantOp : SPIRV_Op<"Constant", [ConstantLike, DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = [{ Declare a new integer-type or floating-point-type scalar constant. }]; @@ -520,7 +520,7 @@ // ----- -def SPIRV_ReferenceOfOp : SPIRV_Op<"mlir.referenceof", [NoSideEffect]> { +def SPIRV_ReferenceOfOp : SPIRV_Op<"mlir.referenceof", [Pure]> { let summary = "Reference a specialization constant."; let description = [{ @@ -669,7 +669,7 @@ // ----- def SPIRV_SpecConstantOperationOp : SPIRV_Op<"SpecConstantOperation", [ - NoSideEffect, InFunctionScope, + Pure, InFunctionScope, SingleBlockImplicitTerminator<"YieldOp">]> { let summary = [{ Declare a new specialization constant that results from doing an operation. 
@@ -760,7 +760,7 @@ // ----- def SPIRV_YieldOp : SPIRV_Op<"mlir.yield", [ - HasParent<"SpecConstantOperationOp">, NoSideEffect, Terminator]> { + HasParent<"SpecConstantOperationOp">, Pure, Terminator]> { let summary = [{ Yields the result computed in `spirv.SpecConstantOperation`'s region back to the parent op. diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td --- a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td +++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td @@ -32,7 +32,7 @@ Op; def Shape_AddOp : Shape_Op<"add", - [Commutative, NoSideEffect, + [Commutative, Pure, DeclareOpInterfaceMethods]> { let summary = "Addition of sizes and indices"; let description = [{ @@ -61,7 +61,7 @@ let hasVerifier = 1; } -def Shape_BroadcastOp : Shape_Op<"broadcast", [Commutative, NoSideEffect]> { +def Shape_BroadcastOp : Shape_Op<"broadcast", [Commutative, Pure]> { let summary = "Returns the broadcasted output shape of two or more inputs"; let description = [{ Returns the broadcasted shape for input shapes or extent tensors. The rest @@ -108,7 +108,7 @@ } def Shape_ConstShapeOp : Shape_Op<"const_shape", - [ConstantLike, NoSideEffect, DeclareOpInterfaceMethods]> { + [ConstantLike, Pure, DeclareOpInterfaceMethods]> { let summary = "Creates a constant shape or extent tensor"; let description = [{ Creates a constant shape or extent tensor. 
The individual extents are given @@ -136,7 +136,7 @@ def Shape_ConstSizeOp : Shape_Op<"const_size", [ ConstantLike, - NoSideEffect, + Pure, DeclareOpInterfaceMethods ]> { let summary = "Creates a constant of type `shape.size`"; @@ -157,7 +157,7 @@ let hasFolder = 1; } -def Shape_DivOp : Shape_Op<"div", [NoSideEffect, +def Shape_DivOp : Shape_Op<"div", [Pure, DeclareOpInterfaceMethods]> { let summary = "Division of sizes and indices"; let description = [{ @@ -194,7 +194,7 @@ }]; } -def Shape_ShapeEqOp : Shape_Op<"shape_eq", [NoSideEffect, Commutative]> { +def Shape_ShapeEqOp : Shape_Op<"shape_eq", [Pure, Commutative]> { let summary = "Returns whether the input shapes or extent tensors are equal"; let description = [{ Takes one or more shape or extent tensor operands and determines whether @@ -217,7 +217,7 @@ let hasFolder = 1; } -def Shape_FromExtentsOp : Shape_Op<"from_extents", [NoSideEffect]> { +def Shape_FromExtentsOp : Shape_Op<"from_extents", [Pure]> { let summary = "Creates a shape from extents"; let description = [{ Creates a shape from multiple SSA values representing the extents of @@ -238,7 +238,7 @@ let hasFolder = 1; } -def Shape_FromExtentTensorOp : Shape_Op<"from_extent_tensor", [NoSideEffect]> { +def Shape_FromExtentTensorOp : Shape_Op<"from_extent_tensor", [Pure]> { let summary = "Creates a shape from a tensor of extents"; let description = [{ Creates a shape from a 1D integral tensor of extents. The rank of the @@ -286,7 +286,7 @@ } def Shape_RankOp : Shape_Op<"rank", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "Gets the rank of a shape"; let description = [{ Returns the rank of the shape or extent tensor, i.e. the number of extents. 
@@ -309,7 +309,7 @@ } def Shape_ToExtentTensorOp : Shape_Op<"to_extent_tensor", [ - DeclareOpInterfaceMethods, NoSideEffect + DeclareOpInterfaceMethods, Pure ]> { let summary = "Creates a dimension tensor from a shape"; let description = [{ @@ -329,7 +329,7 @@ } def Shape_DimOp : Shape_Op<"dim", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "Gets the specified extent from the shape of a shaped input"; let description = [{ Gets the extent indexed by `dim` from the shape of the `value` operand. If @@ -364,7 +364,7 @@ } def Shape_GetExtentOp : Shape_Op<"get_extent", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "Gets the specified extent from a shape or extent tensor"; let description = [{ Gets the extent indexed by `dim` from the `shape` operand. If the shape is @@ -393,7 +393,7 @@ let hasVerifier = 1; } -def Shape_IndexToSizeOp : Shape_Op<"index_to_size", [NoSideEffect]> { +def Shape_IndexToSizeOp : Shape_Op<"index_to_size", [Pure]> { let summary = "Converts a standard index to a shape size"; let description = [{ Converts a standard index to a `shape.size`. 
This operation and its @@ -413,7 +413,7 @@ } def Shape_MaxOp : Shape_Op<"max", - [Commutative, NoSideEffect, + [Commutative, Pure, DeclareOpInterfaceMethods]> { let summary = "Elementwise maximum"; let description = [{ @@ -487,7 +487,7 @@ } def Shape_MinOp : Shape_Op<"min", - [Commutative, NoSideEffect, + [Commutative, Pure, DeclareOpInterfaceMethods]> { let summary = "Elementwise minimum"; let description = [{ @@ -514,7 +514,7 @@ } def Shape_MulOp : Shape_Op<"mul", - [Commutative, NoSideEffect, + [Commutative, Pure, DeclareOpInterfaceMethods]> { let summary = "Multiplication of sizes and indices"; let description = [{ @@ -544,7 +544,7 @@ } def Shape_NumElementsOp : Shape_Op<"num_elements", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "Returns the number of elements for a given shape"; let description = [{ Returns the number of elements for a given shape which is the product of its @@ -615,7 +615,7 @@ } def Shape_ShapeOfOp : Shape_Op<"shape_of", - [NoSideEffect, DeclareOpInterfaceMethods]> { + [Pure, DeclareOpInterfaceMethods]> { let summary = "Returns shape of a value or shaped type operand"; let description = [{ @@ -639,7 +639,7 @@ }]; } -def Shape_ValueOfOp : Shape_Op<"value_of", [NoSideEffect]> { +def Shape_ValueOfOp : Shape_Op<"value_of", [Pure]> { let summary = "Returns value of a !shape.value_shape operand"; let description = [{ @@ -655,7 +655,7 @@ } def Shape_SizeToIndexOp : Shape_Op<"size_to_index", [ - DeclareOpInterfaceMethods, NoSideEffect + DeclareOpInterfaceMethods, Pure ]> { let summary = "Casts between index types of the shape and standard dialect"; let description = [{ @@ -674,7 +674,7 @@ let hasCanonicalizer = 1; } -def Shape_ValueAsShapeOp : Shape_Op<"value_as_shape", [NoSideEffect]> { +def Shape_ValueAsShapeOp : Shape_Op<"value_as_shape", [Pure]> { let summary = "Returns value as a shape"; let description = [{ @@ -699,7 +699,7 @@ let assemblyFormat = "$arg attr-dict `:` type($arg) `->` 
type($result)"; } -def Shape_WithOp : Shape_Op<"with_shape", [NoSideEffect]> { +def Shape_WithOp : Shape_Op<"with_shape", [Pure]> { let summary = "Returns ValueShape with given shape"; let description = [{ Returns ValueShape with the shape updated to match the shape operand. That @@ -744,7 +744,7 @@ def Shape_YieldOp : Shape_Op<"yield", [HasParent<"ReduceOp, FunctionLibraryOp">, - NoSideEffect, + Pure, ReturnLike, Terminator]> { let summary = "Returns the value to parent op"; @@ -774,7 +774,7 @@ let results = (outs Shape_ShapeOrSizeType:$output); } -def Shape_SplitAtOp : Shape_Op<"split_at", [NoSideEffect]> { +def Shape_SplitAtOp : Shape_Op<"split_at", [Pure]> { let summary = "Splits a shape at a given index"; let description = [{ Splits a shape at a given dimension `index`, returning two shapes. @@ -806,7 +806,7 @@ let hasFolder = 1; } -def Shape_ConcatOp : Shape_Op<"concat", [NoSideEffect]> { +def Shape_ConcatOp : Shape_Op<"concat", [Pure]> { let summary = "Concatenates two shapes"; let description = [{ Creates a shape whose dimensions consist of first the dimensions from `lhs` @@ -834,7 +834,7 @@ // TODO: Move the code below and witnesses to a different file. 
def Shape_AnyOp : Shape_Op<"any", [Commutative, - NoSideEffect]> { + Pure]> { let summary = "Return any combination of the input shapes"; let description = [{ This operation takes multiple input shapes or extent tensors and returns @@ -859,7 +859,7 @@ let hasFolder = 1; } -def Shape_AssumingAllOp : Shape_Op<"assuming_all", [Commutative, NoSideEffect]> { +def Shape_AssumingAllOp : Shape_Op<"assuming_all", [Commutative, Pure]> { let summary = "Return a logical AND of all witnesses"; let description = [{ Used to simplify constraints as any single failing precondition is enough @@ -893,7 +893,7 @@ def Shape_AssumingOp : Shape_Op<"assuming", [ SingleBlockImplicitTerminator<"AssumingYieldOp">, DeclareOpInterfaceMethods, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let summary = "Execute the region"; let description = [{ Executes the region assuming all witnesses are true. @@ -925,7 +925,7 @@ } def Shape_AssumingYieldOp : Shape_Op<"assuming_yield", - [NoSideEffect, ReturnLike, Terminator, HasParent<"AssumingOp">]> { + [Pure, ReturnLike, Terminator, HasParent<"AssumingOp">]> { let summary = "Yield operation"; let description = [{ This yield operation represents a return operation within the @@ -996,7 +996,7 @@ let hasFolder = 1; } -def Shape_ConstWitnessOp : Shape_Op<"const_witness", [ConstantLike, NoSideEffect]> { +def Shape_ConstWitnessOp : Shape_Op<"const_witness", [ConstantLike, Pure]> { let summary = "An operation that returns a statically known witness value"; let description = [{ This operation represents a statically known witness result. This can be @@ -1165,7 +1165,7 @@ } def Shape_ReturnOp : Shape_Op<"return", - [NoSideEffect, HasParent<"FuncOp">, ReturnLike, Terminator]> { + [Pure, HasParent<"FuncOp">, ReturnLike, Terminator]> { let summary = "Shape function return operation"; let description = [{ The `shape.return` operation represents a return operation within a function. 
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -25,7 +25,7 @@ // Sparse Tensor Operations. //===----------------------------------------------------------------------===// -def SparseTensor_NewOp : SparseTensor_Op<"new", [NoSideEffect]>, +def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]>, Arguments<(ins AnyType:$source)>, Results<(outs AnySparseTensor:$result)> { string summary = "Materializes a new sparse tensor from given source"; @@ -49,7 +49,7 @@ } def SparseTensor_ConvertOp : SparseTensor_Op<"convert", - [NoSideEffect, SameOperandsAndResultElementType]>, + [Pure, SameOperandsAndResultElementType]>, Arguments<(ins AnyTensor:$source)>, Results<(outs AnyTensor:$dest)> { string summary = "Converts between different tensor types"; @@ -90,7 +90,7 @@ let hasVerifier = 1; } -def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>, +def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [Pure]>, Arguments<(ins AnySparseTensor:$tensor, IndexAttr:$dimension)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { let summary = "Extracts pointers array at given dimension from a tensor"; @@ -114,7 +114,7 @@ let hasVerifier = 1; } -def SparseTensor_ToIndicesOp : SparseTensor_Op<"indices", [NoSideEffect]>, +def SparseTensor_ToIndicesOp : SparseTensor_Op<"indices", [Pure]>, Arguments<(ins AnySparseTensor:$tensor, IndexAttr:$dimension)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { let summary = "Extracts indices array at given dimension from a tensor"; @@ -138,7 +138,7 @@ let hasVerifier = 1; } -def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [NoSideEffect]>, +def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [Pure]>, Arguments<(ins AnySparseTensor:$tensor)>, Results<(outs 
AnyStridedMemRefOfRank<1>:$result)> { let summary = "Extracts numerical values array from a tensor"; @@ -161,7 +161,7 @@ let hasVerifier = 1; } -def SparseTensor_ConcatenateOp : SparseTensor_Op<"concatenate", [NoSideEffect]>, +def SparseTensor_ConcatenateOp : SparseTensor_Op<"concatenate", [Pure]>, Arguments<(ins Variadic:$inputs, IndexAttr:$dimension)>, Results<(outs AnyRankedTensor:$result)> { @@ -467,7 +467,7 @@ // Sparse Tensor Syntax Operations. //===----------------------------------------------------------------------===// -def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [NoSideEffect]>, +def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [Pure]>, Arguments<(ins AnyType:$x, AnyType:$y, UnitAttr:$left_identity, UnitAttr:$right_identity)>, Results<(outs AnyType:$output)> { let summary = "Binary set operation utilized within linalg.generic"; @@ -581,7 +581,7 @@ let hasVerifier = 1; } -def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [NoSideEffect]>, +def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [Pure]>, Arguments<(ins AnyType:$x)>, Results<(outs AnyType:$output)> { let summary = "Unary set operation utilized within linalg.generic"; @@ -659,7 +659,7 @@ let hasVerifier = 1; } -def SparseTensor_ReduceOp : SparseTensor_Op<"reduce", [NoSideEffect, SameOperandsAndResultType]>, +def SparseTensor_ReduceOp : SparseTensor_Op<"reduce", [Pure, SameOperandsAndResultType]>, Arguments<(ins AnyType:$x, AnyType:$y, AnyType:$identity)>, Results<(outs AnyType:$output)> { let summary = "Custom reduction operation utilized within linalg.generic"; @@ -708,7 +708,7 @@ let hasVerifier = 1; } -def SparseTensor_SelectOp : SparseTensor_Op<"select", [NoSideEffect, SameOperandsAndResultType]>, +def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResultType]>, Arguments<(ins AnyType:$x)>, Results<(outs AnyType:$output)> { let summary = "Select operation utilized within linalg.generic"; @@ -768,7 +768,7 @@ let hasVerifier = 1; } -def 
SparseTensor_YieldOp : SparseTensor_Op<"yield", [NoSideEffect, Terminator]>, +def SparseTensor_YieldOp : SparseTensor_Op<"yield", [Pure, Terminator]>, Arguments<(ins Optional:$result)> { let summary = "Yield from sparse_tensor set-like operations"; let description = [{ diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td --- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td +++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td @@ -49,7 +49,7 @@ def Tensor_CastOp : Tensor_Op<"cast", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, - NoSideEffect + Pure ]> { let summary = "tensor cast operation"; let description = [{ @@ -87,7 +87,7 @@ def Tensor_DimOp : Tensor_Op<"dim", [ DeclareOpInterfaceMethods, - NoSideEffect, + Pure, ShapedDimOpInterface]> { let summary = "dimension index operation"; let description = [{ @@ -147,7 +147,7 @@ //===----------------------------------------------------------------------===// def Tensor_EmptyOp : Tensor_Op<"empty", - [NoSideEffect, + [Pure, DeclareOpInterfaceMethods]> { let summary = "empty tensor operation"; @@ -207,7 +207,7 @@ def Tensor_ExtractOp : Tensor_Op<"extract", [ DeclareOpInterfaceMethods, - NoSideEffect, + Pure, TypesMatchWith<"result type matches element type of tensor", "tensor", "result", "$_self.cast().getElementType()">]> { @@ -252,7 +252,7 @@ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, AttrSizedOperandSegments, - NoSideEffect, + Pure, OffsetSizeAndStrideOpInterface ]> { let summary = "extract slice operation"; @@ -446,7 +446,7 @@ def Tensor_FromElementsOp : Tensor_Op<"from_elements", [ DeclareOpInterfaceMethods, - NoSideEffect, + Pure, TypesMatchWith<"operand types match result element type", "result", "elements", "SmallVector(" "$_self.cast().getNumElements(), " @@ -492,7 +492,7 @@ def Tensor_GatherOp : Tensor_Op<"gather", [ DeclareOpInterfaceMethods, - NoSideEffect + Pure ]> { let summary = "gather a subset of a tensor at specified 
indices"; let description = [{ @@ -631,7 +631,7 @@ def Tensor_GenerateOp : Tensor_Op<"generate", [ DeclareOpInterfaceMethods, - RecursiveSideEffects, + RecursiveMemoryEffects, DeclareOpInterfaceMethods, SingleBlockImplicitTerminator<"mlir::tensor::YieldOp">]> { let summary = "Creates a dynamically sized tensor from elements"; @@ -678,7 +678,7 @@ def Tensor_InsertOp : Tensor_Op<"insert", [ DeclareOpInterfaceMethods, - NoSideEffect, + Pure, TypesMatchWith<"result type matches type of dest", "dest", "result", "$_self.cast()">, @@ -733,7 +733,7 @@ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, AttrSizedOperandSegments, - NoSideEffect, + Pure, OffsetSizeAndStrideOpInterface, TypesMatchWith<"expected result type to match dest type", "dest", "result", "$_self"> @@ -872,7 +872,7 @@ def Tensor_RankOp : Tensor_Op<"rank", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "rank operation"; let description = [{ The `tensor.rank` operation takes a tensor operand and returns its rank. @@ -898,7 +898,7 @@ def Tensor_ReshapeOp: Tensor_Op<"reshape", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "tensor reshape operation"; let description = [{ The `reshape` operation converts a tensor from one type to an equivalent @@ -963,7 +963,7 @@ class Tensor_ReassociativeReshapeOp traits = []> : Tensor_Op, - NoSideEffect])>, + Pure])>, Arguments<(ins AnyTensor:$src, IndexListArrayAttr:$reassociation)>, Results<(outs AnyTensor:$result)> { @@ -1116,7 +1116,7 @@ def Tensor_PadOp : Tensor_Op<"pad", [ DeclareOpInterfaceMethods, AttrSizedOperandSegments, - NoSideEffect, + Pure, SingleBlockImplicitTerminator<"mlir::tensor::YieldOp">]> { let summary = "tensor pad operation"; let description = [{ @@ -1338,7 +1338,7 @@ This op does not create a new value, which allows maintaining a clean separation between the subset and full tensor. 
- Note that we cannot mark this operation as pure (NoSideEffects), even + Note that we cannot mark this operation as pure (Pure), even though it has no side effects, because it will get DCEd during canonicalization. @@ -1459,7 +1459,7 @@ def Tensor_ScatterOp : Tensor_Op<"scatter", [ DeclareOpInterfaceMethods, - NoSideEffect + Pure ]> { let summary = "scatter a tensor into a destination tensor at specified indices"; @@ -1600,7 +1600,7 @@ def Tensor_SplatOp : Tensor_Op<"splat", [ DeclareOpInterfaceMethods, - NoSideEffect, + Pure, TypesMatchWith<"operand type matches element type of result", "aggregate", "input", "$_self.cast().getElementType()"> @@ -1647,7 +1647,7 @@ //===----------------------------------------------------------------------===// def Tensor_YieldOp : Tensor_Op<"yield", - [NoSideEffect, ReturnLike, Terminator, + [Pure, ReturnLike, Terminator, HasParent<"::mlir::tensor::GenerateOp, ::mlir::tensor::PadOp">]> { let summary = "Yield a value from a region"; let description = [{ diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td @@ -35,7 +35,7 @@ def Tosa_ArgMaxOp : Tosa_Op<"argmax", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Perform argmax on the input."; let description = [{ @@ -59,7 +59,7 @@ def Tosa_AvgPool2dOp : Tosa_Op<"avg_pool2d", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Performs max pooling on the input."; let description = [{ @@ -91,7 +91,7 @@ def Tosa_Conv2DOp : Tosa_Op<"conv2d", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "2D Convolution Operator"; let description = [{ @@ -124,7 +124,7 @@ def Tosa_Conv3DOp : Tosa_Op<"conv3d", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "3D Convolution operator"; let description = [{ @@ -156,7 +156,7 @@ def Tosa_DepthwiseConv2DOp : 
Tosa_Op<"depthwise_conv2d", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Depthwise 2D Convolution operator"; let description = [{ @@ -189,7 +189,7 @@ def Tosa_FullyConnectedOp : Tosa_Op<"fully_connected", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Fully Connected operator"; let description = [{ @@ -217,7 +217,7 @@ def Tosa_MatMulOp : Tosa_Op<"matmul", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Matrix multiplication with bias"; let description = [{ @@ -245,7 +245,7 @@ def Tosa_MaxPool2dOp : Tosa_Op<"max_pool2d", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Performs max pooling on the input."; let description = [{ @@ -276,7 +276,7 @@ def Tosa_TransposeConv2DOp : Tosa_Op<"transpose_conv2d", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Transpose 2D Convolution operator."; let description = [{ @@ -313,7 +313,7 @@ def Tosa_ClampOp : Tosa_Op<"clamp", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Computes clamp(features, min, max)."; let description = [{ @@ -345,7 +345,7 @@ def Tosa_SigmoidOp : Tosa_Op<"sigmoid", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Computes elementwise sigmoid of input."; let description = [{ @@ -371,7 +371,7 @@ def Tosa_TanhOp : Tosa_Op<"tanh", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Computes elementwise hyperbolic tangent of input"; let description = [{ @@ -402,7 +402,7 @@ def Tosa_AddOp : Tosa_Op<"add", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect, Commutative]> { + ResultsBroadcastableShape, Pure, Commutative]> { let summary = "Elementwise addition operator"; let description = [{ @@ -429,7 +429,7 @@ def Tosa_ArithmeticRightShiftOp : Tosa_Op<"arithmetic_right_shift", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let 
summary = "Elementwise Arithmetic Right Shift"; let description = [{ @@ -454,7 +454,7 @@ def Tosa_BitwiseAndOp : Tosa_Op<"bitwise_and", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect, Commutative]> { + ResultsBroadcastableShape, Pure, Commutative]> { let summary = "Bitwise AND operator"; let description = [{ @@ -478,7 +478,7 @@ def Tosa_BitwiseOrOp : Tosa_Op<"bitwise_or", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect, Commutative]> { + ResultsBroadcastableShape, Pure, Commutative]> { let summary = "Bitwise OR operator"; let description = [{ @@ -502,7 +502,7 @@ def Tosa_BitwiseXorOp : Tosa_Op<"bitwise_xor", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect, Commutative]> { + ResultsBroadcastableShape, Pure, Commutative]> { let summary = "Bitwise XOR operator"; let description = [{ @@ -526,7 +526,7 @@ def Tosa_DivOp : Tosa_Op<"div", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let summary = "Integer divide operator"; let description = [{ @@ -552,7 +552,7 @@ def Tosa_LogicalAndOp : Tosa_Op<"logical_and", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, Commutative, NoSideEffect]> { + ResultsBroadcastableShape, Commutative, Pure]> { let summary = "Returns the truth value of x AND y element-wise."; let description = [{ @@ -576,7 +576,7 @@ def Tosa_LogicalLeftShiftOp : Tosa_Op<"logical_left_shift", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let summary = "Elementwise Logical Left Shift"; let description = [{ @@ -600,7 +600,7 @@ def Tosa_LogicalRightShiftOp : Tosa_Op<"logical_right_shift", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let summary = "Elementwise Logical Right Shift"; let description = [{ @@ -624,7 +624,7 @@ def Tosa_LogicalOrOp : Tosa_Op<"logical_or", [ 
DeclareOpInterfaceMethods, - ResultsBroadcastableShape, Commutative, NoSideEffect]> { + ResultsBroadcastableShape, Commutative, Pure]> { let summary = "Returns the truth value of x OR y element-wise."; let description = [{ @@ -648,7 +648,7 @@ def Tosa_LogicalXorOp : Tosa_Op<"logical_xor", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, Commutative, NoSideEffect]> { + ResultsBroadcastableShape, Commutative, Pure]> { let summary = "Returns the truth value of x XOR y element-wise."; let description = [{ @@ -672,7 +672,7 @@ def Tosa_MaximumOp : Tosa_Op<"maximum", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect, Commutative]> { + ResultsBroadcastableShape, Pure, Commutative]> { let summary = "Elementwise Maximum"; let description = [{ @@ -696,7 +696,7 @@ def Tosa_MinimumOp : Tosa_Op<"minimum", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect, Commutative]> { + ResultsBroadcastableShape, Pure, Commutative]> { let summary = "Elementwise Minimum"; let description = [{ @@ -720,7 +720,7 @@ def Tosa_MulOp : Tosa_Op<"mul", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect, Commutative]> { + ResultsBroadcastableShape, Pure, Commutative]> { let summary = "Multiplication operator"; let description = [{ @@ -748,7 +748,7 @@ def Tosa_PowOp : Tosa_Op<"pow", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let summary = "Computes the power of one value to another."; let description = [{ @@ -772,7 +772,7 @@ def Tosa_SubOp : Tosa_Op<"sub", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let summary = "Elementwise subtraction operator"; let description = [{ @@ -798,7 +798,7 @@ def Tosa_TableOp : Tosa_Op<"table", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Table lookup op"; let description = [{ @@ -841,7 +841,7 @@ def Tosa_AbsOp : Tosa_Op<"abs", [ 
DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise abs op"; let description = [{ @@ -863,7 +863,7 @@ def Tosa_BitwiseNotOp : Tosa_Op<"bitwise_not", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let summary = "Bitwise NOT operator"; let description = [{ @@ -885,7 +885,7 @@ def Tosa_CeilOp : Tosa_Op<"ceil", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise ceil op"; let description = [{ @@ -907,7 +907,7 @@ def Tosa_ClzOp : Tosa_Op<"clz", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise count leading zero op"; let description = [{ @@ -929,7 +929,7 @@ def Tosa_ExpOp : Tosa_Op<"exp", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise exp op"; let description = [{ @@ -951,7 +951,7 @@ def Tosa_FloorOp : Tosa_Op<"floor", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise floor op"; let description = [{ @@ -973,7 +973,7 @@ def Tosa_LogOp : Tosa_Op<"log", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise log op"; let description = [{ @@ -995,7 +995,7 @@ def Tosa_LogicalNotOp : Tosa_Op<"logical_not", [ DeclareOpInterfaceMethods, - NoSideEffect, SameOperandsAndResultType]> { + Pure, SameOperandsAndResultType]> { let summary = "Returns the truth value of NOT x element-wise."; let description = [{ @@ -1017,7 +1017,7 @@ def Tosa_NegateOp : Tosa_Op<"negate", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise negate op"; let description = [{ @@ -1042,7 +1042,7 @@ def Tosa_ReciprocalOp : Tosa_Op<"reciprocal", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise reciprocal op"; let description = [{ @@ -1065,7 +1065,7 @@ def Tosa_RsqrtOp : Tosa_Op<"rsqrt", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Elementwise 
1/sqrt op"; let description = [{ @@ -1093,7 +1093,7 @@ //===----------------------------------------------------------------------===// def Tosa_SelectOp : Tosa_Op<"select", [ DeclareOpInterfaceMethods, NoSideEffect]> { + ["inferReturnTypeComponents"]>, Pure]> { let summary = "Elementwise select operator"; let description = [{ @@ -1122,7 +1122,7 @@ // Operator: equal //===----------------------------------------------------------------------===// def Tosa_EqualOp : Tosa_Op<"equal", [InferTensorType, ResultsBroadcastableShape, - Commutative, NoSideEffect]> { + Commutative, Pure]> { let summary = "Returns the truth value of (x == y) element-wise."; let description = [{ @@ -1153,7 +1153,7 @@ def Tosa_GreaterOp : Tosa_Op<"greater", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let summary = "Returns the truth value of (x > y) element-wise."; let description = [{ @@ -1178,7 +1178,7 @@ def Tosa_GreaterEqualOp : Tosa_Op<"greater_equal", [ DeclareOpInterfaceMethods, - ResultsBroadcastableShape, NoSideEffect]> { + ResultsBroadcastableShape, Pure]> { let summary = "Returns the truth value of (x >= y) element-wise."; let description = [{ @@ -1208,7 +1208,7 @@ def Tosa_ReduceAllOp : Tosa_Op<"reduce_all", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Reduce All operator"; let description = [{ @@ -1233,7 +1233,7 @@ def Tosa_ReduceAnyOp : Tosa_Op<"reduce_any", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Reduce Any operator"; let description = [{ @@ -1258,7 +1258,7 @@ def Tosa_ReduceMaxOp : Tosa_Op<"reduce_max", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Reduce Max operator"; let description = [{ @@ -1283,7 +1283,7 @@ def Tosa_ReduceMinOp : Tosa_Op<"reduce_min", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Reduce Min operator"; let description = [{ @@ -1308,7 +1308,7 @@ def Tosa_ReduceProdOp : 
Tosa_Op<"reduce_prod", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Reduce Prod operator"; let description = [{ @@ -1333,7 +1333,7 @@ def Tosa_ReduceSumOp : Tosa_Op<"reduce_sum", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Reduce Sum operator"; let description = [{ @@ -1363,7 +1363,7 @@ def Tosa_ConcatOp : Tosa_Op<"concat", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Concatenates tensors along one dimension."; let description = [{ @@ -1389,7 +1389,7 @@ def Tosa_PadOp : Tosa_Op<"pad", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Pads a tensor with value specified."; let description = [{ @@ -1420,7 +1420,7 @@ def Tosa_ReshapeOp: Tosa_Op<"reshape", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Reshape operator"; let description = [{ @@ -1447,7 +1447,7 @@ //===----------------------------------------------------------------------===// def Tosa_ReverseOp: Tosa_Op<"reverse", [ DeclareOpInterfaceMethods, NoSideEffect]> { + ["inferReturnTypeComponents"]>, Pure]> { let summary = "Reverse operator"; let description = [{ @@ -1473,7 +1473,7 @@ //===----------------------------------------------------------------------===// def Tosa_SliceOp: Tosa_Op<"slice", [ DeclareOpInterfaceMethods, NoSideEffect]> { + ["inferReturnTypeComponents"]>, Pure]> { let summary = "Slice operator"; let description = [{ @@ -1501,7 +1501,7 @@ def Tosa_TileOp: Tosa_Op<"tile", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Tile operator"; let description = [{ @@ -1525,7 +1525,7 @@ def Tosa_TransposeOp : Tosa_Op<"transpose", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Transpose operator"; let description = [{ @@ -1556,7 +1556,7 @@ def Tosa_GatherOp : Tosa_Op<"gather", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Gather operation,"; let description = [{ @@ -1580,7 +1580,7 @@ 
def Tosa_ScatterOp : Tosa_Op<"scatter", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Scatter operation,"; let description = [{ @@ -1610,7 +1610,7 @@ def Tosa_ResizeOp : Tosa_Op<"resize", [ DeclareOpInterfaceMethods, - NoSideEffect]> { + Pure]> { let summary = "Resize operation, supports various resize/upsample modes"; @@ -1646,7 +1646,7 @@ //===----------------------------------------------------------------------===// // Operator: cast //===----------------------------------------------------------------------===// -def Tosa_CastOp: Tosa_Op<"cast", [NoSideEffect, +def Tosa_CastOp: Tosa_Op<"cast", [Pure, DeclareOpInterfaceMethods]> { @@ -1688,7 +1688,7 @@ //===----------------------------------------------------------------------===// // Operator: rescale //===----------------------------------------------------------------------===// -def Tosa_RescaleOp: Tosa_Op<"rescale", [NoSideEffect, +def Tosa_RescaleOp: Tosa_Op<"rescale", [Pure, DeclareOpInterfaceMethods]> { let summary = "Tosa rescale operator"; @@ -1736,7 +1736,7 @@ //===----------------------------------------------------------------------===// // Operator: const //===----------------------------------------------------------------------===// -def Tosa_ConstOp : Tosa_Op<"const", [ConstantLike, NoSideEffect, +def Tosa_ConstOp : Tosa_Op<"const", [ConstantLike, Pure, FirstAttrDerivedResultType]> { let summary = "Constant op."; @@ -1758,7 +1758,7 @@ //===----------------------------------------------------------------------===// // Operator: identity //===----------------------------------------------------------------------===// -def Tosa_IdentityOp: Tosa_Op<"identity", [NoSideEffect, +def Tosa_IdentityOp: Tosa_Op<"identity", [Pure, DeclareOpInterfaceMethods]> { let summary = "Identity operator"; @@ -1820,7 +1820,7 @@ DeclareOpInterfaceMethods, SingleBlockImplicitTerminator<"YieldOp">, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let summary = "Conditional if operator"; 
let description = [{ @@ -1854,7 +1854,7 @@ DeclareOpInterfaceMethods, SingleBlockImplicitTerminator<"YieldOp">, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let summary = "output = input; While (Cond(output)) {output = Body(output)}"; let description = [{ diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaUtilOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaUtilOps.td --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaUtilOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaUtilOps.td @@ -24,7 +24,7 @@ include "mlir/Dialect/Tosa/IR/TosaTypesBase.td" include "mlir/Dialect/Tosa/IR/TosaOpBase.td" -def Tosa_ApplyScaleOp: Tosa_Op<"apply_scale", [NoSideEffect] # ElementwiseMappable.traits> { +def Tosa_ApplyScaleOp: Tosa_Op<"apply_scale", [Pure] # ElementwiseMappable.traits> { let summary = "Rescale scalar operator for Tosa tensor operators"; let description = [{ @@ -53,7 +53,7 @@ //===----------------------------------------------------------------------===// def Tosa_YieldOp : Tosa_Op<"yield", [ Terminator, - NoSideEffect]> { + Pure]> { let summary = "yield operator"; let description = [{ diff --git a/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td b/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td --- a/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td +++ b/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td @@ -380,7 +380,7 @@ def WithPDLPatternsOp : TransformDialectOp<"with_pdl_patterns", [DeclareOpInterfaceMethods, NoTerminator, - OpAsmOpInterface, PossibleTopLevelTransformOpTrait, RecursiveSideEffects, + OpAsmOpInterface, PossibleTopLevelTransformOpTrait, RecursiveMemoryEffects, SymbolTable]> { let summary = "Contains PDL patterns available for use in transforms"; let description = [{ diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -83,7 +83,7 @@ // than the current 
set: {*, +}. def Vector_ContractionOp : Vector_Op<"contract", [ - NoSideEffect, + Pure, PredOpTrait<"lhs and rhs have same element type", TCopVTEtIsSameAs<0, 1>>, PredOpTrait<"third operand acc and result have same element type", TCresVTEtIsSameAsOpBase<0, 2>>, @@ -281,7 +281,7 @@ } def Vector_ReductionOp : - Vector_Op<"reduction", [NoSideEffect, + Vector_Op<"reduction", [Pure, PredOpTrait<"source operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, DeclareOpInterfaceMethods, @@ -332,7 +332,7 @@ } def Vector_MultiDimReductionOp : - Vector_Op<"multi_reduction", [NoSideEffect, + Vector_Op<"multi_reduction", [Pure, AllTypesMatch<["dest", "acc"]>, PredOpTrait<"source operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, @@ -403,7 +403,7 @@ } def Vector_BroadcastOp : - Vector_Op<"broadcast", [NoSideEffect, + Vector_Op<"broadcast", [Pure, PredOpTrait<"source operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>]>, Arguments<(ins AnyType:$source)>, @@ -451,7 +451,7 @@ } def Vector_ShuffleOp : - Vector_Op<"shuffle", [NoSideEffect, + Vector_Op<"shuffle", [Pure, PredOpTrait<"first operand v1 and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, PredOpTrait<"second operand v2 and result have same element type", @@ -517,7 +517,7 @@ } def Vector_ExtractElementOp : - Vector_Op<"extractelement", [NoSideEffect, + Vector_Op<"extractelement", [Pure, TypesMatchWith<"result type matches element type of vector operand", "vector", "result", "$_self.cast().getElementType()">]>, @@ -565,7 +565,7 @@ } def Vector_ExtractOp : - Vector_Op<"extract", [NoSideEffect, + Vector_Op<"extract", [Pure, PredOpTrait<"operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, DeclareOpInterfaceMethods]>, @@ -604,7 +604,7 @@ def Vector_FMAOp : Op, + Pure, AllTypesMatch<["lhs", "rhs", "acc", "result"]>, DeclareOpInterfaceMethods ] # ElementwiseMappable.traits>, Arguments<(ins AnyVectorOfAnyRank:$lhs, @@ -634,7 
+634,7 @@ } def Vector_InsertElementOp : - Vector_Op<"insertelement", [NoSideEffect, + Vector_Op<"insertelement", [Pure, TypesMatchWith<"source operand type matches element type of result", "result", "source", "$_self.cast().getElementType()">, @@ -682,7 +682,7 @@ } def Vector_InsertOp : - Vector_Op<"insert", [NoSideEffect, + Vector_Op<"insert", [Pure, PredOpTrait<"source operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, AllTypesMatch<["dest", "res"]>]>, @@ -725,7 +725,7 @@ } def Vector_InsertStridedSliceOp : - Vector_Op<"insert_strided_slice", [NoSideEffect, + Vector_Op<"insert_strided_slice", [Pure, PredOpTrait<"operand #0 and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, AllTypesMatch<["dest", "res"]>]>, @@ -784,7 +784,7 @@ } def Vector_OuterProductOp : - Vector_Op<"outerproduct", [NoSideEffect, + Vector_Op<"outerproduct", [Pure, PredOpTrait<"lhs operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, PredOpTrait<"rhs operand and result have same element type", @@ -875,7 +875,7 @@ // TODO: Add transformation which decomposes ReshapeOp into an optimized // sequence of vector rotate/shuffle/select operations. 
def Vector_ReshapeOp : - Vector_Op<"reshape", [AttrSizedOperandSegments, NoSideEffect]>, + Vector_Op<"reshape", [AttrSizedOperandSegments, Pure]>, Arguments<(ins AnyVector:$vector, Variadic:$input_shape, Variadic:$output_shape, I64ArrayAttr:$fixed_vector_sizes)>, @@ -995,7 +995,7 @@ } def Vector_ExtractStridedSliceOp : - Vector_Op<"extract_strided_slice", [NoSideEffect, + Vector_Op<"extract_strided_slice", [Pure, PredOpTrait<"operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>]>, Arguments<(ins AnyVector:$vector, I64ArrayAttr:$offsets, @@ -1911,7 +1911,7 @@ } def Vector_ShapeCastOp : - Vector_Op<"shape_cast", [NoSideEffect]>, + Vector_Op<"shape_cast", [Pure]>, Arguments<(ins AnyVector:$source)>, Results<(outs AnyVector:$result)> { let summary = "shape_cast casts between vector shapes"; @@ -1963,7 +1963,7 @@ } def Vector_BitCastOp : - Vector_Op<"bitcast", [NoSideEffect, AllRanksMatch<["source", "result"]>]>, + Vector_Op<"bitcast", [Pure, AllRanksMatch<["source", "result"]>]>, Arguments<(ins AnyVectorOfAnyRank:$source)>, Results<(outs AnyVectorOfAnyRank:$result)>{ let summary = "bitcast casts between vectors"; @@ -2003,7 +2003,7 @@ } def Vector_TypeCastOp : - Vector_Op<"type_cast", [NoSideEffect, ViewLikeOpInterface]>, + Vector_Op<"type_cast", [Pure, ViewLikeOpInterface]>, Arguments<(ins StaticShapeMemRefOf<[AnyType]>:$memref)>, Results<(outs AnyMemRef:$result)> { let summary = "type_cast op converts a scalar memref to a vector memref"; @@ -2050,7 +2050,7 @@ } def Vector_ConstantMaskOp : - Vector_Op<"constant_mask", [NoSideEffect]>, + Vector_Op<"constant_mask", [Pure]>, Arguments<(ins I64ArrayAttr:$mask_dim_sizes)>, Results<(outs VectorOfAnyRankOf<[I1]>)> { let summary = "creates a constant vector mask"; @@ -2092,7 +2092,7 @@ } def Vector_CreateMaskOp : - Vector_Op<"create_mask", [NoSideEffect]>, + Vector_Op<"create_mask", [Pure]>, Arguments<(ins Variadic:$operands)>, Results<(outs VectorOfAnyRankOf<[I1]>)> { let summary = "creates a vector 
mask"; @@ -2131,7 +2131,7 @@ } def Vector_MaskOp : Vector_Op<"mask", [ - SingleBlockImplicitTerminator<"vector::YieldOp">, RecursiveSideEffects, + SingleBlockImplicitTerminator<"vector::YieldOp">, RecursiveMemoryEffects, NoRegionArguments ]> { let summary = "Predicates a maskable vector operation"; @@ -2203,7 +2203,7 @@ } def Vector_TransposeOp : - Vector_Op<"transpose", [NoSideEffect, + Vector_Op<"transpose", [Pure, DeclareOpInterfaceMethods, PredOpTrait<"operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>]>, @@ -2300,7 +2300,7 @@ /// This may seem redundant with vector.contract but it serves the purposes of /// more progressive lowering and localized type conversion on the path: /// `vector<...x...xf32> -> vector<...xf32> -> !llvm<... x float>`. -def Vector_MatmulOp : Vector_Op<"matrix_multiply", [NoSideEffect, +def Vector_MatmulOp : Vector_Op<"matrix_multiply", [Pure, PredOpTrait<"lhs operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, PredOpTrait<"rhs operand and result have same element type", @@ -2364,7 +2364,7 @@ /// This may seem redundant with vector.transpose but it serves the purposes of /// more progressive lowering and localized type conversion on the path: /// `vector<...x...xf32> -> vector<...xf32> -> !llvm<... x float>`. -def Vector_FlatTransposeOp : Vector_Op<"flat_transpose", [NoSideEffect, +def Vector_FlatTransposeOp : Vector_Op<"flat_transpose", [Pure, PredOpTrait<"source operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>]>, Arguments<( @@ -2407,7 +2407,7 @@ //===----------------------------------------------------------------------===// def Vector_SplatOp : Vector_Op<"splat", [ - NoSideEffect, + Pure, TypesMatchWith<"operand type matches element type of result", "aggregate", "input", "$_self.cast().getElementType()"> @@ -2450,7 +2450,7 @@ // call to the function. For that, it might be useful to have a // 'vector.scale.global' and a 'vector.scale.local' operation. 
def VectorScaleOp : Vector_Op<"vscale", - [NoSideEffect]> { + [Pure]> { let summary = "Load vector scale size"; let description = [{ The `vscale` op returns the scale of the scalable vectors, a positive @@ -2473,7 +2473,7 @@ //===----------------------------------------------------------------------===// def Vector_ScanOp : - Vector_Op<"scan", [NoSideEffect, + Vector_Op<"scan", [Pure, AllTypesMatch<["source", "dest"]>, AllTypesMatch<["initial_value", "accumulated_value"]> ]>, Arguments<(ins Vector_CombiningKindAttr:$kind, @@ -2528,7 +2528,7 @@ } def Vector_YieldOp : Vector_Op<"yield", [ - NoSideEffect, ReturnLike, Terminator]> { + Pure, ReturnLike, Terminator]> { let summary = "Terminates and yields values from vector regions."; let description = [{ "vector.yield" yields an SSA value from the Vector dialect op region and @@ -2552,7 +2552,7 @@ def Vector_WarpExecuteOnLane0Op : Vector_Op<"warp_execute_on_lane_0", [DeclareOpInterfaceMethods, SingleBlockImplicitTerminator<"vector::YieldOp">, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let summary = "Executes operations in the associated region on thread #0 of a" "SPMD program"; let description = [{ diff --git a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td --- a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td +++ b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td @@ -54,7 +54,7 @@ // MaskCompressOp //----------------------------------------------------------------------------// -def MaskCompressOp : AVX512_Op<"mask.compress", [NoSideEffect, +def MaskCompressOp : AVX512_Op<"mask.compress", [Pure, // TODO: Support optional arguments in `AllTypesMatch`. "type($src)" could // then be removed from assemblyFormat. 
AllTypesMatch<["a", "dst"]>, @@ -91,7 +91,7 @@ } def MaskCompressIntrOp : AVX512_IntrOverloadedOp<"mask.compress", [ - NoSideEffect, + Pure, AllTypesMatch<["a", "src", "res"]>, TypesMatchWith<"`k` has the same number of bits as elements in `res`", "res", "k", @@ -109,7 +109,7 @@ // MaskRndScaleOp //----------------------------------------------------------------------------// -def MaskRndScaleOp : AVX512_Op<"mask.rndscale", [NoSideEffect, +def MaskRndScaleOp : AVX512_Op<"mask.rndscale", [Pure, AllTypesMatch<["src", "a", "dst"]>, TypesMatchWith<"imm has the same number of bits as elements in dst", "dst", "imm", @@ -142,7 +142,7 @@ } def MaskRndScalePSIntrOp : AVX512_IntrOp<"mask.rndscale.ps.512", 1, [ - NoSideEffect, + Pure, AllTypesMatch<["src", "a", "res"]>]> { let arguments = (ins VectorOfLengthAndType<[16], [F32]>:$src, I32:$k, @@ -152,7 +152,7 @@ } def MaskRndScalePDIntrOp : AVX512_IntrOp<"mask.rndscale.pd.512", 1, [ - NoSideEffect, + Pure, AllTypesMatch<["src", "a", "res"]>]> { let arguments = (ins VectorOfLengthAndType<[8], [F64]>:$src, I32:$k, @@ -165,7 +165,7 @@ // MaskScaleFOp //----------------------------------------------------------------------------// -def MaskScaleFOp : AVX512_Op<"mask.scalef", [NoSideEffect, +def MaskScaleFOp : AVX512_Op<"mask.scalef", [Pure, AllTypesMatch<["src", "a", "b", "dst"]>, TypesMatchWith<"k has the same number of bits as elements in dst", "dst", "k", @@ -199,7 +199,7 @@ } def MaskScaleFPSIntrOp : AVX512_IntrOp<"mask.scalef.ps.512", 1, [ - NoSideEffect, + Pure, AllTypesMatch<["src", "a", "b", "res"]>]> { let arguments = (ins VectorOfLengthAndType<[16], [F32]>:$src, VectorOfLengthAndType<[16], [F32]>:$a, @@ -209,7 +209,7 @@ } def MaskScaleFPDIntrOp : AVX512_IntrOp<"mask.scalef.pd.512", 1, [ - NoSideEffect, + Pure, AllTypesMatch<["src", "a", "b", "res"]>]> { let arguments = (ins VectorOfLengthAndType<[8], [F64]>:$src, VectorOfLengthAndType<[8], [F64]>:$a, @@ -222,7 +222,7 @@ // Vp2IntersectOp 
//----------------------------------------------------------------------------// -def Vp2IntersectOp : AVX512_Op<"vp2intersect", [NoSideEffect, +def Vp2IntersectOp : AVX512_Op<"vp2intersect", [Pure, AllTypesMatch<["a", "b"]>, TypesMatchWith<"k1 has the same number of bits as elements in a", "a", "k1", @@ -260,13 +260,13 @@ } def Vp2IntersectDIntrOp : AVX512_IntrOp<"vp2intersect.d.512", 2, [ - NoSideEffect]> { + Pure]> { let arguments = (ins VectorOfLengthAndType<[16], [I32]>:$a, VectorOfLengthAndType<[16], [I32]>:$b); } def Vp2IntersectQIntrOp : AVX512_IntrOp<"vp2intersect.q.512", 2, [ - NoSideEffect]> { + Pure]> { let arguments = (ins VectorOfLengthAndType<[8], [I64]>:$a, VectorOfLengthAndType<[8], [I64]>:$b); } @@ -295,14 +295,14 @@ // AVX Rsqrt //----------------------------------------------------------------------------// -def RsqrtOp : AVX_Op<"rsqrt", [NoSideEffect, SameOperandsAndResultType]> { +def RsqrtOp : AVX_Op<"rsqrt", [Pure, SameOperandsAndResultType]> { let summary = "Rsqrt"; let arguments = (ins VectorOfLengthAndType<[8], [F32]>:$a); let results = (outs VectorOfLengthAndType<[8], [F32]>:$b); let assemblyFormat = "$a attr-dict `:` type($a)"; } -def RsqrtIntrOp : AVX_IntrOp<"rsqrt.ps.256", 1, [NoSideEffect, +def RsqrtIntrOp : AVX_IntrOp<"rsqrt.ps.256", 1, [Pure, SameOperandsAndResultType]> { let arguments = (ins VectorOfLengthAndType<[8], [F32]>:$a); } @@ -311,7 +311,7 @@ // AVX Dot //----------------------------------------------------------------------------// -def DotOp : AVX_LowOp<"dot", [NoSideEffect, SameOperandsAndResultType]> { +def DotOp : AVX_LowOp<"dot", [Pure, SameOperandsAndResultType]> { let summary = "Dot"; let description = [{ Computes the 4-way dot products of the lower and higher parts of the source @@ -335,7 +335,7 @@ let assemblyFormat = "$a `,` $b attr-dict `:` type($res)"; } -def DotIntrOp : AVX_IntrOp<"dp.ps.256", 1, [NoSideEffect, +def DotIntrOp : AVX_IntrOp<"dp.ps.256", 1, [Pure, AllTypesMatch<["a", "b", "res"]>]> { let 
arguments = (ins VectorOfLengthAndType<[8], [F32]>:$a, VectorOfLengthAndType<[8], [F32]>:$b, I8:$c); diff --git a/mlir/include/mlir/IR/BuiltinOps.td b/mlir/include/mlir/IR/BuiltinOps.td --- a/mlir/include/mlir/IR/BuiltinOps.td +++ b/mlir/include/mlir/IR/BuiltinOps.td @@ -99,7 +99,7 @@ //===----------------------------------------------------------------------===// def UnrealizedConversionCastOp : Builtin_Op<"unrealized_conversion_cast", [ - DeclareOpInterfaceMethods, NoSideEffect + DeclareOpInterfaceMethods, Pure ]> { let summary = "An unrealized conversion from one set of types to another"; let description = [{ diff --git a/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td b/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td --- a/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td +++ b/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td @@ -98,7 +98,7 @@ static bool hasNoEffect(Operation *op) { if (auto interface = dyn_cast<}] # name # [{>(op)) return interface.hasNoEffect(); - return op->hasTrait<::mlir::OpTrait::HasRecursiveSideEffects>(); + return op->hasTrait<::mlir::OpTrait::HasRecursiveMemoryEffects>(); } /// Collect all of the effect instances that operate on the provided value diff --git a/mlir/include/mlir/Interfaces/SideEffectInterfaces.h b/mlir/include/mlir/Interfaces/SideEffectInterfaces.h --- a/mlir/include/mlir/Interfaces/SideEffectInterfaces.h +++ b/mlir/include/mlir/Interfaces/SideEffectInterfaces.h @@ -194,18 +194,65 @@ }; } // namespace SideEffects +namespace Speculation { +/// This enum is returned from the `getSpeculatability` method in the +/// `ConditionallySpeculatable` op interface. +enum class Speculatability { + /// The Operation in question cannot be speculatively executed. This could be + /// because it may invoke undefined behavior or have other side effects. + NotSpeculatable, + + // The Operation in question can be speculatively executed. It does not have + // any side effects or undefined behavior. 
+ Speculatable, + + // The Operation in question can be speculatively executed if all the + // operations in all attached regions can also be speculatively executed. + RecursivelySpeculatable, +}; + +constexpr auto NotSpeculatable = Speculatability::NotSpeculatable; +constexpr auto Speculatable = Speculatability::Speculatable; +constexpr auto RecursivelySpeculatable = + Speculatability::RecursivelySpeculatable; +} // namespace Speculation + //===----------------------------------------------------------------------===// // SideEffect Traits //===----------------------------------------------------------------------===// namespace OpTrait { -/// This trait indicates that the side effects of an operation includes the +/// This trait indicates that the memory effects of an operation includes the /// effects of operations nested within its regions. If the operation has no /// derived effects interfaces, the operation itself can be assumed to have no -/// side effects. +/// memory effects. template -class HasRecursiveSideEffects - : public TraitBase {}; +class HasRecursiveMemoryEffects + : public TraitBase {}; + +/// This trait marks an op (which must be tagged as implementing the +/// ConditionallySpeculatable interface) as being recursively speculatable. +/// This means that said op can be speculated only if all the instructions in +/// all the regions attached to the op can be speculated. +template +struct RecursivelySpeculatableImplTrait + : public TraitBase { + + Speculation::Speculatability getSpeculatability() { + return Speculation::RecursivelySpeculatable; + } +}; + +/// This trait marks an op (which must be tagged as implementing the +/// ConditionallySpeculatable interface) as being always speculatable. 
+template +struct AlwaysSpeculatableImplTrait + : public TraitBase { + + Speculation::Speculatability getSpeculatability() { + return Speculation::Speculatable; + } +}; } // namespace OpTrait //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Interfaces/SideEffectInterfaces.td b/mlir/include/mlir/Interfaces/SideEffectInterfaces.td --- a/mlir/include/mlir/Interfaces/SideEffectInterfaces.td +++ b/mlir/include/mlir/Interfaces/SideEffectInterfaces.td @@ -77,9 +77,59 @@ // Effect Traits //===----------------------------------------------------------------------===// -// Op has no side effect. -def NoSideEffect : MemoryEffects<[]>; +// Op has no effect on memory but may have undefined behavior. +def NoMemoryEffect : MemoryEffects<[]>; + // Op has recursively computed side effects. -def RecursiveSideEffects : NativeOpTrait<"HasRecursiveSideEffects">; +def RecursiveMemoryEffects : NativeOpTrait<"HasRecursiveMemoryEffects">; + +//===----------------------------------------------------------------------===// +// Speculation +//===----------------------------------------------------------------------===// + +// Used to inject an implementation of getSpeculatability. Users should not use +// this directly. +def RecursivelySpeculatableImplTrait + : NativeOpTrait<"RecursivelySpeculatableImplTrait">; + +// Used to inject an implementation of getSpeculatability. Users should not use +// this directly. +def AlwaysSpeculatableImplTrait + : NativeOpTrait<"AlwaysSpeculatableImplTrait">; + +// This op interface enables Op authors to inject custom logic to determine +// whether an Operation can be speculatively executed. Ops that implement this +// interface need to implement the custom logic in the `getSpeculatability` method. +// For instance, the `getSpeculatability` for a specific op may check the attributes +// or input types to determine whether that specific Operation is speculatable. 
+def ConditionallySpeculatable : OpInterface<"ConditionallySpeculatable"> { + let description = [{ + An interface used to query information about the speculability of an + operation. + }]; + let cppNamespace = "::mlir"; + + let methods = [ + InterfaceMethod<[{ + Returns value indicating whether the specific operation in question can + be speculatively executed. Please see the documentation on the + Speculatability enum to know how to interpret the return value. + }], + "::mlir::Speculation::Speculatability", "getSpeculatability", (ins)> + ]; +} + +// Marks an Operation as always speculatable. +def AlwaysSpeculatable : TraitList<[ + ConditionallySpeculatable, AlwaysSpeculatableImplTrait]>; + +// Marks an Operation as speculatable only if all the operations in all attached +// regions are also speculatable. +def RecursivelySpeculatable : TraitList<[ + ConditionallySpeculatable, RecursivelySpeculatableImplTrait]>; + +// Always speculatable operation that does not touch memory. These operations +// are always legal to hoist or sink. +def Pure : TraitList<[AlwaysSpeculatable, NoMemoryEffect]>; #endif // MLIR_INTERFACES_SIDEEFFECTS diff --git a/mlir/include/mlir/Transforms/SideEffectUtils.h b/mlir/include/mlir/Transforms/SideEffectUtils.h --- a/mlir/include/mlir/Transforms/SideEffectUtils.h +++ b/mlir/include/mlir/Transforms/SideEffectUtils.h @@ -13,17 +13,24 @@ class Operation; -/// Returns true if the given operation is side-effect free. +/// Returns true if the given operation is free of memory effects. /// -/// An operation is side-effect free if its implementation of +/// An operation is free of memory effects if its implementation of /// `MemoryEffectOpInterface` indicates that it has no memory effects. For -/// example, it may implement `NoSideEffect` in ODS. Alternatively, if the -/// operation `HasRecursiveSideEffects`, then it is side-effect free if all of -/// its nested operations are side-effect free. +/// example, it may implement `NoMemoryEffect` in ODS. 
Alternatively, if the +/// operation has the `HasRecursiveMemoryEffects` trait, then it is free of +/// memory effects if all of its nested operations are free of memory effects. /// -/// If the operation has both, then it is side-effect free if both conditions -/// are satisfied. -bool isSideEffectFree(Operation *op); +/// If the operation has both, then it is free of memory effects if both +/// conditions are satisfied. +bool isMemoryEffectFree(Operation *op); + +/// Returns true if the given operation is speculatable, i.e. has no undefined +/// behavior or other side effects. +/// +/// An operation can indicate that it is speculatable by implementing the +/// getSpeculatability hook in the ConditionallySpeculatable op interface. +bool isSpeculatable(Operation *op); } // end namespace mlir diff --git a/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp b/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp --- a/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp +++ b/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp @@ -348,7 +348,7 @@ ModRefResult LocalAliasAnalysis::getModRef(Operation *op, Value location) { // Check to see if this operation relies on nested side effects. - if (op->hasTrait()) { + if (op->hasTrait()) { // TODO: To check recursive operations we need to check all of the nested // operations, which can result in a quadratic number of queries. We should // introduce some caching of some kind to help alleviate this, especially as diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp --- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp @@ -743,7 +743,7 @@ return; } - if (op->hasTrait()) { + if (op->hasTrait()) { // Recurse into the regions for this op and check whether the internal // operations may have the side effect `EffectType` on memOp. 
for (Region ®ion : op->getRegions()) diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -378,7 +378,7 @@ static bool isOpItselfPotentialAutomaticAllocation(Operation *op) { // This op itself doesn't create a stack allocation, // the inner allocation should be handled separately. - if (op->hasTrait()) + if (op->hasTrait()) return false; MemoryEffectOpInterface interface = dyn_cast(op); if (!interface) diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp @@ -224,7 +224,7 @@ static bool canBeHoisted(Operation *op, function_ref definedOutside) { return llvm::all_of(op->getOperands(), definedOutside) && - isSideEffectFree(op) && op->getNumRegions() == 0; + isMemoryEffectFree(op) && op->getNumRegions() == 0; } /// Return a value yielded by `warpOp` which statifies the filter lamdba @@ -555,7 +555,7 @@ // There must be no op with a side effect after writeOp. 
Operation *nextOp = writeOp.getOperation(); while ((nextOp = nextOp->getNextNode())) - if (!isSideEffectFree(nextOp)) + if (!isMemoryEffectFree(nextOp)) return failure(); if (!llvm::all_of(writeOp->getOperands(), [&](Value value) { diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp @@ -114,7 +114,7 @@ users.append(subView->getUsers().begin(), subView->getUsers().end()); continue; } - if (isSideEffectFree(user)) + if (isMemoryEffectFree(user)) continue; if (user == write.getOperation()) continue; @@ -200,7 +200,7 @@ users.append(subView->getUsers().begin(), subView->getUsers().end()); continue; } - if (isSideEffectFree(user) || isa(user)) + if (isMemoryEffectFree(user) || isa(user)) continue; if (auto write = dyn_cast(user)) { // If there is a write, but we can prove that it is disjoint we can ignore diff --git a/mlir/lib/Interfaces/SideEffectInterfaces.cpp b/mlir/lib/Interfaces/SideEffectInterfaces.cpp --- a/mlir/lib/Interfaces/SideEffectInterfaces.cpp +++ b/mlir/lib/Interfaces/SideEffectInterfaces.cpp @@ -47,7 +47,8 @@ // If the operation has recursive effects, push all of the nested operations // on to the stack to consider. - bool hasRecursiveEffects = op->hasTrait(); + bool hasRecursiveEffects = + op->hasTrait(); if (hasRecursiveEffects) { for (Region ®ion : op->getRegions()) { for (auto &block : region) { diff --git a/mlir/lib/Transforms/ControlFlowSink.cpp b/mlir/lib/Transforms/ControlFlowSink.cpp --- a/mlir/lib/Transforms/ControlFlowSink.cpp +++ b/mlir/lib/Transforms/ControlFlowSink.cpp @@ -44,7 +44,7 @@ // Sink side-effect free operations. 
numSunk = controlFlowSink( regionsToSink, domInfo, - [](Operation *op, Region *) { return isSideEffectFree(op); }, + [](Operation *op, Region *) { return isMemoryEffectFree(op); }, [](Operation *op, Region *region) { // Move the operation to the beginning of the region's entry block. // This guarantees the preservation of SSA dominance of all of the diff --git a/mlir/lib/Transforms/Utils/LoopInvariantCodeMotionUtils.cpp b/mlir/lib/Transforms/Utils/LoopInvariantCodeMotionUtils.cpp --- a/mlir/lib/Transforms/Utils/LoopInvariantCodeMotionUtils.cpp +++ b/mlir/lib/Transforms/Utils/LoopInvariantCodeMotionUtils.cpp @@ -100,6 +100,8 @@ [&](Value value, Region *) { return loopLike.isDefinedOutsideOfLoop(value); }, - [&](Operation *op, Region *) { return isSideEffectFree(op); }, + [&](Operation *op, Region *) { + return isMemoryEffectFree(op) && isSpeculatable(op); + }, [&](Operation *op, Region *) { loopLike.moveOutOfLoop(op); }); } diff --git a/mlir/lib/Transforms/Utils/SideEffectUtils.cpp b/mlir/lib/Transforms/Utils/SideEffectUtils.cpp --- a/mlir/lib/Transforms/Utils/SideEffectUtils.cpp +++ b/mlir/lib/Transforms/Utils/SideEffectUtils.cpp @@ -12,15 +12,15 @@ using namespace mlir; -bool mlir::isSideEffectFree(Operation *op) { +bool mlir::isMemoryEffectFree(Operation *op) { if (auto memInterface = dyn_cast(op)) { // If the op has side-effects, it cannot be moved. if (!memInterface.hasNoEffect()) return false; // If the op does not have recursive side effects, then it can be moved. - if (!op->hasTrait()) + if (!op->hasTrait()) return true; - } else if (!op->hasTrait()) { + } else if (!op->hasTrait()) { // Otherwise, if the op does not implement the memory effect interface and // it does not have recursive side effects, then it cannot be known that the // op is moveable. @@ -30,7 +30,29 @@ // Recurse into the regions and ensure that all nested ops can also be moved. 
for (Region ®ion : op->getRegions()) for (Operation &op : region.getOps()) - if (!isSideEffectFree(&op)) + if (!isMemoryEffectFree(&op)) return false; return true; } + +bool mlir::isSpeculatable(Operation *op) { + auto conditionallySpeculatable = dyn_cast(op); + if (!conditionallySpeculatable) + return false; + + switch (conditionallySpeculatable.getSpeculatability()) { + case Speculation::RecursivelySpeculatable: + for (Region ®ion : op->getRegions()) { + for (Operation &op : region.getOps()) + if (!isSpeculatable(&op)) + return false; + } + return true; + + case Speculation::Speculatable: + return true; + + case Speculation::NotSpeculatable: + return false; + } +} diff --git a/mlir/test/Transforms/loop-invariant-code-motion.mlir b/mlir/test/Transforms/loop-invariant-code-motion.mlir --- a/mlir/test/Transforms/loop-invariant-code-motion.mlir +++ b/mlir/test/Transforms/loop-invariant-code-motion.mlir @@ -426,3 +426,80 @@ } : () -> () return } + +// ----- + +// CHECK-LABEL: test_always_speculatable_op +func.func @test_always_speculatable_op(%lb: index, %ub: index, %step: index) { + // CHECK: test.always_speculatable_op + // CHECK-NEXT: scf.for + scf.for %i = %lb to %ub step %step { + %val = "test.always_speculatable_op"() : () -> i32 + } + + return +} + +// CHECK-LABEL: test_never_speculatable_op +func.func @test_never_speculatable_op(%lb: index, %ub: index, %step: index) { + // CHECK: scf.for + // CHECK-NEXT: test.never_speculatable_op + scf.for %i = %lb to %ub step %step { + %val = "test.never_speculatable_op"() : () -> i32 + } + + return +} + +// CHECK-LABEL: test_conditionally_speculatable_op_success +func.func @test_conditionally_speculatable_op_success(%lb: index, %ub: index, %step: index) { + // CHECK: test.conditionally_speculatable_op + // CHECK-NEXT: scf.for + scf.for %i = %lb to %ub step %step { + %const_val = arith.constant 5 : i32 + %val = "test.conditionally_speculatable_op"(%const_val) : (i32) -> i32 + } + + return +} + +// CHECK-LABEL: 
test_conditionally_speculatable_op_failure +func.func @test_conditionally_speculatable_op_failure(%lb: index, %ub: index, %step: index, %arg: i32) { + // CHECK: scf.for + // CHECK-NEXT: test.conditionally_speculatable_op + %const_5 = arith.constant 5 : i32 + %non_const = arith.addi %arg, %const_5 : i32 + scf.for %i = %lb to %ub step %step { + %val = "test.conditionally_speculatable_op"(%non_const) : (i32) -> i32 + } + + return +} + +// CHECK-LABEL: test_recursively_speculatable_op_success +func.func @test_recursively_speculatable_op_success(%lb: index, %ub: index, %step: index, %arg: i32) { + // CHECK: test.recursively_speculatable_op + // CHECK: scf.for + scf.for %i = %lb to %ub step %step { + %val = "test.recursively_speculatable_op"()({ + %result = arith.addi %arg, %arg : i32 + test.region_yield %result : i32 + }) : () -> i32 + } + + return +} + +// CHECK-LABEL: test_recursively_speculatable_op_failure +func.func @test_recursively_speculatable_op_failure(%lb: index, %ub: index, %step: index, %arg: i32) { + // CHECK: scf.for + // CHECK-NEXT: test.recursively_speculatable_op + scf.for %i = %lb to %ub step %step { + %val = "test.recursively_speculatable_op"()({ + %result = "test.never_speculatable_op"() : () -> i32 + test.region_yield %result : i32 + }) : () -> i32 + } + + return +} diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td --- a/mlir/test/lib/Dialect/Test/TestOps.td +++ b/mlir/test/lib/Dialect/Test/TestOps.td @@ -1183,7 +1183,7 @@ let hasCanonicalizer = 1; } -def TestOpConstant : TEST_Op<"constant", [ConstantLike, NoSideEffect]> { +def TestOpConstant : TEST_Op<"constant", [ConstantLike, NoMemoryEffect]> { let arguments = (ins AnyAttr:$value); let results = (outs AnyType); @@ -1197,8 +1197,8 @@ (OpS:$unused $input1, $input2)>; // Op for testing trivial removal via folding of op with inner ops and no uses. 
-def TestOpWithRegionFoldNoSideEffect : TEST_Op< - "op_with_region_fold_no_side_effect", [NoSideEffect]> { +def TestOpWithRegionFoldNoMemoryEffect : TEST_Op< + "op_with_region_fold_no_side_effect", [NoMemoryEffect]> { let regions = (region SizedRegion<1>:$region); } @@ -1238,28 +1238,28 @@ def TestIdempotentTraitOp : TEST_Op<"op_idempotent_trait", - [SameOperandsAndResultType, NoSideEffect, Idempotent]> { + [SameOperandsAndResultType, NoMemoryEffect, Idempotent]> { let arguments = (ins I32:$op1); let results = (outs I32); } def TestIdempotentTraitBinaryOp : TEST_Op<"op_idempotent_trait_binary", - [SameOperandsAndResultType, NoSideEffect, Idempotent]> { + [SameOperandsAndResultType, NoMemoryEffect, Idempotent]> { let arguments = (ins I32:$op1, I32:$op2); let results = (outs I32); } def TestInvolutionTraitNoOperationFolderOp : TEST_Op<"op_involution_trait_no_operation_fold", - [SameOperandsAndResultType, NoSideEffect, Involution]> { + [SameOperandsAndResultType, NoMemoryEffect, Involution]> { let arguments = (ins I32:$op1); let results = (outs I32); } def TestInvolutionTraitFailingOperationFolderOp : TEST_Op<"op_involution_trait_failing_operation_fold", - [SameOperandsAndResultType, NoSideEffect, Involution]> { + [SameOperandsAndResultType, NoMemoryEffect, Involution]> { let arguments = (ins I32:$op1); let results = (outs I32); let hasFolder = 1; @@ -1267,7 +1267,7 @@ def TestInvolutionTraitSuccesfulOperationFolderOp : TEST_Op<"op_involution_trait_succesful_operation_fold", - [SameOperandsAndResultType, NoSideEffect, Involution]> { + [SameOperandsAndResultType, NoMemoryEffect, Involution]> { let arguments = (ins I32:$op1); let results = (outs I32); let hasFolder = 1; @@ -2543,7 +2543,7 @@ //===----------------------------------------------------------------------===// def RegionYieldOp : TEST_Op<"region_yield", - [NoSideEffect, ReturnLike, Terminator]> { + [Pure, ReturnLike, Terminator]> { let description = [{ This operation is used in a region and yields the 
corresponding type for that operation. @@ -2599,7 +2599,7 @@ //===----------------------------------------------------------------------===// def RegionIfYieldOp : TEST_Op<"region_if_yield", - [NoSideEffect, ReturnLike, Terminator]> { + [NoMemoryEffect, ReturnLike, Terminator]> { let arguments = (ins Variadic:$results); let assemblyFormat = [{ $results `:` type($results) attr-dict @@ -2610,7 +2610,7 @@ [DeclareOpInterfaceMethods, SingleBlockImplicitTerminator<"RegionIfYieldOp">, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let description =[{ Represents an abstract if-then-else-join pattern. In this context, the then and else regions jump to the join region, which finally returns to its @@ -2641,7 +2641,7 @@ def AnyCondOp : TEST_Op<"any_cond", [DeclareOpInterfaceMethods, - RecursiveSideEffects]> { + RecursiveMemoryEffects]> { let results = (outs Variadic:$results); let regions = (region AnyRegion:$region); } @@ -2960,8 +2960,8 @@ // Test loop op with a graph region. def TestGraphLoopOp : TEST_Op<"graph_loop", - [LoopLikeOpInterface, NoSideEffect, - RecursiveSideEffects, SingleBlock, + [LoopLikeOpInterface, NoMemoryEffect, + RecursivelySpeculatable, SingleBlock, RegionKindInterface, HasOnlyGraphRegion]> { let arguments = (ins Variadic:$args); let results = (outs Variadic:$rets); @@ -2981,7 +2981,7 @@ //===----------------------------------------------------------------------===// def TestWithBoundsOp : TEST_Op<"with_bounds", [DeclareOpInterfaceMethods, - NoSideEffect]> { + NoMemoryEffect]> { let arguments = (ins IndexAttr:$umin, IndexAttr:$umax, IndexAttr:$smin, @@ -3005,7 +3005,7 @@ def TestIncrementOp : TEST_Op<"increment", [DeclareOpInterfaceMethods, - NoSideEffect]> { + NoMemoryEffect]> { let arguments = (ins Index:$value); let results = (outs Index:$result); @@ -3023,4 +3023,65 @@ let assemblyFormat = "attr-dict $value"; } + +//===----------------------------------------------------------------------===// +// Test ConditionallySpeculatable 
+//===----------------------------------------------------------------------===// + +def ConditionallySpeculatableOp : TEST_Op<"conditionally_speculatable_op", + [ConditionallySpeculatable, NoMemoryEffect]> { + let description = [{ + Op used to test conditional speculation. This op can be speculatively + executed if the input to it is an `arith.constant`. + }]; + + let arguments = (ins I32:$input); + let results = (outs I32:$result); + + let extraClassDeclaration = [{ + ::mlir::Speculation::Speculatability getSpeculatability(); + }]; + + let extraClassDefinition = [{ + ::mlir::Speculation::Speculatability + ConditionallySpeculatableOp::getSpeculatability() { + Operation* definingOp = getInput().getDefiningOp(); + return definingOp && isa<::mlir::arith::ConstantOp>(definingOp) ? + ::mlir::Speculation::Speculatable : ::mlir::Speculation::NotSpeculatable; + } + }]; +} + +def PureOp : TEST_Op<"always_speculatable_op", [Pure]> { + let description = [{ + Op used to test conditional speculation. This op can always be + speculatively executed. + }]; + let results = (outs I32:$result); +} + +def NeverSpeculatableOp : TEST_Op<"never_speculatable_op", [ConditionallySpeculatable]> { + let description = [{ + Op used to test conditional speculation. This op can never be + speculatively executed. + }]; + let results = (outs I32:$result); + + let extraClassDeclaration = [{ + ::mlir::Speculation::Speculatability getSpeculatability() { + return ::mlir::Speculation::NotSpeculatable; + } + }]; +} + +def RecursivelySpeculatableOp : TEST_Op<"recursively_speculatable_op", [ + RecursivelySpeculatable, RecursiveMemoryEffects]> { + let description = [{ + Op used to test conditional speculation. This op can be speculatively + executed only if all the ops in the attached region can be. 
+ }]; + let results = (outs I32:$result); + let regions = (region SizedRegion<1>:$body); +} + #endif // TEST_OPS diff --git a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td --- a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td +++ b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td @@ -80,7 +80,7 @@ def TestAddTestExtensionOp : Op, - NoSideEffect]> { + NoMemoryEffect]> { let arguments = (ins StrAttr:$message); let assemblyFormat = "$message attr-dict"; let cppNamespace = "::mlir::test"; @@ -108,7 +108,7 @@ def TestRemoveTestExtensionOp : Op, - NoSideEffect]> { + NoMemoryEffect]> { let assemblyFormat = "attr-dict"; let cppNamespace = "::mlir::test"; } diff --git a/mlir/test/mlir-tblgen/gen-dialect-doc.td b/mlir/test/mlir-tblgen/gen-dialect-doc.td --- a/mlir/test/mlir-tblgen/gen-dialect-doc.td +++ b/mlir/test/mlir-tblgen/gen-dialect-doc.td @@ -13,7 +13,7 @@ }]; let cppNamespace = "NS"; } -def AOp : Op]>; +def AOp : Op]>; def TestAttr : DialectAttr> { let summary = "attribute summary"; @@ -31,7 +31,7 @@ // CHECK-NOT: [TOC] // CHECK: Traits: SingleBlockImplicitTerminator -// CHECK: Interfaces: NoSideEffect (MemoryEffectOpInterface) +// CHECK: Interfaces: NoMemoryEffect (MemoryEffectOpInterface) // CHECK: Effects: MemoryEffects::Effect{} // CHECK: ## Attribute constraint definition diff --git a/mlir/test/mlir-tblgen/llvm-intrinsics.td b/mlir/test/mlir-tblgen/llvm-intrinsics.td --- a/mlir/test/mlir-tblgen/llvm-intrinsics.td +++ b/mlir/test/mlir-tblgen/llvm-intrinsics.td @@ -21,7 +21,7 @@ // match the result type. // CHECK: [1] // It has no side effects. -// CHECK: [NoSideEffect] +// CHECK: [NoMemoryEffect] // It has a result. // CHECK: 1, // It does not require an access group. @@ -42,7 +42,7 @@ // GROUPS-LABEL: def LLVM_ptrmask // GROUPS: LLVM_IntrOp<"ptrmask // It has no side effects. 
-// GROUPS: [NoSideEffect] +// GROUPS: [NoMemoryEffect] // It has a result. // GROUPS: 1, // It requires generation of an access group LLVM metadata. @@ -64,7 +64,7 @@ // ALIAS-LABEL: def LLVM_ptrmask // ALIAS: LLVM_IntrOp<"ptrmask // It has no side effects. -// ALIAS: [NoSideEffect] +// ALIAS: [NoMemoryEffect] // It has a result. // ALIAS: 1, // It does not require an access group. diff --git a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp --- a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp +++ b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp @@ -211,7 +211,7 @@ if (intr.isCommutative()) traits.push_back("Commutative"); if (!intr.hasSideEffects()) - traits.push_back("NoSideEffect"); + traits.push_back("NoMemoryEffect"); // Prepare strings for operands. llvm::SmallVector operands(intr.getNumOperands(),