diff --git a/mlir/docs/ConversionToLLVMDialect.md b/mlir/docs/ConversionToLLVMDialect.md
--- a/mlir/docs/ConversionToLLVMDialect.md
+++ b/mlir/docs/ConversionToLLVMDialect.md
@@ -35,7 +35,7 @@
 Index type is converted to an LLVM dialect integer type with bitwidth equal to
 the bitwidth of the pointer size as specified by the
 [data layout](Dialects/LLVM.md#data-layout-and-triple) of the closest module.
-For example, on x86-64 CPUs it converts to `!llvm.i64`. This behavior can be
+For example, on x86-64 CPUs it converts to `i64`. This behavior can be
 overridden by the type converter configuration, which is often exposed as a
 pass option by conversion passes.
 
diff --git a/mlir/docs/Dialects/LLVM.md b/mlir/docs/Dialects/LLVM.md
--- a/mlir/docs/Dialects/LLVM.md
+++ b/mlir/docs/Dialects/LLVM.md
@@ -66,11 +66,11 @@
 ```mlir
 ^bb1:
-  %0 = llvm.addi %arg0, %cst : !llvm.i32
-  llvm.br ^bb2[%0: !llvm.i32]
+  %0 = llvm.addi %arg0, %cst : i32
+  llvm.br ^bb2[%0: i32]
 
 // If the control flow comes from ^bb1, %arg1 == %0.
-^bb2(%arg1: !llvm.i32)
+^bb2(%arg1: i32)
 // ...
 ```
@@ -91,9 +91,9 @@
 ```mlir
 ^bb1:
-  llvm.cond_br %cond, ^bb2[%0: !llvm.i32], ^bb2[%1: !llvm.i32]
+  llvm.cond_br %cond, ^bb2[%0: i32], ^bb2[%1: i32]
 
-^bb2(%arg0: !llvm.i32):
+^bb2(%arg0: i32):
 // ...
 ```
@@ -124,7 +124,7 @@
 %2 = llvm.mlir.null : !llvm.ptr<func<void ()>>
 
 // Constant 42 as i32.
-%3 = llvm.mlir.constant(42 : i32) : !llvm.i32
+%3 = llvm.mlir.constant(42 : i32) : i32
 
 // Splat dense vector constant.
 %3 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : !llvm.vec<4 x float>
diff --git a/mlir/docs/LLVMDialectMemRefConvention.md b/mlir/docs/LLVMDialectMemRefConvention.md
--- a/mlir/docs/LLVMDialectMemRefConvention.md
+++ b/mlir/docs/LLVMDialectMemRefConvention.md
@@ -31,7 +31,7 @@
 // is transformed into
 
-llvm.func @foo(%arg0: !llvm.i32, %arg1: !llvm.i64) -> !llvm.struct<(i32, i64)> {
+llvm.func @foo(%arg0: i32, %arg1: i64) -> !llvm.struct<(i32, i64)> {
   // insert the values into a structure
   %0 = llvm.mlir.undef : !llvm.struct<(i32, i64)>
   %1 = llvm.insertvalue %arg0, %0[0] : !llvm.struct<(i32, i64)>
@@ -41,18 +41,18 @@
   llvm.return %2 : !llvm.struct<(i32, i64)>
 }
 llvm.func @bar() {
-  %0 = llvm.mlir.constant(42 : i32) : !llvm.i32
-  %1 = llvm.mlir.constant(17) : !llvm.i64
+  %0 = llvm.mlir.constant(42 : i32) : i32
+  %1 = llvm.mlir.constant(17) : i64
 
   // call and extract the values from the structure
   %2 = llvm.call @bar(%0, %1)
-     : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i64)>
+     : (i32, i64) -> !llvm.struct<(i32, i64)>
   %3 = llvm.extractvalue %2[0] : !llvm.struct<(i32, i64)>
   %4 = llvm.extractvalue %2[1] : !llvm.struct<(i32, i64)>
 
   // use as before
-  "use_i32"(%3) : (!llvm.i32) -> ()
-  "use_i64"(%4) : (!llvm.i64) -> ()
+  "use_i32"(%3) : (i32) -> ()
+  "use_i64"(%4) : (i64) -> ()
 }
 ```
@@ -87,9 +87,9 @@
 llvm.func @foo(%arg0: !llvm.ptr<f32>,  // Allocated pointer.
                %arg1: !llvm.ptr<f32>,  // Aligned pointer.
-               %arg2: !llvm.i64,       // Offset.
-               %arg3: !llvm.i64,       // Size in dim 0.
-               %arg4: !llvm.i64) {     // Stride in dim 0.
+               %arg2: i64,             // Offset.
+               %arg3: i64,             // Size in dim 0.
+               %arg4: i64) {           // Stride in dim 0.
   // Populate memref descriptor structure.
   %0 = llvm.mlir.undef : !llvm.memref_1d
   %1 = llvm.insertvalue %arg0, %0[0] : !llvm.memref_1d
@@ -153,7 +153,7 @@
 // Gets converted to the following.
 
-llvm.func @foo(%arg0: !llvm.i64        // Rank.
+llvm.func @foo(%arg0: i64              // Rank.
                %arg1: !llvm.ptr<i8>) { // Type-erased pointer to descriptor.
   // Pack the unranked memref descriptor.
  %0 = llvm.mlir.undef : !llvm.struct<(i64, ptr<i8>)>
@@ -182,7 +182,7 @@
   %2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, ptr<i8>)>
 
   // Pass individual values to the callee.
-  llvm.call @foo(%1, %2) : (!llvm.i64, !llvm.ptr<i8>)
+  llvm.call @foo(%1, %2) : (i64, !llvm.ptr<i8>)
   llvm.return
 }
 ```
@@ -269,8 +269,8 @@
 // Function with unpacked arguments.
 llvm.func @qux(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
-               %arg2: !llvm.i64, %arg3: !llvm.i64, %arg4: !llvm.i64,
-               %arg5: !llvm.i64, %arg6: !llvm.i64) {
+               %arg2: i64, %arg3: i64, %arg4: i64,
+               %arg5: i64, %arg6: i64) {
   // Populate memref descriptor (as per calling convention).
   %0 = llvm.mlir.undef : !llvm.memref_2d
   %1 = llvm.insertvalue %arg0, %0[0] : !llvm.memref_2d
@@ -282,9 +282,9 @@
   %7 = llvm.insertvalue %arg6, %6[4, 1] : !llvm.memref_2d
 
   // Store the descriptor in a stack-allocated space.
-  %8 = llvm.mlir.constant(1 : index) : !llvm.i64
+  %8 = llvm.mlir.constant(1 : index) : i64
   %9 = llvm.alloca %8 x !llvm.memref_2d
-     : (!llvm.i64) -> !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64,
+     : (i64) -> !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64,
                                         array<2xi64>, array<2xi64>)>>
   llvm.store %7, %9 : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2xi64>, array<2xi64>)>>
@@ -317,8 +317,8 @@
 // Function with unpacked arguments.
 llvm.func @foo(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
-               %arg2: !llvm.i64, %arg3: !llvm.i64, %arg4: !llvm.i64,
-               %arg5: !llvm.i64, %arg6: !llvm.i64) {
+               %arg2: i64, %arg3: i64, %arg4: i64,
+               %arg5: i64, %arg6: i64) {
   llvm.return
 }
 
@@ -336,8 +336,8 @@
   %6 = llvm.extractvalue %0[4, 0] : !llvm.memref_2d
   %7 = llvm.extractvalue %0[4, 1] : !llvm.memref_2d
   llvm.call @foo(%1, %2, %3, %4, %5, %6, %7)
-     : (!llvm.ptr<f32>, !llvm.ptr<f32>, !llvm.i64, !llvm.i64, !llvm.i64,
-        !llvm.i64, !llvm.i64) -> ()
+     : (!llvm.ptr<f32>, !llvm.ptr<f32>, i64, i64, i64,
+        i64, i64) -> ()
   llvm.return
 }
 ```
@@ -397,27 +397,27 @@
 // dynamic, extract the stride value from the descriptor.
 %stride1 = llvm.extractvalue[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<4xi64>, array<4xi64>)>
-%addr1 = muli %stride1, %1 : !llvm.i64
+%addr1 = muli %stride1, %1 : i64
 
 // When the stride or, in absence of explicit strides, the trailing sizes are
 // known statically, this value is used as a constant. The natural value of
 // strides is the product of all sizes following the current dimension.
-%stride2 = llvm.mlir.constant(32 : index) : !llvm.i64
-%addr2 = muli %stride2, %2 : !llvm.i64
-%addr3 = addi %addr1, %addr2 : !llvm.i64
+%stride2 = llvm.mlir.constant(32 : index) : i64
+%addr2 = muli %stride2, %2 : i64
+%addr3 = addi %addr1, %addr2 : i64
 
-%stride3 = llvm.mlir.constant(8 : index) : !llvm.i64
-%addr4 = muli %stride3, %3 : !llvm.i64
-%addr5 = addi %addr3, %addr4 : !llvm.i64
+%stride3 = llvm.mlir.constant(8 : index) : i64
+%addr4 = muli %stride3, %3 : i64
+%addr5 = addi %addr3, %addr4 : i64
 
 // Multiplication with the known unit stride can be omitted.
-%addr6 = addi %addr5, %4 : !llvm.i64
+%addr6 = addi %addr5, %4 : i64
 
 // If the linear offset is known to be zero, it can also be omitted. If it is
 // dynamic, it is extracted from the descriptor.
 %offset = llvm.extractvalue[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<4xi64>, array<4xi64>)>
-%addr7 = addi %addr6, %offset : !llvm.i64
+%addr7 = addi %addr6, %offset : i64
 
 // All accesses are based on the aligned pointer.
 %aligned = llvm.extractvalue[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                                 array<4xi64>, array<4xi64>)>
diff --git a/mlir/docs/SPIRVToLLVMDialectConversion.md b/mlir/docs/SPIRVToLLVMDialectConversion.md
--- a/mlir/docs/SPIRVToLLVMDialectConversion.md
+++ b/mlir/docs/SPIRVToLLVMDialectConversion.md
@@ -159,8 +159,8 @@
 `spv.Not` is modelled with an `xor` operation with a mask with all bits set.
 
 ```mlir
-                          %mask = llvm.mlir.constant(-1 : i32) : !llvm.i32
-%0 = spv.Not %op : i32 => %0 = llvm.xor %op, %mask : !llvm.i32
+                          %mask = llvm.mlir.constant(-1 : i32) : i32
+%0 = spv.Not %op : i32 => %0 = llvm.xor %op, %mask : i32
 ```
 
 #### Bitfield ops
@@ -189,10 +189,10 @@
 ```mlir
 // Broadcasting offset
 %offset0 = llvm.mlir.undef : !llvm.vec<2 x i8>
-  %zero = llvm.mlir.constant(0 : i32) : !llvm.i32
-  %offset1 = llvm.insertelement %offset, %offset0[%zero : !llvm.i32] : !llvm.vec<2 x i8>
-  %one = llvm.mlir.constant(1 : i32) : !llvm.i32
-  %vec_offset = llvm.insertelement %offset, %offset1[%one : !llvm.i32] : !llvm.vec<2 x i8>
+  %zero = llvm.mlir.constant(0 : i32) : i32
+  %offset1 = llvm.insertelement %offset, %offset0[%zero : i32] : !llvm.vec<2 x i8>
+  %one = llvm.mlir.constant(1 : i32) : i32
+  %vec_offset = llvm.insertelement %offset, %offset1[%one : i32] : !llvm.vec<2 x i8>
 
 // Broadcasting count
 // ...
@@ -209,10 +209,10 @@
 ```
 
 Also, note that if the bitwidth of `offset` or `count` is greater than the
-  bitwidth of `base`, truncation is still permitted. This is because the ops have a
-  defined behaviour with `offset` and `count` being less than the size of
-  `base`. It creates a natural upper bound on what values `offset` and `count`
-  can take, which is 64. This can be expressed in less than 8 bits.
+  bitwidth of `base`, truncation is still permitted. This is because the ops
+  have a defined behaviour with `offset` and `count` being less than the size
+  of `base`. It creates a natural upper bound on what values `offset` and
+  `count` can take, which is 64. This can be expressed in less than 8 bits.
 
 Now, having these two cases in mind, we can proceed with conversion for the
 ops and their operands.
@@ -227,18 +227,18 @@
 ```mlir
 // Create mask
-// %minus_one = llvm.mlir.constant(-1 : i32) : !llvm.i32
-// %t0 = llvm.shl %minus_one, %count : !llvm.i32
-// %t1 = llvm.xor %t0, %minus_one : !llvm.i32
-// %t2 = llvm.shl %t1, %offset : !llvm.i32
-// %mask = llvm.xor %t2, %minus_one : !llvm.i32
+// %minus_one = llvm.mlir.constant(-1 : i32) : i32
+// %t0 = llvm.shl %minus_one, %count : i32
+// %t1 = llvm.xor %t0, %minus_one : i32
+// %t2 = llvm.shl %t1, %offset : i32
+// %mask = llvm.xor %t2, %minus_one : i32
 
 // Extract unchanged bits from the Base
-// %new_base = llvm.and %base, %mask : !llvm.i32
+// %new_base = llvm.and %base, %mask : i32
 
 // Insert new bits
-// %sh_insert = llvm.shl %insert, %offset : !llvm.i32
-// %res = llvm.or %new_base, %sh_insert : !llvm.i32
+// %sh_insert = llvm.shl %insert, %offset : i32
+// %res = llvm.or %new_base, %sh_insert : i32
 
 %res = spv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
 ```
@@ -251,14 +251,14 @@
 ```mlir
 // Calculate the amount to shift left.
-// %size = llvm.mlir.constant(32 : i32) : !llvm.i32
-// %t0 = llvm.add %count, %offset : !llvm.i32
-// %t1 = llvm.sub %size, %t0 : !llvm.i32
+// %size = llvm.mlir.constant(32 : i32) : i32
+// %t0 = llvm.add %count, %offset : i32
+// %t1 = llvm.sub %size, %t0 : i32
 
 // Shift left and then right to extract the bits
-// %sh_left = llvm.shl %base, %t1 : !llvm.i32
-// %t2 = llvm.add %offset, %t1 : !llvm.i32
-// %res = llvm.ashr %sh_left, %t2 : !llvm.i32
+// %sh_left = llvm.shl %base, %t1 : i32
+// %t2 = llvm.add %offset, %t1 : i32
+// %res = llvm.ashr %sh_left, %t2 : i32
 
 %res = spv.BitFieldSExtract %base, %offset, %count : i32, i32, i32
 ```
@@ -270,13 +270,13 @@
 ```mlir
 // Create a mask
-// %minus_one = llvm.mlir.constant(-1 : i32) : !llvm.i32
-// %t0 = llvm.shl %minus_one, %count : !llvm.i32
-// mask = llvm.xor %t0, %minus_one : !llvm.i32
+// %minus_one = llvm.mlir.constant(-1 : i32) : i32
+// %t0 = llvm.shl %minus_one, %count : i32
+// mask = llvm.xor %t0, %minus_one : i32
 
 // Shift Base and apply mask
-// %sh_base = llvm.lshr %base, %offset : !llvm.i32
-// %res = llvm.and %sh_base, %mask : !llvm.i32
+// %sh_base = llvm.lshr %base, %offset : i32
+// %res = llvm.and %sh_base, %mask : i32
 
 %res = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
 ```
@@ -371,34 +371,34 @@
 First of all, it is important to note that there is no direct representation
 of entry points in LLVM. At the moment, we use the following approach:
 
-*  `spv.EntryPoint` is simply removed.
+*   `spv.EntryPoint` is simply removed.
 
-*  In contrast, `spv.ExecutionMode` may contain important information about the
-   entry point. For example, `LocalSize` provides information about the
-   work-group size that can be reused.
+*   In contrast, `spv.ExecutionMode` may contain important information about the
+    entry point. For example, `LocalSize` provides information about the
+    work-group size that can be reused.
 
-   In order to preserve this information, `spv.ExecutionMode` is converted to
-   a struct global variable that stores the execution mode id and any variables
-   associated with it. In C, the struct has the structure shown below.
+    In order to preserve this information, `spv.ExecutionMode` is converted to a
+    struct global variable that stores the execution mode id and any variables
+    associated with it. In C, the struct has the structure shown below.
 
-   ```C
-   // No values are associated        // There are values that are associated
-   // with this entry point.          // with this entry point.
-   struct {                           struct {
-     int32_t executionMode;             int32_t executionMode;
-   };                                   int32_t values[];
-                                      };
-   ```
+    ```C
+    // No values are associated        // There are values that are associated
+    // with this entry point.          // with this entry point.
+    struct {                           struct {
+      int32_t executionMode;             int32_t executionMode;
+    };                                   int32_t values[];
+                                       };
+    ```
 
-   ```mlir
-   // spv.ExecutionMode @empty "ContractionOff"
-   llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32)> {
-     %0 = llvm.mlir.undef : !llvm.struct<(i32)>
-     %1 = llvm.mlir.constant(31 : i32) : !llvm.i32
-     %ret = llvm.insertvalue %1, %0[0 : i32] : !llvm.struct<(i32)>
-     llvm.return %ret : !llvm.struct<(i32)>
-   }
-   ```
+    ```mlir
+    // spv.ExecutionMode @empty "ContractionOff"
+    llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32)> {
+      %0 = llvm.mlir.undef : !llvm.struct<(i32)>
+      %1 = llvm.mlir.constant(31 : i32) : i32
+      %ret = llvm.insertvalue %1, %0[0 : i32] : !llvm.struct<(i32)>
+      llvm.return %ret : !llvm.struct<(i32)>
+    }
+    ```
 
 ### Logical ops
@@ -417,8 +417,8 @@
 modelled with an `xor` operation with a mask with all bits set.
 
 ```mlir
-                                 %mask = llvm.mlir.constant(-1 : i1) : !llvm.i1
-%0 = spv.LogicalNot %op : i1  => %0 = llvm.xor %op, %mask : !llvm.i1
+                                 %mask = llvm.mlir.constant(-1 : i1) : i1
+%0 = spv.LogicalNot %op : i1  => %0 = llvm.xor %op, %mask : i1
 ```
 
 ### Memory ops
@@ -441,8 +441,8 @@
 // Corresponding LLVM dialect code
 %i = ...
 %var = ...
-%0 = llvm.mlir.constant(0 : i32) : !llvm.i32
-%el = llvm.getelementptr %var[%0, %i, %i] : (!llvm.ptr<struct<packed (float, array<4 x float>)>>, !llvm.i32, !llvm.i32, !llvm.i32)
+%0 = llvm.mlir.constant(0 : i32) : i32
+%el = llvm.getelementptr %var[%0, %i, %i] : (!llvm.ptr<struct<packed (float, array<4 x float>)>>, i32, i32, i32)
 ```
 
 #### `spv.Load` and `spv.Store`
@@ -538,13 +538,13 @@
 ```mlir
 // Conversion of VariableOp without initialization
-                                   %size = llvm.mlir.constant(1 : i32) : !llvm.i32
-%res = spv.Variable : !spv.ptr<vector<3xf32>, Function> => %res = llvm.alloca %size x !llvm.vec<3 x float> : (!llvm.i32) -> !llvm.ptr<vec<3 x float>>
+                                   %size = llvm.mlir.constant(1 : i32) : i32
+%res = spv.Variable : !spv.ptr<vector<3xf32>, Function> => %res = llvm.alloca %size x !llvm.vec<3 x float> : (i32) -> !llvm.ptr<vec<3 x float>>
 
 // Conversion of VariableOp with initialization
-                                   %c = llvm.mlir.constant(0 : i64) : !llvm.i64
-%c = spv.constant 0 : i64          %size = llvm.mlir.constant(1 : i32) : !llvm.i32
-%res = spv.Variable init(%c) : !spv.ptr<i64, Function> => %res = llvm.alloca %[[SIZE]] x !llvm.i64 : (!llvm.i32) -> !llvm.ptr<i64>
+                                   %c = llvm.mlir.constant(0 : i64) : i64
+%c = spv.constant 0 : i64          %size = llvm.mlir.constant(1 : i32) : i32
+%res = spv.Variable init(%c) : !spv.ptr<i64, Function> => %res = llvm.alloca %[[SIZE]] x i64 : (i32) -> !llvm.ptr<i64>
                                    llvm.store %c, %res : !llvm.ptr<i64>
 ```
@@ -582,11 +582,11 @@
 ```mlir
 // Shift without extension
-%res0 = spv.ShiftRightArithmetic %0, %2 : i32, i32 => %res0 = llvm.ashr %0, %2 : !llvm.i32
+%res0 = spv.ShiftRightArithmetic %0, %2 : i32, i32 => %res0 = llvm.ashr %0, %2 : i32
 
 // Shift with extension
-                                                      %ext = llvm.sext %1 : !llvm.i16 to !llvm.i32
-%res1 = spv.ShiftRightArithmetic %0, %1 : i32, i16 => %res1 = llvm.ashr %0, %ext: !llvm.i32
+                                                      %ext = llvm.sext %1 : i16 to i32
+%res1 = spv.ShiftRightArithmetic %0, %1 : i32, i16 => %res1 = llvm.ashr %0, %ext: i32
 ```
 
 ### `spv.constant`
@@ -612,7 +612,7 @@
 cover all possible corner cases.
 ```mlir
-// %0 = llvm.mlir.constant(0 : i8) : !llvm.i8
+// %0 = llvm.mlir.constant(0 : i8) : i8
 %0 = spv.constant 0 : i8
 
 // %1 = llvm.mlir.constant(dense<[2, 3, 4]> : vector<3xi32>) : !llvm.vec<3 x i32>
@@ -677,11 +677,11 @@
 ```mlir
 // Conversion of selection
-%cond = spv.constant true                     %cond = llvm.mlir.constant(true) : !llvm.i1
+%cond = spv.constant true                     %cond = llvm.mlir.constant(true) : i1
 
 spv.selection {
   spv.BranchConditional %cond, ^true, ^false  llvm.cond_br %cond, ^true, ^false
 
-^true:                                        ^true:
+^true:                                        ^true:
   // True block code                          // True block code
   spv.Branch ^merge                    =>     llvm.br ^merge
 
@@ -692,13 +692,13 @@
 ^merge:                                       ^merge:
   spv.mlir.merge                              llvm.br ^continue
 }
-// Remaining code                             ^continue:
+// Remaining code                             ^continue:
                                                 // Remaining code
 ```
 
 ```mlir
 // Conversion of loop
-%cond = spv.constant true                     %cond = llvm.mlir.constant(true) : !llvm.i1
+%cond = spv.constant true                     %cond = llvm.mlir.constant(true) : i1
 
 spv.loop {
   spv.Branch ^header                          llvm.br ^header
diff --git a/mlir/docs/Tutorials/Toy/Ch-6.md b/mlir/docs/Tutorials/Toy/Ch-6.md
--- a/mlir/docs/Tutorials/Toy/Ch-6.md
+++ b/mlir/docs/Tutorials/Toy/Ch-6.md
@@ -127,8 +127,8 @@
 ```mlir
 llvm.func @free(!llvm<"i8*">)
-llvm.func @printf(!llvm<"i8*">, ...) -> !llvm.i32
-llvm.func @malloc(!llvm.i64) -> !llvm<"i8*">
+llvm.func @printf(!llvm<"i8*">, ...) -> i32
+llvm.func @malloc(i64) -> !llvm<"i8*">
 llvm.func @main() {
   %0 = llvm.mlir.constant(1.000000e+00 : f64) : !llvm.double
   %1 = llvm.mlir.constant(2.000000e+00 : f64) : !llvm.double
 
@@ -137,18 +137,18 @@
 ^bb16:
   %221 = llvm.extractvalue %25[0 : index] : !llvm<"{ double*, i64, [2 x i64], [2 x i64] }">
-  %222 = llvm.mlir.constant(0 : index) : !llvm.i64
-  %223 = llvm.mlir.constant(2 : index) : !llvm.i64
-  %224 = llvm.mul %214, %223 : !llvm.i64
-  %225 = llvm.add %222, %224 : !llvm.i64
-  %226 = llvm.mlir.constant(1 : index) : !llvm.i64
-  %227 = llvm.mul %219, %226 : !llvm.i64
-  %228 = llvm.add %225, %227 : !llvm.i64
-  %229 = llvm.getelementptr %221[%228] : (!llvm<"double*">, !llvm.i64) -> !llvm<"double*">
+  %222 = llvm.mlir.constant(0 : index) : i64
+  %223 = llvm.mlir.constant(2 : index) : i64
+  %224 = llvm.mul %214, %223 : i64
+  %225 = llvm.add %222, %224 : i64
+  %226 = llvm.mlir.constant(1 : index) : i64
+  %227 = llvm.mul %219, %226 : i64
+  %228 = llvm.add %225, %227 : i64
+  %229 = llvm.getelementptr %221[%228] : (!llvm<"double*">, i64) -> !llvm<"double*">
   %230 = llvm.load %229 : !llvm<"double*">
-  %231 = llvm.call @printf(%207, %230) : (!llvm<"i8*">, !llvm.double) -> !llvm.i32
-  %232 = llvm.add %219, %218 : !llvm.i64
-  llvm.br ^bb15(%232 : !llvm.i64)
+  %231 = llvm.call @printf(%207, %230) : (!llvm<"i8*">, !llvm.double) -> i32
+  %232 = llvm.add %219, %218 : i64
+  llvm.br ^bb15(%232 : i64)
 ...
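The Toy excerpt above already uses the new spelling. As a compact before/after
sketch (a hand-written illustration, not an excerpt from this patch), the same
statements in the deprecated and the new integer syntax:

```mlir
// Hand-written illustration of the type-syntax change; not from the patch.
// Before this change, LLVM dialect integers carried a dialect prefix:
//   %0 = llvm.mlir.constant(1 : index) : !llvm.i64
//   %1 = llvm.add %0, %0 : !llvm.i64
// After this change, the built-in integer type is used directly:
%0 = llvm.mlir.constant(1 : index) : i64
%1 = llvm.add %0, %0 : i64
```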
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
--- a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
@@ -111,9 +111,8 @@
     // Create a function declaration for printf, the signature is:
     //   * `i32 (i8*, ...)`
-    auto llvmI32Ty = LLVM::LLVMIntegerType::get(context, 32);
-    auto llvmI8PtrTy =
-        LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
+    auto llvmI32Ty = IntegerType::get(context, 32);
+    auto llvmI8PtrTy = LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
     auto llvmFnType = LLVM::LLVMFunctionType::get(llvmI32Ty, llvmI8PtrTy,
                                                   /*isVarArg=*/true);
 
@@ -135,7 +134,7 @@
       OpBuilder::InsertionGuard insertGuard(builder);
       builder.setInsertionPointToStart(module.getBody());
       auto type = LLVM::LLVMArrayType::get(
-          LLVM::LLVMIntegerType::get(builder.getContext(), 8), value.size());
+          IntegerType::get(builder.getContext(), 8), value.size());
       global = builder.create<LLVM::GlobalOp>(loc, type, /*isConstant=*/true,
                                               LLVM::Linkage::Internal, name,
                                               builder.getStringAttr(value));
 
@@ -144,12 +143,11 @@
     // Get the pointer to the first character in the global string.
     Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
     Value cst0 = builder.create<LLVM::ConstantOp>(
-        loc, LLVM::LLVMIntegerType::get(builder.getContext(), 64),
+        loc, IntegerType::get(builder.getContext(), 64),
         builder.getIntegerAttr(builder.getIndexType(), 0));
     return builder.create<LLVM::GEPOp>(
         loc,
-        LLVM::LLVMPointerType::get(
-            LLVM::LLVMIntegerType::get(builder.getContext(), 8)),
+        LLVM::LLVMPointerType::get(IntegerType::get(builder.getContext(), 8)),
         globalPtr, ArrayRef<Value>({cst0, cst0}));
   }
 };
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
--- a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
@@ -111,9 +111,8 @@
     // Create a function declaration for printf, the signature is:
     //   * `i32 (i8*, ...)`
-    auto llvmI32Ty = LLVM::LLVMIntegerType::get(context, 32);
-    auto llvmI8PtrTy =
-        LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
+    auto llvmI32Ty = IntegerType::get(context, 32);
+    auto llvmI8PtrTy = LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
     auto llvmFnType = LLVM::LLVMFunctionType::get(llvmI32Ty, llvmI8PtrTy,
                                                   /*isVarArg=*/true);
 
@@ -135,7 +134,7 @@
       OpBuilder::InsertionGuard insertGuard(builder);
       builder.setInsertionPointToStart(module.getBody());
       auto type = LLVM::LLVMArrayType::get(
-          LLVM::LLVMIntegerType::get(builder.getContext(), 8), value.size());
+          IntegerType::get(builder.getContext(), 8), value.size());
       global = builder.create<LLVM::GlobalOp>(loc, type, /*isConstant=*/true,
                                               LLVM::Linkage::Internal, name,
                                               builder.getStringAttr(value));
 
@@ -144,12 +143,11 @@
     // Get the pointer to the first character in the global string.
     Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
     Value cst0 = builder.create<LLVM::ConstantOp>(
-        loc, LLVM::LLVMIntegerType::get(builder.getContext(), 64),
+        loc, IntegerType::get(builder.getContext(), 64),
         builder.getIntegerAttr(builder.getIndexType(), 0));
     return builder.create<LLVM::GEPOp>(
         loc,
-        LLVM::LLVMPointerType::get(
-            LLVM::LLVMIntegerType::get(builder.getContext(), 8)),
+        LLVM::LLVMPointerType::get(IntegerType::get(builder.getContext(), 8)),
         globalPtr, ArrayRef<Value>({cst0, cst0}));
   }
 };
diff --git a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
--- a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
+++ b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
@@ -180,9 +180,9 @@
   /// For example, memref<?x?xf32> is converted to the following list:
   /// - `!llvm<"float*">` (allocated pointer),
   /// - `!llvm<"float*">` (aligned pointer),
-  /// - `!llvm.i64` (offset),
-  /// - `!llvm.i64`, `!llvm.i64` (sizes),
-  /// - `!llvm.i64`, `!llvm.i64` (strides).
+  /// - `i64` (offset),
+  /// - `i64`, `i64` (sizes),
+  /// - `i64`, `i64` (strides).
   /// These types can be recomposed to a memref descriptor struct.
   SmallVector<Type, 5> getMemRefDescriptorFields(MemRefType type,
                                                  bool unpackAggregates);
 
@@ -193,7 +193,7 @@
   /// - an integer rank, followed by
   /// - a pointer to the memref descriptor struct.
   /// For example, memref<*xf32> is converted to the following list:
-  /// !llvm.i64 (rank)
+  /// i64 (rank)
   /// !llvm<"i8*"> (type-erased pointer).
   /// These types can be recomposed to an unranked memref descriptor struct.
   SmallVector<Type, 2> getUnrankedMemRefDescriptorFields();
@@ -523,15 +523,15 @@
   /// strides and buffer size from these sizes.
   ///
   /// For example, memref<4x?xf32> emits:
-  /// `sizes[0]` = llvm.mlir.constant(4 : index) : !llvm.i64
+  /// `sizes[0]` = llvm.mlir.constant(4 : index) : i64
   /// `sizes[1]` = `dynamicSizes[0]`
-  /// `strides[1]` = llvm.mlir.constant(1 : index) : !llvm.i64
+  /// `strides[1]` = llvm.mlir.constant(1 : index) : i64
   /// `strides[0]` = `sizes[0]`
-  /// %size = llvm.mul `sizes[0]`, `sizes[1]` : !llvm.i64
+  /// %size = llvm.mul `sizes[0]`, `sizes[1]` : i64
   /// %nullptr = llvm.mlir.null : !llvm.ptr<f32>
   /// %gep = llvm.getelementptr %nullptr[%size]
-  ///          : (!llvm.ptr<f32>, !llvm.i64) -> !llvm.ptr<f32>
-  /// `sizeBytes` = llvm.ptrtoint %gep : !llvm.ptr<f32> to !llvm.i64
+  ///          : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  /// `sizeBytes` = llvm.ptrtoint %gep : !llvm.ptr<f32> to i64
   void getMemRefDescriptorSizes(Location loc, MemRefType memRefType,
                                 ArrayRef<Value> dynamicSizes,
                                 ConversionPatternRewriter &rewriter,
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -54,18 +54,18 @@
 // Type constraint accepting LLVM integer types.
 def LLVM_AnyInteger : Type<
-  CPred<"$_self.isa<::mlir::LLVM::LLVMIntegerType>()">,
+  CPred<"$_self.isa<::mlir::IntegerType>()">,
   "LLVM integer type">;
 
 // Type constraints accepting LLVM integer type of a specific width.
 class LLVM_IntBase<int width> : Type<
     And<[LLVM_AnyInteger.predicate,
-         CPred<"$_self.cast<::mlir::LLVM::LLVMIntegerType>().getBitWidth() == "
+         CPred<"$_self.cast<::mlir::IntegerType>().getWidth() == "
               # width>]>,
     "LLVM " # width # "-bit integer type">,
   BuildableType<
-    "::mlir::LLVM::LLVMIntegerType::get($_builder.getContext(), "
+    "::mlir::IntegerType::get($_builder.getContext(), "
     # width # ")">;
 
 def LLVM_i1 : LLVM_IntBase<1>;
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -184,7 +184,7 @@
   let builders = [
     OpBuilderDAG<(ins "ICmpPredicate":$predicate, "Value":$lhs, "Value":$rhs),
     [{
-      build($_builder, $_state, LLVMIntegerType::get(lhs.getType().getContext(), 1),
+      build($_builder, $_state, IntegerType::get(lhs.getType().getContext(), 1),
             $_builder.getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs);
     }]>];
   let parser = [{ return parseCmpOp(parser, result); }];
@@ -235,7 +235,7 @@
     OpBuilderDAG<(ins "FCmpPredicate":$predicate, "Value":$lhs, "Value":$rhs,
                   CArg<"FastmathFlags", "{}">:$fmf),
     [{
-      build($_builder, $_state, LLVMIntegerType::get(lhs.getType().getContext(), 1),
+      build($_builder, $_state, IntegerType::get(lhs.getType().getContext(), 1),
            $_builder.getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs,
            ::mlir::LLVM::FMFAttr::get(fmf, $_builder.getContext()));
     }]>];
@@ -791,7 +791,7 @@
   }
 
   // Define the global.
-  llvm.mlir.global @const(42 : i32) : !llvm.i32
+  llvm.mlir.global @const(42 : i32) : i32
   ```
 }];
@@ -862,9 +862,9 @@
   // i32* getelementptr (i32* @g2, i32 2)
   llvm.mlir.global constant @int_gep() : !llvm.ptr<i32> {
     %0 = llvm.mlir.addressof @g2 : !llvm.ptr<i32>
-    %1 = llvm.mlir.constant(2 : i32) : !llvm.i32
+    %1 = llvm.mlir.constant(2 : i32) : i32
     %2 = llvm.getelementptr %0[%1]
-       : (!llvm.ptr<i32>, !llvm.i32) -> !llvm.ptr<i32>
+       : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
     // The initializer region must end with `llvm.return`.
     llvm.return %2 : !llvm.ptr<i32>
   }
@@ -880,7 +880,7 @@
   ```mlir
   // Global values use @-identifiers.
-  llvm.mlir.global constant @cst(42 : i32) : !llvm.i32
+  llvm.mlir.global constant @cst(42 : i32) : i32
 
   // Non-constant values must also be initialized.
   llvm.mlir.global @variable(32.0 : f32) : !llvm.float
@@ -895,9 +895,9 @@
   // A complex initializer is constructed with an initializer region.
   llvm.mlir.global constant @int_gep() : !llvm.ptr<i32> {
     %0 = llvm.mlir.addressof @g2 : !llvm.ptr<i32>
-    %1 = llvm.mlir.constant(2 : i32) : !llvm.i32
+    %1 = llvm.mlir.constant(2 : i32) : i32
     %2 = llvm.getelementptr %0[%1]
-       : (!llvm.ptr<i32>, !llvm.i32) -> !llvm.ptr<i32>
+       : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
     llvm.return %2 : !llvm.ptr<i32>
   }
   ```
@@ -911,7 +911,7 @@
   ```mlir
   // A constant with internal linkage will not participate in linking.
-  llvm.mlir.global internal constant @cst(42 : i32) : !llvm.i32
+  llvm.mlir.global internal constant @cst(42 : i32) : i32
 
   // By default, "external" linkage is assumed and the global participates in
   // symbol resolution at link-time.
@@ -969,13 +969,13 @@
   ```mlir
   // The type of @bar is !llvm<"i64 (i64)">
-  llvm.func @bar(%arg0: !llvm.i64) -> !llvm.i64 {
-    llvm.return %arg0 : !llvm.i64
+  llvm.func @bar(%arg0: i64) -> i64 {
+    llvm.return %arg0 : i64
   }
 
   // The type of @foo is !llvm<"void (i64)">
   // !llvm.void type is omitted
-  llvm.func @foo(%arg0: !llvm.i64) {
+  llvm.func @foo(%arg0: i64) {
     llvm.return
   }
 
@@ -1102,10 +1102,10 @@
   ```mlir
   // Integer constant, internal i32 is mandatory
-  %0 = llvm.mlir.constant(42 : i32) : !llvm.i32
+  %0 = llvm.mlir.constant(42 : i32) : i32
 
   // It's okay to omit i64.
-  %1 = llvm.mlir.constant(42) : !llvm.i64
+  %1 = llvm.mlir.constant(42) : i64
 
   // Floating point constant.
   %2 = llvm.mlir.constant(42.0 : f32) : !llvm.float
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
@@ -31,7 +31,6 @@
 namespace detail {
 struct LLVMFunctionTypeStorage;
-struct LLVMIntegerTypeStorage;
struct LLVMPointerTypeStorage;
 struct LLVMStructTypeStorage;
 struct LLVMTypeAndSizeStorage;
@@ -155,30 +154,6 @@
                                                 bool);
 };
 
-//===----------------------------------------------------------------------===//
-// LLVMIntegerType.
-//===----------------------------------------------------------------------===//
-
-/// LLVM dialect signless integer type parameterized by bitwidth.
-class LLVMIntegerType : public Type::TypeBase<LLVMIntegerType, Type,
-                                              detail::LLVMIntegerTypeStorage> {
-public:
-  /// Inherit base constructor.
-  using Base::Base;
-
-  /// Gets or creates an instance of the integer of the specified `bitwidth` in
-  /// the given context.
-  static LLVMIntegerType get(MLIRContext *ctx, unsigned bitwidth);
-  static LLVMIntegerType getChecked(Location loc, unsigned bitwidth);
-
-  /// Returns the bitwidth of this integer type.
-  unsigned getBitWidth();
-
-  /// Verifies that the type about to be constructed is well-formed.
-  static LogicalResult verifyConstructionInvariants(Location loc,
-                                                    unsigned bitwidth);
-};
-
 //===----------------------------------------------------------------------===//
 // LLVMPointerType.
 //===----------------------------------------------------------------------===//
@@ -412,30 +387,7 @@
 //===----------------------------------------------------------------------===//
 
 /// Returns `true` if the given type is compatible with the LLVM dialect.
-inline bool isCompatibleType(Type type) {
-  // clang-format off
-  return type.isa<
-    LLVMArrayType,
-    LLVMBFloatType,
-    LLVMDoubleType,
-    LLVMFP128Type,
-    LLVMFloatType,
-    LLVMFunctionType,
-    LLVMHalfType,
-    LLVMIntegerType,
-    LLVMLabelType,
-    LLVMMetadataType,
-    LLVMPPCFP128Type,
-    LLVMPointerType,
-    LLVMStructType,
-    LLVMTokenType,
-    LLVMVectorType,
-    LLVMVoidType,
-    LLVMX86FP80Type,
-    LLVMX86MMXType
-  >();
-  // clang-format on
-}
+bool isCompatibleType(Type type);
 
 inline bool isCompatibleFloatingPointType(Type type) {
   return type.isa<LLVMHalfType, LLVMBFloatType, LLVMFloatType, LLVMDoubleType,
                   LLVMFP128Type, LLVMX86FP80Type>();
 }
 
 /// Returns the size of the given primitive LLVM dialect type in bits, for
 /// example, the size of i16 is 16 and the size of vec<4 x i16> is 64. Returns 0 for non-primitive
 /// (aggregates such as struct) or types that don't have a size (such as void).
 llvm::TypeSize getPrimitiveTypeSizeInBits(Type type);
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -111,9 +111,9 @@
     return success();
   auto type = getType().dyn_cast<LLVM::LLVMStructType>();
   auto elementType = (type && type.getBody().size() == 2)
-                         ? type.getBody()[1].dyn_cast<LLVMIntegerType>()
+                         ? type.getBody()[1].dyn_cast<IntegerType>()
                          : nullptr;
-  if (!elementType || elementType.getBitWidth() != 1)
+  if (!elementType || elementType.getWidth() != 1)
     return emitError("expected return type to be a two-element struct with "
                      "i1 as the second element");
   return success();
diff --git a/mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/X86/test-inline-asm.mlir
@@ -3,14 +3,14 @@
 // RUN: FileCheck %s
 
 module {
-  llvm.func @printI64(!llvm.i64)
+  llvm.func @printI64(i64)
   llvm.func @entry() {
-    %c2 = llvm.mlir.constant(-42: i64) :!llvm.i64
+    %c2 = llvm.mlir.constant(-42: i64) :i64
     %val = llvm.inline_asm "xor $0, $0", "=r,r" %c2 :
-      (!llvm.i64) -> !llvm.i64
+      (i64) -> i64
 
     // CHECK: 0
-    llvm.call @printI64(%val) : (!llvm.i64) -> ()
+    llvm.call @printI64(%val) : (i64) -> ()
     llvm.return
   }
 }
diff --git a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
@@ -13,16 +13,16 @@
     %2 = llvm.mlir.constant(3.000000e+00 : f32) : !llvm.float
     %3 = llvm.mlir.constant(4.000000e+00 : f32) : !llvm.float
     %4 = llvm.mlir.undef : !llvm.vec<4 x float>
-    %5 = llvm.mlir.constant(0 : index) : !llvm.i64
-    %6 = llvm.insertelement %0, %4[%5 : !llvm.i64] : !llvm.vec<4 x float>
+    %5 = llvm.mlir.constant(0 : index) : i64
+    %6 = llvm.insertelement %0, %4[%5 : i64] : !llvm.vec<4 x float>
     %7 = llvm.shufflevector %6, %4 [0 : i32, 0 : i32, 0 : i32, 0 : i32]
       : !llvm.vec<4 x float>, !llvm.vec<4 x float>
-    %8 = llvm.mlir.constant(1 : i64) : !llvm.i64
-    %9 = llvm.insertelement %1, %7[%8 : !llvm.i64] : !llvm.vec<4 x float>
-    %10 = llvm.mlir.constant(2 : i64) : !llvm.i64
-    %11 = llvm.insertelement %2, %9[%10 : !llvm.i64] : !llvm.vec<4 x float>
-    %12 = llvm.mlir.constant(3 : i64) : !llvm.i64
-    %v = llvm.insertelement %3, %11[%12 : !llvm.i64] : !llvm.vec<4 x float>
+    %8 = llvm.mlir.constant(1 : i64) : i64
+    %9 = llvm.insertelement %1, %7[%8 : i64] : !llvm.vec<4 x float>
+    %10 = llvm.mlir.constant(2 : i64) : i64
+    %11 = llvm.insertelement %2, %9[%10 : i64] : !llvm.vec<4 x float>
+    %12 = llvm.mlir.constant(3 : i64) : i64
+    %v = llvm.insertelement %3, %11[%12 : i64] : !llvm.vec<4 x float>
     %max = "llvm.intr.vector.reduce.fmax"(%v)
       : (!llvm.vec<4 x float>) -> !llvm.float
diff --git a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
@@ -5,76 +5,76 @@
 // End-to-end test of all int reduction intrinsics (not exhaustive unit tests).
 module {
   llvm.func @printNewline()
-  llvm.func @printI64(!llvm.i64)
+  llvm.func @printI64(i64)
   llvm.func @entry() {
     // Setup (1,2,3,4).
-    %0 = llvm.mlir.constant(1 : i64) : !llvm.i64
-    %1 = llvm.mlir.constant(2 : i64) : !llvm.i64
-    %2 = llvm.mlir.constant(3 : i64) : !llvm.i64
-    %3 = llvm.mlir.constant(4 : i64) : !llvm.i64
+    %0 = llvm.mlir.constant(1 : i64) : i64
+    %1 = llvm.mlir.constant(2 : i64) : i64
+    %2 = llvm.mlir.constant(3 : i64) : i64
+    %3 = llvm.mlir.constant(4 : i64) : i64
     %4 = llvm.mlir.undef : !llvm.vec<4 x i64>
-    %5 = llvm.mlir.constant(0 : index) : !llvm.i64
-    %6 = llvm.insertelement %0, %4[%5 : !llvm.i64] : !llvm.vec<4 x i64>
+    %5 = llvm.mlir.constant(0 : index) : i64
+    %6 = llvm.insertelement %0, %4[%5 : i64] : !llvm.vec<4 x i64>
     %7 = llvm.shufflevector %6, %4 [0 : i64, 0 : i64, 0 : i64, 0 : i64]
       : !llvm.vec<4 x i64>, !llvm.vec<4 x i64>
-    %8 = llvm.mlir.constant(1 : i64) : !llvm.i64
-    %9 = llvm.insertelement %1, %7[%8 : !llvm.i64] : !llvm.vec<4 x i64>
-    %10 = llvm.mlir.constant(2 : i64) : !llvm.i64
-    %11 = llvm.insertelement %2, %9[%10 : !llvm.i64] : !llvm.vec<4 x i64>
-    %12 = llvm.mlir.constant(3 : i64) : !llvm.i64
-    %v = llvm.insertelement %3, %11[%12 : !llvm.i64] : !llvm.vec<4 x i64>
+    %8 = llvm.mlir.constant(1 : i64) : i64
+    %9 = llvm.insertelement %1, %7[%8 : i64] : !llvm.vec<4 x i64>
+    %10 = llvm.mlir.constant(2 : i64) : i64
+    %11 = llvm.insertelement %2, %9[%10 : i64] : !llvm.vec<4 x i64>
+    %12 = llvm.mlir.constant(3 : i64) : i64
+    %v = llvm.insertelement %3, %11[%12 : i64] : !llvm.vec<4 x i64>
 
     %add = "llvm.intr.vector.reduce.add"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%add) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%add) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 10
 
     %and = "llvm.intr.vector.reduce.and"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%and) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%and) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 0
 
     %mul = "llvm.intr.vector.reduce.mul"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%mul) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%mul) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 24
 
     %or = "llvm.intr.vector.reduce.or"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%or) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%or) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 7
 
     %smax = "llvm.intr.vector.reduce.smax"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%smax) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%smax) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 4
 
     %smin = "llvm.intr.vector.reduce.smin"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%smin) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%smin) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 1
 
    %umax = "llvm.intr.vector.reduce.umax"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%umax) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%umax) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 4
 
    %umin = "llvm.intr.vector.reduce.umin"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%umin) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%umin) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 1
 
    %xor = "llvm.intr.vector.reduce.xor"(%v)
-      : (!llvm.vec<4 x i64>) -> !llvm.i64
-    llvm.call @printI64(%xor) : (!llvm.i64) -> ()
+      : (!llvm.vec<4 x i64>) -> i64
+    llvm.call @printI64(%xor) : (i64) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 4
diff --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -66,7 +66,7 @@
 struct AsyncAPI {
   // All async types are lowered to opaque i8* LLVM pointers at runtime.
   static LLVM::LLVMPointerType opaquePointerType(MLIRContext *ctx) {
-    return LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(ctx, 8));
+    return LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8));
   }
 
   static FunctionType addOrDropRefFunctionType(MLIRContext *ctx) {
@@ -222,10 +222,10 @@
   auto token = LLVMTokenType::get(ctx);
   auto voidTy = LLVMVoidType::get(ctx);
-  auto i8 = LLVMIntegerType::get(ctx, 8);
-  auto i1 = LLVMIntegerType::get(ctx, 1);
-  auto i32 = LLVMIntegerType::get(ctx, 32);
-  auto i64 = LLVMIntegerType::get(ctx, 64);
+  auto i8 = IntegerType::get(ctx, 8);
+  auto i1 = IntegerType::get(ctx, 1);
+  auto i32 = IntegerType::get(ctx, 32);
+  auto i64 = IntegerType::get(ctx, 64);
   auto i8Ptr = LLVMPointerType::get(i8);
 
   addLLVMFuncDecl(module, builder, kCoroId, token, {i32, i8Ptr, i8Ptr, i8Ptr});
@@ -254,8 +254,8 @@
                            module.getBody()->getTerminator());
 
   auto voidTy = LLVMVoidType::get(ctx);
-  auto i64 = LLVMIntegerType::get(ctx, 64);
-  auto i8Ptr = LLVMPointerType::get(LLVMIntegerType::get(ctx, 8));
+  auto i64 = IntegerType::get(ctx, 64);
+  auto i8Ptr = LLVMPointerType::get(IntegerType::get(ctx, 8));
 
   addLLVMFuncDecl(module, builder, kMalloc, i8Ptr, {i64});
   addLLVMFuncDecl(module, builder, kFree, voidTy, {i8Ptr});
@@ -280,7 +280,7 @@
     return;
 
   auto voidTy = LLVM::LLVMVoidType::get(ctx);
-  auto i8Ptr = LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(ctx, 8));
+  auto i8Ptr = LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8));
 
   auto resumeOp = moduleBuilder.create<LLVM::LLVMFuncOp>(
       loc, kResume, LLVM::LLVMFunctionType::get(voidTy, {i8Ptr}));
@@ -361,10 +361,10 @@
   MLIRContext *ctx = func.getContext();
 
   auto token = LLVM::LLVMTokenType::get(ctx);
-  auto i1 = LLVM::LLVMIntegerType::get(ctx, 1);
-  auto i32 = LLVM::LLVMIntegerType::get(ctx, 32);
-  auto i64 = LLVM::LLVMIntegerType::get(ctx, 64);
-  auto i8Ptr = LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(ctx, 8));
+  auto i1 = IntegerType::get(ctx, 1);
+  auto i32 = IntegerType::get(ctx, 32);
+  auto i64 = IntegerType::get(ctx, 64);
+  auto i8Ptr = LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8));
 
   Block *entryBlock = func.addEntryBlock();
   Location loc = func.getBody().getLoc();
@@ -393,11 +393,7 @@
                                            builder.getI32IntegerAttr(1));
     auto gep = builder.create<LLVM::GEPOp>(loc, storagePtrType, nullPtr,
                                            one.getResult());
-    auto size = builder.create<LLVM::PtrToIntOp>(loc, i32, gep);
-
-    // Cast to std type because runtime API defined using std types.
-    return builder.create<LLVM::DialectCastOp>(loc, builder.getI32Type(),
-                                               size.getResult());
+    return builder.create<LLVM::PtrToIntOp>(loc, i32, gep);
   };
 
   // We use the `async.value` type as a return type although it does not match
@@ -529,8 +525,8 @@
                                       OpBuilder &builder) {
   Location loc = op->getLoc();
   MLIRContext *ctx = op->getContext();
-  auto i1 = LLVM::LLVMIntegerType::get(ctx, 1);
-  auto i8 = LLVM::LLVMIntegerType::get(ctx, 8);
+  auto i1 = IntegerType::get(ctx, 1);
+  auto i8 = IntegerType::get(ctx, 8);
 
   // Add a coroutine suspension in place of original `op` in the split block.
   OpBuilder::InsertionGuard guard(builder);
diff --git a/mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp b/mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp
--- a/mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp
+++ b/mlir/lib/Conversion/GPUCommon/ConvertLaunchFuncToRuntimeCalls.cpp
@@ -75,12 +75,12 @@
   Type llvmVoidType = LLVM::LLVMVoidType::get(context);
   Type llvmPointerType =
-      LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
+      LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
   Type llvmPointerPointerType = LLVM::LLVMPointerType::get(llvmPointerType);
-  Type llvmInt8Type = LLVM::LLVMIntegerType::get(context, 8);
-  Type llvmInt32Type = LLVM::LLVMIntegerType::get(context, 32);
-  Type llvmInt64Type = LLVM::LLVMIntegerType::get(context, 64);
-  Type llvmIntPtrType = LLVM::LLVMIntegerType::get(
+  Type llvmInt8Type = IntegerType::get(context, 8);
+  Type llvmInt32Type = IntegerType::get(context, 32);
+  Type llvmInt64Type = IntegerType::get(context, 64);
+  Type llvmIntPtrType = IntegerType::get(
       context, this->getTypeConverter()->getPointerBitwidth(0));
 
   FunctionCallBuilder moduleLoadCallBuilder = {
@@ -716,10 +716,10 @@
 void mlir::populateGpuToLLVMConversionPatterns(
     LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
     StringRef gpuBinaryAnnotation) {
-  converter.addConversion([context = &converter.getContext()](
-                              gpu::AsyncTokenType type) -> Type {
-    return LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
-  });
+  converter.addConversion(
+      [context = &converter.getContext()](gpu::AsyncTokenType type) -> Type {
+        return LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
+      });
   patterns.insert();
diff --git a/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h b/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
--- a/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
+++ b/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h
@@ -48,16 +48,13 @@
     Value newOp;
     switch (dimensionToIndex(op)) {
     case X:
-      newOp =
-          rewriter.create<XOp>(loc, LLVM::LLVMIntegerType::get(context, 32));
+      newOp = rewriter.create<XOp>(loc, IntegerType::get(context, 32));
       break;
     case Y:
-      newOp =
-          rewriter.create<YOp>(loc, LLVM::LLVMIntegerType::get(context, 32));
+      newOp = rewriter.create<YOp>(loc, IntegerType::get(context, 32));
       break;
     case Z:
-      newOp =
-          rewriter.create<ZOp>(loc, LLVM::LLVMIntegerType::get(context, 32));
+      newOp = rewriter.create<ZOp>(loc, IntegerType::get(context, 32));
       break;
     default:
       return failure();
@@ -65,10 +62,10 @@
     if (indexBitwidth > 32) {
       newOp = rewriter.create<LLVM::SExtOp>(
-          loc, LLVM::LLVMIntegerType::get(context, indexBitwidth), newOp);
+          loc, IntegerType::get(context, indexBitwidth), newOp);
     } else if (indexBitwidth < 32) {
       newOp = rewriter.create<LLVM::TruncOp>(
-          loc, LLVM::LLVMIntegerType::get(context, indexBitwidth), newOp);
+          loc, IntegerType::get(context, indexBitwidth), newOp);
     }
 
     rewriter.replaceOp(op, {newOp});
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -40,10 +40,10 @@
   /// which threads participate in the shuffle) and a maskAndClamp (specifying
   /// the highest lane which participates in the shuffle).
   ///
-  ///     %one = llvm.constant(1 : i32) : !llvm.i32
-  ///     %shl = llvm.shl %one, %width : !llvm.i32
-  ///     %active_mask = llvm.sub %shl, %one : !llvm.i32
-  ///     %mask_and_clamp = llvm.sub %width, %one : !llvm.i32
+  ///     %one = llvm.constant(1 : i32) : i32
+  ///     %shl = llvm.shl %one, %width : i32
+  ///     %active_mask = llvm.sub %shl, %one : i32
+  ///     %mask_and_clamp = llvm.sub %width, %one : i32
   ///     %shfl = nvvm.shfl.sync.bfly %active_mask, %value, %offset,
   ///         %mask_and_clamp : !llvm<"{ float, i1 }">
   ///     %shfl_value = llvm.extractvalue %shfl[0 : index] :
@@ -57,8 +57,8 @@
     gpu::ShuffleOpAdaptor adaptor(operands);
 
     auto valueTy = adaptor.value().getType();
-    auto int32Type = LLVM::LLVMIntegerType::get(rewriter.getContext(), 32);
-    auto predTy = LLVM::LLVMIntegerType::get(rewriter.getContext(), 1);
+    auto int32Type = IntegerType::get(rewriter.getContext(), 32);
+    auto predTy = IntegerType::get(rewriter.getContext(), 1);
     auto resultTy = LLVM::LLVMStructType::getLiteral(rewriter.getContext(),
                                                      {valueTy, predTy});
diff --git a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
--- a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
+++ b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
@@ -59,10 +59,10 @@
   void initializeCachedTypes() {
     llvmFloatType = LLVM::LLVMFloatType::get(&getContext());
     llvmVoidType = LLVM::LLVMVoidType::get(&getContext());
-    llvmPointerType = LLVM::LLVMPointerType::get(
-        LLVM::LLVMIntegerType::get(&getContext(), 8));
-    llvmInt32Type = LLVM::LLVMIntegerType::get(&getContext(), 32);
-    llvmInt64Type = LLVM::LLVMIntegerType::get(&getContext(), 64);
+    llvmPointerType =
+        LLVM::LLVMPointerType::get(IntegerType::get(&getContext(), 8));
+    llvmInt32Type = IntegerType::get(&getContext(), 32);
+    llvmInt64Type = IntegerType::get(&getContext(), 64);
   }
 
   Type getMemRefType(uint32_t rank, Type elemenType) {
@@ -136,12 +136,12 @@
       return "Float";
     if (type.isa<LLVM::LLVMHalfType>())
       return "Half";
-    if (auto intType = type.dyn_cast<LLVM::LLVMIntegerType>()) {
-      if (intType.getBitWidth() == 32)
+    if (auto intType = type.dyn_cast<IntegerType>()) {
+      if (intType.getWidth() == 32)
         return "Int32";
-      if (intType.getBitWidth() == 16)
+      if (intType.getWidth() == 16)
        return "Int16";
-      if (intType.getBitWidth() == 8)
+      if (intType.getWidth() == 8)
        return "Int8";
     }
 
@@ -242,8 +242,7 @@
     // Special case for fp16 type. Since it is not a supported type in C we use
     // int16_t and bitcast the descriptor.
     if (type.isa<LLVM::LLVMHalfType>()) {
-      auto memRefTy =
-          getMemRefType(rank, LLVM::LLVMIntegerType::get(&getContext(), 16));
+      auto memRefTy = getMemRefType(rank, IntegerType::get(&getContext(), 16));
       ptrToMemRefDescriptor = builder.create<LLVM::BitcastOp>(
           loc, LLVM::LLVMPointerType::get(memRefTy), ptrToMemRefDescriptor);
     }
@@ -325,15 +324,15 @@
     for (unsigned i = 1; i <= 3; i++) {
       SmallVector<Type, 5> types{LLVM::LLVMFloatType::get(&getContext()),
-                                 LLVM::LLVMIntegerType::get(&getContext(), 32),
-                                 LLVM::LLVMIntegerType::get(&getContext(), 16),
-                                 LLVM::LLVMIntegerType::get(&getContext(), 8),
+                                 IntegerType::get(&getContext(), 32),
+                                 IntegerType::get(&getContext(), 16),
+                                 IntegerType::get(&getContext(), 8),
                                  LLVM::LLVMHalfType::get(&getContext())};
       for (auto type : types) {
         std::string fnName = "bindMemRef" + std::to_string(i) + "D" +
                              std::string(stringifyType(type));
         if (type.isa<LLVM::LLVMHalfType>())
-          type = LLVM::LLVMIntegerType::get(&getContext(), 16);
+          type = IntegerType::get(&getContext(), 16);
         if (!module.lookupSymbol(fnName)) {
           auto fnType = LLVM::LLVMFunctionType::get(
               getVoidType(),
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
--- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
@@ -60,7 +60,7 @@
 static void copy(Location loc, Value dst, Value src, Value size,
                  OpBuilder &builder) {
   MLIRContext *context = builder.getContext();
-  auto llvmI1Type = LLVM::LLVMIntegerType::get(context, 1);
+  auto llvmI1Type = IntegerType::get(context, 1);
   Value isVolatile = builder.create<LLVM::ConstantOp>(
       loc, llvmI1Type, builder.getBoolAttr(false));
   builder.create<LLVM::MemcpyOp>(loc, dst, src, size, isVolatile);
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -68,8 +68,8 @@
 static unsigned getLLVMTypeBitWidth(Type type) {
   auto vectorType = type.dyn_cast<LLVM::LLVMVectorType>();
   return (vectorType ? vectorType.getElementType() : type)
-      .cast<LLVM::LLVMIntegerType>()
-      .getBitWidth();
+      .cast<IntegerType>()
+      .getWidth();
 }
 
 /// Creates `IntegerAttribute` with all bits set for given type
@@ -213,7 +213,7 @@
 static Value createI32ConstantOf(Location loc, PatternRewriter &rewriter,
                                  unsigned value) {
   return rewriter.create<LLVM::ConstantOp>(
-      loc, LLVM::LLVMIntegerType::get(rewriter.getContext(), 32),
+      loc, IntegerType::get(rewriter.getContext(), 32),
       rewriter.getIntegerAttr(rewriter.getI32Type(), value));
 }
 
@@ -661,7 +661,7 @@
     //   int32_t executionMode;
     //   int32_t values[];          // optional values
     // };
-    auto llvmI32Type = LLVM::LLVMIntegerType::get(context, 32);
+    auto llvmI32Type = IntegerType::get(context, 32);
     SmallVector<Type, 2> fields;
     fields.push_back(llvmI32Type);
     ArrayAttr values = op.values();
diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -174,7 +174,7 @@
 }
 
 Type LLVMTypeConverter::getIndexType() {
-  return LLVM::LLVMIntegerType::get(&getContext(), getIndexTypeBitwidth());
+  return IntegerType::get(&getContext(), getIndexTypeBitwidth());
 }
 
 unsigned LLVMTypeConverter::getPointerBitwidth(unsigned addressSpace) {
@@ -186,7 +186,7 @@
 }
 
 Type LLVMTypeConverter::convertIntegerType(IntegerType type) {
-  return LLVM::LLVMIntegerType::get(&getContext(), type.getWidth());
+  return IntegerType::get(&getContext(), type.getWidth());
 }
 
 Type LLVMTypeConverter::convertFloatType(FloatType type) {
@@ -361,8 +361,8 @@
 /// stack allocated (alloca) copy of a MemRef descriptor that got casted to
 /// be unranked.
 SmallVector<Type, 2> LLVMTypeConverter::getUnrankedMemRefDescriptorFields() {
-  return {getIndexType(), LLVM::LLVMPointerType::get(
-                              LLVM::LLVMIntegerType::get(&getContext(), 8))};
+  return {getIndexType(),
+          LLVM::LLVMPointerType::get(IntegerType::get(&getContext(), 8))};
 }
 
 Type LLVMTypeConverter::convertUnrankedMemRefType(UnrankedMemRefType type) {
@@ -1021,9 +1021,8 @@
 }
 
 Type ConvertToLLVMPattern::getIntPtrType(unsigned addressSpace) const {
-  return LLVM::LLVMIntegerType::get(
-      &getTypeConverter()->getContext(),
-      getTypeConverter()->getPointerBitwidth(addressSpace));
+  return IntegerType::get(&getTypeConverter()->getContext(),
+                          getTypeConverter()->getPointerBitwidth(addressSpace));
 }
 
 Type ConvertToLLVMPattern::getVoidType() const {
@@ -1032,7 +1031,7 @@
 
 Type ConvertToLLVMPattern::getVoidPtrType() const {
   return LLVM::LLVMPointerType::get(
-      LLVM::LLVMIntegerType::get(&getTypeConverter()->getContext(), 8));
+      IntegerType::get(&getTypeConverter()->getContext(), 8));
 }
 
 Value ConvertToLLVMPattern::createIndexConstant(
@@ -2197,9 +2196,8 @@
   // Get frequently used types.
   MLIRContext *context = builder.getContext();
   auto voidType = LLVM::LLVMVoidType::get(context);
-  Type voidPtrType =
-      LLVM::LLVMPointerType::get(LLVM::LLVMIntegerType::get(context, 8));
-  auto i1Type = LLVM::LLVMIntegerType::get(context, 1);
+  Type voidPtrType = LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
+  auto i1Type = IntegerType::get(context, 1);
   Type indexType = typeConverter.getIndexType();
 
   // Find the malloc and free, or declare them if necessary.
@@ -2838,7 +2836,7 @@
     Value zeroIndex = createIndexConstant(rewriter, loc, 0);
     Value pred = rewriter.create<LLVM::ICmpOp>(
-        loc, LLVM::LLVMIntegerType::get(rewriter.getContext(), 1),
+        loc, IntegerType::get(rewriter.getContext(), 1),
         LLVM::ICmpPredicate::sge, indexArg, zeroIndex);
 
     Block *bodyBlock =
@@ -3107,10 +3105,10 @@
     auto targetType =
         typeConverter->convertType(indexCastOp.getResult().getType())
-            .cast<LLVM::LLVMIntegerType>();
-    auto sourceType = transformed.in().getType().cast<LLVM::LLVMIntegerType>();
-    unsigned targetBits = targetType.getBitWidth();
-    unsigned sourceBits = sourceType.getBitWidth();
+            .cast<IntegerType>();
+    auto sourceType = transformed.in().getType().cast<IntegerType>();
+    unsigned targetBits = targetType.getWidth();
+    unsigned sourceBits = sourceType.getWidth();
 
     if (targetBits == sourceBits)
       rewriter.replaceOp(indexCastOp, transformed.in());
@@ -3870,7 +3868,7 @@
     // Append the cmpxchg op to the end of the loop block.
     auto successOrdering = LLVM::AtomicOrdering::acq_rel;
     auto failureOrdering = LLVM::AtomicOrdering::monotonic;
-    auto boolType = LLVM::LLVMIntegerType::get(rewriter.getContext(), 1);
+    auto boolType = IntegerType::get(rewriter.getContext(), 1);
     auto pairType = LLVM::LLVMStructType::getLiteral(rewriter.getContext(),
                                                      {valueType, boolType});
     auto cmpxchg = rewriter.create<LLVM::AtomicCmpXchgOp>(
@@ -4054,7 +4052,7 @@
 Value LLVMTypeConverter::promoteOneMemRefDescriptor(Location loc, Value operand,
                                                     OpBuilder &builder) {
   auto *context = builder.getContext();
-  auto int64Ty = LLVM::LLVMIntegerType::get(builder.getContext(), 64);
+  auto int64Ty = IntegerType::get(builder.getContext(), 64);
   auto indexType = IndexType::get(context);
   // Alloca with proper alignment. We do not expect optimizations of this
   // alloca op and so we omit allocating at the entry block.
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -747,7 +747,7 @@
 
     // Remaining extraction of element from 1-D LLVM vector
     auto position = positionAttrs.back().cast<IntegerAttr>();
-    auto i64Type = LLVM::LLVMIntegerType::get(rewriter.getContext(), 64);
+    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
     auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
     extracted =
         rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
 
@@ -855,7 +855,7 @@
     }
 
     // Insertion of an element into a 1-D LLVM vector.
-    auto i64Type = LLVM::LLVMIntegerType::get(rewriter.getContext(), 64);
+    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
     auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
     Value inserted = rewriter.create<LLVM::InsertElementOp>(
         loc, typeConverter->convertType(oneDVectorType), extracted,
@@ -1121,7 +1121,7 @@
         }))
       return failure();
 
-    auto int64Ty = LLVM::LLVMIntegerType::get(rewriter.getContext(), 64);
+    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
 
     // Create descriptor.
     auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
@@ -1360,11 +1360,11 @@
     switch (conversion) {
     case PrintConversion::ZeroExt64:
       value = rewriter.create<LLVM::ZExtOp>(
-          loc, value, LLVM::LLVMIntegerType::get(rewriter.getContext(), 64));
+          loc, value, IntegerType::get(rewriter.getContext(), 64));
       break;
     case PrintConversion::SignExt64:
       value = rewriter.create<LLVM::SExtOp>(
-          loc, value, LLVM::LLVMIntegerType::get(rewriter.getContext(), 64));
+          loc, value, IntegerType::get(rewriter.getContext(), 64));
       break;
     case PrintConversion::None:
       break;
@@ -1414,12 +1414,10 @@
     // Helpers for method names.
     Operation *getPrintI64(Operation *op) const {
-      return getPrint(op, "printI64",
-                      LLVM::LLVMIntegerType::get(op->getContext(), 64));
+      return getPrint(op, "printI64", IntegerType::get(op->getContext(), 64));
     }
     Operation *getPrintU64(Operation *op) const {
-      return getPrint(op, "printU64",
-                      LLVM::LLVMIntegerType::get(op->getContext(), 64));
+      return getPrint(op, "printU64", IntegerType::get(op->getContext(), 64));
     }
     Operation *getPrintFloat(Operation *op) const {
       return getPrint(op, "printF32",
                       LLVM::LLVMFloatType::get(op->getContext()));
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -146,7 +146,7 @@
 
   // The result type is either i1 or a vector type if the inputs are
   // vectors.
-  Type resultType = LLVMIntegerType::get(builder.getContext(), 1);
+  Type resultType = IntegerType::get(builder.getContext(), 1);
   if (!isCompatibleType(type))
     return parser.emitError(trailingTypeLoc,
                             "expected LLVM dialect-compatible type");
@@ -1254,7 +1254,7 @@
 // TODO: make the size depend on data layout rather than on the conversion
 // pass option, and pull that information here.
 static LogicalResult verifyCastWithIndex(Type llvmType) {
-  return success(llvmType.isa<LLVMIntegerType>());
+  return success(llvmType.isa<IntegerType>());
 }
 
 /// Checks if `llvmType` is dialect cast-compatible with built-in `type` and
@@ -1294,19 +1294,6 @@
         "invalid cast between f64 and a type other than !llvm.double");
   }
 
-  // Singless integers are compatible with LLVM integer of the same bitwidth.
-  if (type.isSignlessInteger()) {
-    auto llvmInt = llvmType.dyn_cast<LLVMIntegerType>();
-    if (!llvmInt)
-      return op->emitOpError(
-          "invalid cast between integer and non-integer type");
-    if (llvmInt.getBitWidth() == type.getIntOrFloatBitWidth())
-      return success();
-
-    return op->emitOpError(
-        "invalid cast between integers with mismatching bitwidth");
-  }
-
   // Vectors are compatible if they are 1D non-scalable, and their element types
   // are compatible.
   if (auto vectorType = type.dyn_cast<VectorType>()) {
@@ -1413,9 +1400,8 @@
   auto ptrType = structType.getBody()[1].dyn_cast<LLVMPointerType>();
   auto ptrElementType =
-      ptrType ? ptrType.getElementType().dyn_cast<LLVMIntegerType>()
-              : nullptr;
-  if (!ptrElementType || ptrElementType.getBitWidth() != 8)
+      ptrType ? ptrType.getElementType().dyn_cast<IntegerType>() : nullptr;
+  if (!ptrElementType || ptrElementType.getWidth() != 8)
     return op->emitOpError("expected second element of a memref descriptor "
                            "to be an !llvm.ptr<i8>");
 
@@ -1515,8 +1501,8 @@
   if (types.empty()) {
     if (auto strAttr = value.dyn_cast_or_null<StringAttr>()) {
       MLIRContext *context = parser.getBuilder().getContext();
-      auto arrayType = LLVM::LLVMArrayType::get(
-          LLVM::LLVMIntegerType::get(context, 8), strAttr.getValue().size());
+      auto arrayType = LLVM::LLVMArrayType::get(IntegerType::get(context, 8),
+                                                strAttr.getValue().size());
       types.push_back(arrayType);
     } else {
       return parser.emitError(parser.getNameLoc(),
@@ -1543,9 +1529,9 @@
   if (auto strAttr = op.getValueOrNull().dyn_cast_or_null<StringAttr>()) {
     auto type = op.getType().dyn_cast<LLVMArrayType>();
-    LLVMIntegerType elementType =
-        type ? type.getElementType().dyn_cast<LLVMIntegerType>() : nullptr;
-    if (!elementType || elementType.getBitWidth() != 8 ||
+    IntegerType elementType =
+        type ? type.getElementType().dyn_cast<IntegerType>() : nullptr;
@@ -1543,9 +1529,9 @@
   if (auto strAttr = op.getValueOrNull().dyn_cast_or_null<StringAttr>()) {
     auto type = op.getType().dyn_cast<LLVMArrayType>();
-    LLVMIntegerType elementType =
-        type ? type.getElementType().dyn_cast<LLVMIntegerType>() : nullptr;
-    if (!elementType || elementType.getBitWidth() != 8 ||
+    IntegerType elementType =
+        type ? type.getElementType().dyn_cast<IntegerType>() : nullptr;
+    if (!elementType || elementType.getWidth() != 8 ||
         type.getNumElements() != strAttr.getValue().size())
       return op.emitOpError(
           "requires an i8 array type of the length equal to that of the string "
@@ -1957,16 +1943,16 @@
     if (!mlir::LLVM::isCompatibleFloatingPointType(valType))
       return op.emitOpError("expected LLVM IR floating point type");
   } else if (op.bin_op() == AtomicBinOp::xchg) {
-    auto intType = valType.dyn_cast<LLVMIntegerType>();
-    unsigned intBitWidth = intType ? intType.getBitWidth() : 0;
+    auto intType = valType.dyn_cast<IntegerType>();
+    unsigned intBitWidth = intType ? intType.getWidth() : 0;
     if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
         intBitWidth != 64 && !valType.isa<LLVMHalfType>() &&
         !valType.isa<LLVMBFloatType>() && !valType.isa<LLVMFloatType>() &&
         !valType.isa<LLVMDoubleType>())
       return op.emitOpError("unexpected LLVM IR type for 'xchg' bin_op");
   } else {
-    auto intType = valType.dyn_cast<LLVMIntegerType>();
-    unsigned intBitWidth = intType ? intType.getBitWidth() : 0;
+    auto intType = valType.dyn_cast<IntegerType>();
+    unsigned intBitWidth = intType ? intType.getWidth() : 0;
     if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
         intBitWidth != 64)
       return op.emitOpError("expected LLVM IR integer type");
@@ -2007,7 +1993,7 @@
       parser.resolveOperand(val, type, result.operands))
     return failure();

-  auto boolType = LLVMIntegerType::get(builder.getContext(), 1);
+  auto boolType = IntegerType::get(builder.getContext(), 1);
   auto resultType = LLVMStructType::getLiteral(builder.getContext(),
                                                {type, boolType});
   result.addTypes(resultType);
@@ -2024,8 +2010,8 @@
   if (cmpType != ptrType.getElementType() || cmpType != valType)
     return op.emitOpError("expected LLVM IR element type for operand #0 to "
                           "match type for all other operands");
-  auto intType = valType.dyn_cast<LLVMIntegerType>();
-  unsigned intBitWidth = intType ? intType.getBitWidth() : 0;
+  auto intType = valType.dyn_cast<IntegerType>();
+  unsigned intBitWidth = intType ? intType.getWidth() : 0;
   if (!valType.isa<LLVMPointerType>() && intBitWidth != 8 &&
       intBitWidth != 16 && intBitWidth != 32 && intBitWidth != 64 &&
       !valType.isa<LLVMHalfType>() && !valType.isa<LLVMBFloatType>() &&
@@ -2102,7 +2088,6 @@
            LLVMLabelType,
            LLVMMetadataType,
            LLVMFunctionType,
-           LLVMIntegerType,
            LLVMPointerType,
            LLVMFixedVectorType,
            LLVMScalableVectorType,
@@ -2199,8 +2184,7 @@
   // Create the global at the entry of the module.
   OpBuilder moduleBuilder(module.getBodyRegion());
   MLIRContext *ctx = builder.getContext();
-  auto type = LLVM::LLVMArrayType::get(LLVM::LLVMIntegerType::get(ctx, 8),
-                                       value.size());
+  auto type = LLVM::LLVMArrayType::get(IntegerType::get(ctx, 8), value.size());
   auto global = moduleBuilder.create<LLVM::GlobalOp>(
       loc, type, /*isConstant=*/true, linkage, name,
       builder.getStringAttr(value));
@@ -2208,10 +2192,10 @@
   // Get the pointer to the first character in the global string.
   Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
   Value cst0 = builder.create<LLVM::ConstantOp>(
-      loc, LLVM::LLVMIntegerType::get(ctx, 64),
+      loc, IntegerType::get(ctx, 64),
       builder.getIntegerAttr(builder.getIndexType(), 0));
   return builder.create<LLVM::GEPOp>(
-      loc, LLVM::LLVMPointerType::get(LLVMIntegerType::get(ctx, 8)), globalPtr,
+      loc, LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8)), globalPtr,
       ValueRange{cst0, cst0});
 }
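The verifier branch deleted above also hints at the user-level simplification: an `llvm.mlir.cast` between a built-in integer and its LLVM dialect twin no longer has anything to do once the types are shared. A hypothetical before/after snippet, not taken from the patch:

```mlir
// Before: crossing the dialect boundary required a cast such as
//   %1 = llvm.mlir.cast %0 : i32 to !llvm.i32
// After: the same built-in i32 value feeds LLVM dialect ops directly.
llvm.func @add_one(%arg0: i32) -> i32 {
  %c1 = llvm.mlir.constant(1 : i32) : i32
  %0 = llvm.add %arg0, %c1 : i32
  llvm.return %0 : i32
}
```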
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
@@ -24,7 +24,7 @@
 /// internal functions to avoid getting a verbose `!llvm` prefix. Otherwise
 /// prints it as usual.
 static void dispatchPrint(DialectAsmPrinter &printer, Type type) {
-  if (isCompatibleType(type))
+  if (isCompatibleType(type) && !type.isa<IntegerType>())
     return mlir::LLVM::detail::printType(type, printer);
   printer.printType(type);
 }
@@ -45,7 +45,6 @@
       .Case<LLVMLabelType>([&](Type) { return "label"; })
       .Case<LLVMMetadataType>([&](Type) { return "metadata"; })
       .Case<LLVMFunctionType>([&](Type) { return "func"; })
-      .Case<LLVMIntegerType>([&](Type) { return "i"; })
       .Case<LLVMPointerType>([&](Type) { return "ptr"; })
       .Case<LLVMVectorType>([&](Type) { return "vec"; })
       .Case<LLVMArrayType>([&](Type) { return "array"; })
@@ -147,11 +146,6 @@
   printer << getTypeKeyword(type);

-  if (auto intType = type.dyn_cast<LLVMIntegerType>()) {
-    printer << intType.getBitWidth();
-    return;
-  }
-
   if (auto ptrType = type.dyn_cast<LLVMPointerType>()) {
     printer << '<';
     dispatchPrint(printer, ptrType.getElementType());
@@ -416,26 +410,30 @@
 /// will try to parse any type in full form (including types with the `!llvm`
 /// prefix), and on failure fall back to parsing the short-hand version of the
 /// LLVM dialect types without the `!llvm` prefix.
-static Type dispatchParse(DialectAsmParser &parser) {
-  Type type;
+static Type dispatchParse(DialectAsmParser &parser, bool allowAny = true) {
   llvm::SMLoc keyLoc = parser.getCurrentLocation();
-  Location loc = parser.getEncodedSourceLoc(keyLoc);
-  OptionalParseResult parseResult = parser.parseOptionalType(type);
-  if (parseResult.hasValue()) {
-    if (failed(*parseResult))
-      return Type();
-
-    // Special case for integers (i[1-9][0-9]*) that are literals rather than
-    // keywords for the parser, so they are not caught by the main dispatch
-    // below. Try parsing it a built-in integer type instead.
-    auto intType = type.dyn_cast<IntegerType>();
-    if (!intType || !intType.isSignless())
-      return type;
-
-    return LLVMIntegerType::getChecked(loc, intType.getWidth());
+
+  // Try parsing any MLIR type.
+  Type type;
+  OptionalParseResult result = parser.parseOptionalType(type);
+  if (result.hasValue()) {
+    if (failed(result.getValue()))
+      return nullptr;
+    // TODO: integer types are temporarily allowed for compatibility with the
+    // deprecated !llvm.i[0-9]+ syntax.
+    if (!allowAny) {
+      auto intType = type.dyn_cast<IntegerType>();
+      if (!intType || !intType.isSignless()) {
+        parser.emitError(keyLoc) << "unexpected type, expected keyword";
+        return nullptr;
+      }
+      Location loc = parser.getEncodedSourceLoc(keyLoc);
+      emitWarning(loc) << "deprecated syntax, drop '!llvm.' for integers";
+    }
+    return type;
   }

-  // Dispatch to concrete functions.
+  // If no type found, fallback to the shorthand form.
   StringRef key;
   if (failed(parser.parseKeyword(&key)))
     return Type();
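For illustration, the observable effect of `dispatchParse` with `allowAny=false`: the old integer spelling still parses but triggers the deprecation warning, while the built-in spelling is canonical. A hypothetical snippet, not one of the patch's tests:

```mlir
// Deprecated: accepted with the warning
// "deprecated syntax, drop '!llvm.' for integers".
//   %0 = llvm.mlir.constant(42 : i32) : !llvm.i32
// Canonical spelling after this change:
%0 = llvm.mlir.constant(42 : i32) : i32
```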
@@ -474,11 +472,11 @@
 /// Parses one of the LLVM dialect types.
 Type mlir::LLVM::detail::parseType(DialectAsmParser &parser) {
   llvm::SMLoc loc = parser.getCurrentLocation();
-  Type type = dispatchParse(parser);
+  Type type = dispatchParse(parser, /*allowAny=*/false);
   if (!type)
     return type;
   if (!isCompatibleType(type)) {
-    parser.emitError(loc) << "unexpected type, expected i* or keyword";
+    parser.emitError(loc) << "unexpected type, expected keyword";
     return nullptr;
   }
   return type;
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -15,6 +15,7 @@
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
 #include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/DialectImplementation.h"
 #include "mlir/IR/TypeSupport.h"
@@ -110,28 +111,6 @@
   return success();
 }

-//===----------------------------------------------------------------------===//
-// Integer type.
-//===----------------------------------------------------------------------===//
-
-LLVMIntegerType LLVMIntegerType::get(MLIRContext *ctx, unsigned bitwidth) {
-  return Base::get(ctx, bitwidth);
-}
-
-LLVMIntegerType LLVMIntegerType::getChecked(Location loc, unsigned bitwidth) {
-  return Base::getChecked(loc, bitwidth);
-}
-
-unsigned LLVMIntegerType::getBitWidth() { return getImpl()->bitwidth; }
-
-LogicalResult LLVMIntegerType::verifyConstructionInvariants(Location loc,
-                                                            unsigned bitwidth) {
-  constexpr int maxSupportedBitwidth = (1 << 24);
-  if (bitwidth >= maxSupportedBitwidth)
-    return emitError(loc, "integer type too wide");
-  return success();
-}
-
 //===----------------------------------------------------------------------===//
 // Pointer type.
 //===----------------------------------------------------------------------===//
@@ -258,7 +237,9 @@
 //===----------------------------------------------------------------------===//

 bool LLVMVectorType::isValidElementType(Type type) {
-  return type.isa<LLVMIntegerType, LLVMPointerType>() ||
+  if (auto intType = type.dyn_cast<IntegerType>())
+    return intType.isSignless();
+  return type.isa<LLVMPointerType>() ||
          mlir::LLVM::isCompatibleFloatingPointType(type);
 }
@@ -330,6 +311,34 @@
 // Utility functions.
 //===----------------------------------------------------------------------===//

+bool mlir::LLVM::isCompatibleType(Type type) {
+  // Only signless integers are compatible.
+  if (auto intType = type.dyn_cast<IntegerType>())
+    return intType.isSignless();
+
+  // clang-format off
+  return type.isa<
+      LLVMArrayType,
+      LLVMBFloatType,
+      LLVMDoubleType,
+      LLVMFP128Type,
+      LLVMFloatType,
+      LLVMFunctionType,
+      LLVMHalfType,
+      LLVMLabelType,
+      LLVMMetadataType,
+      LLVMPPCFP128Type,
+      LLVMPointerType,
+      LLVMStructType,
+      LLVMTokenType,
+      LLVMVectorType,
+      LLVMVoidType,
+      LLVMX86FP80Type,
+      LLVMX86MMXType
+  >();
+  // clang-format on
+}
+
 llvm::TypeSize mlir::LLVM::getPrimitiveTypeSizeInBits(Type type) {
   assert(isCompatibleType(type) &&
          "expected a type compatible with the LLVM dialect");
@@ -340,8 +349,8 @@
       .Case<LLVMFloatType>([](Type) { return llvm::TypeSize::Fixed(32); })
       .Case<LLVMDoubleType, LLVMX86MMXType>(
           [](Type) { return llvm::TypeSize::Fixed(64); })
-      .Case([](LLVMIntegerType intTy) {
-        return llvm::TypeSize::Fixed(intTy.getBitWidth());
+      .Case([](IntegerType intTy) {
+        return llvm::TypeSize::Fixed(intTy.getWidth());
       })
       .Case<LLVMX86FP80Type>([](Type) { return llvm::TypeSize::Fixed(80); })
       .Case<LLVMFP128Type, LLVMPPCFP128Type>(
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -62,8 +62,7 @@
     break;
   }

-  auto int32Ty =
-      LLVM::LLVMIntegerType::get(parser.getBuilder().getContext(), 32);
+  auto int32Ty = IntegerType::get(parser.getBuilder().getContext(), 32);
   return parser.resolveOperands(ops, {int32Ty, resultType, int32Ty, int32Ty},
                                 parser.getNameLoc(), result.operands);
 }
@@ -72,8 +71,8 @@
 static ParseResult parseNVVMVoteBallotOp(OpAsmParser &parser,
                                          OperationState &result) {
   MLIRContext *context = parser.getBuilder().getContext();
-  auto int32Ty = LLVM::LLVMIntegerType::get(context, 32);
-  auto int1Ty = LLVM::LLVMIntegerType::get(context, 1);
+  auto int32Ty = IntegerType::get(context, 32);
+  auto int1Ty = IntegerType::get(context, 1);

   SmallVector<OpAsmParser::OperandType, 8> ops;
   Type type;
diff --git a/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
@@ -46,8 +46,8 @@
     return failure();

   MLIRContext *context = parser.getBuilder().getContext();
-  auto int32Ty = LLVM::LLVMIntegerType::get(context, 32);
-  auto int1Ty = LLVM::LLVMIntegerType::get(context, 1);
+  auto int32Ty = IntegerType::get(context, 32);
+  auto int1Ty = IntegerType::get(context, 1);
   auto i32x4Ty = LLVM::LLVMFixedVectorType::get(int32Ty, 4);
   return parser.resolveOperands(ops,
                                 {i32x4Ty, int32Ty, int32Ty, int1Ty, int1Ty},
@@ -65,8 +65,8 @@
     return failure();

   MLIRContext *context = parser.getBuilder().getContext();
-  auto int32Ty = LLVM::LLVMIntegerType::get(context, 32);
-  auto int1Ty = LLVM::LLVMIntegerType::get(context, 1);
+  auto int32Ty = IntegerType::get(context, 32);
+  auto int1Ty = IntegerType::get(context, 1);
   auto i32x4Ty = LLVM::LLVMFixedVectorType::get(int32Ty, 4);

   if (parser.resolveOperands(ops,
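As the NVVM and ROCDL parser hunks above show, built-in integers now compose directly with the remaining LLVM dialect container types. A minimal hypothetical example (`@use_vec` is not part of the patch):

```mlir
// The i32 element and index types below are built-in; only the vector
// container is still an LLVM dialect type.
llvm.func @use_vec(%arg0: !llvm.vec<4 x i32>) {
  %c0 = llvm.mlir.constant(0 : i32) : i32
  %0 = llvm.extractelement %arg0[%c0 : i32] : !llvm.vec<4 x i32>
  llvm.return
}
```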
diff --git a/mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h b/mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h
--- a/mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h
+++ b/mlir/lib/Dialect/LLVMIR/IR/TypeDetail.h
@@ -373,27 +373,6 @@
   ArrayRef<Type> argumentTypes;
 };

-//===----------------------------------------------------------------------===//
-// LLVMIntegerTypeStorage.
-//===----------------------------------------------------------------------===//
-
-/// Storage type for LLVM dialect integer types. These are uniqued by bitwidth.
-struct LLVMIntegerTypeStorage : public TypeStorage {
-  using KeyTy = unsigned;
-
-  LLVMIntegerTypeStorage(unsigned width) : bitwidth(width) {}
-
-  static LLVMIntegerTypeStorage *construct(TypeStorageAllocator &allocator,
-                                           const KeyTy &key) {
-    return new (allocator.allocate<LLVMIntegerTypeStorage>())
-        LLVMIntegerTypeStorage(key);
-  }
-
-  bool operator==(const KeyTy &key) const { return key == bitwidth; }
-
-  unsigned bitwidth;
-};
-
 //===----------------------------------------------------------------------===//
 // LLVMPointerTypeStorage.
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp
--- a/mlir/lib/ExecutionEngine/JitRunner.cpp
+++ b/mlir/lib/ExecutionEngine/JitRunner.cpp
@@ -199,8 +199,8 @@
   auto resultType = mainFunction.getType()
                         .cast<LLVM::LLVMFunctionType>()
                         .getReturnType()
-                        .dyn_cast<LLVM::LLVMIntegerType>();
-  if (!resultType || resultType.getBitWidth() != 32)
+                        .dyn_cast<IntegerType>();
+  if (!resultType || resultType.getWidth() != 32)
     return make_string_error("only single llvm.i32 function result supported");
   return Error::success();
 }
@@ -209,8 +209,8 @@
   auto resultType = mainFunction.getType()
                         .cast<LLVM::LLVMFunctionType>()
                         .getReturnType()
-                        .dyn_cast<LLVM::LLVMIntegerType>();
-  if (!resultType || resultType.getBitWidth() != 64)
+                        .dyn_cast<IntegerType>();
+  if (!resultType || resultType.getWidth() != 64)
     return make_string_error("only single llvm.i64 function result supported");
   return Error::success();
 }
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -172,8 +172,8 @@
   if (!type)
     return nullptr;

-  if (auto intType = type.dyn_cast<LLVM::LLVMIntegerType>())
-    return b.getIntegerType(intType.getBitWidth());
+  if (auto intType = type.dyn_cast<IntegerType>())
+    return intType;

   if (type.isa<LLVM::LLVMFloatType>())
     return b.getF32Type();
@@ -244,7 +244,7 @@
   if (auto *c = dyn_cast<llvm::ConstantFP>(value)) {
     if (c->getType()->isDoubleTy())
       return b.getFloatAttr(FloatType::getF64(context), c->getValueAPF());
-    else if (c->getType()->isFloatingPointTy())
+    if (c->getType()->isFloatingPointTy())
       return b.getFloatAttr(FloatType::getF32(context), c->getValueAPF());
   }
   if (auto *f = dyn_cast<llvm::Function>(value))
@@ -261,7 +261,7 @@
   if (!attrType)
     return nullptr;

-  if (type.isa<LLVM::LLVMIntegerType>()) {
+  if (type.isa<IntegerType>()) {
     SmallVector<APInt, 8> values;
     values.reserve(cd->getNumElements());
     for (unsigned i = 0, e = cd->getNumElements(); i < e; ++i)
diff --git a/mlir/lib/Target/LLVMIR/TypeTranslation.cpp b/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
@@ -8,6 +8,7 @@

 #include "mlir/Target/LLVMIR/TypeTranslation.h"
 #include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/MLIRContext.h"

 #include "llvm/ADT/TypeSwitch.h"
@@ -71,10 +72,9 @@
         .Case([this](LLVM::LLVMMetadataType) {
           return llvm::Type::getMetadataTy(context);
         })
-        .Case<LLVM::LLVMArrayType, LLVM::LLVMIntegerType,
-              LLVM::LLVMFunctionType, LLVM::LLVMPointerType,
-              LLVM::LLVMStructType, LLVM::LLVMFixedVectorType,
-              LLVM::LLVMScalableVectorType>(
+        .Case<LLVM::LLVMArrayType, IntegerType, LLVM::LLVMFunctionType,
+              LLVM::LLVMPointerType, LLVM::LLVMStructType,
+              LLVM::LLVMFixedVectorType, LLVM::LLVMScalableVectorType>(
             [this](auto type) { return this->translate(type); })
         .Default([](Type t) -> llvm::Type * {
           llvm_unreachable("unknown LLVM dialect type");
@@ -101,8 +101,8 @@
   }

   /// Translates the given integer type.
-  llvm::Type *translate(LLVM::LLVMIntegerType type) {
-    return llvm::IntegerType::get(context, type.getBitWidth());
+  llvm::Type *translate(IntegerType type) {
+    return llvm::IntegerType::get(context, type.getWidth());
   }

   /// Translates the given pointer type.
@@ -253,7 +253,7 @@
   /// Translates the given integer type.
Type translate(llvm::IntegerType *type) { - return LLVM::LLVMIntegerType::get(&context, type->getBitWidth()); + return IntegerType::get(&context, type->getBitWidth()); } /// Translates the given pointer type. diff --git a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir --- a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir +++ b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir @@ -10,9 +10,9 @@ gpu.module @kernel_module attributes { nvvm.cubin = "CUBIN", rocdl.hsaco = "HSACO" } { - llvm.func @kernel(%arg0: !llvm.i32, %arg1: !llvm.ptr, - %arg2: !llvm.ptr, %arg3: !llvm.i64, %arg4: !llvm.i64, - %arg5: !llvm.i64) attributes {gpu.kernel} { + llvm.func @kernel(%arg0: i32, %arg1: !llvm.ptr, + %arg2: !llvm.ptr, %arg3: i64, %arg4: i64, + %arg5: i64) attributes {gpu.kernel} { llvm.return } } @@ -27,7 +27,7 @@ return } - // CHECK: [[C8:%.*]] = llvm.mlir.constant(8 : index) : !llvm.i64 + // CHECK: [[C8:%.*]] = llvm.mlir.constant(8 : index) : i64 // CHECK: [[ADDRESSOF:%.*]] = llvm.mlir.addressof @[[GLOBAL]] // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) // CHECK: [[BINARY:%.*]] = llvm.getelementptr [[ADDRESSOF]]{{\[}}[[C0]], [[C0]]] @@ -39,7 +39,7 @@ // CHECK: [[C0_I32:%.*]] = llvm.mlir.constant(0 : i32) // CHECK: [[STREAM:%.*]] = llvm.call @mgpuStreamCreate - // CHECK: [[NUM_PARAMS:%.*]] = llvm.mlir.constant(6 : i32) : !llvm.i32 + // CHECK: [[NUM_PARAMS:%.*]] = llvm.mlir.constant(6 : i32) : i32 // CHECK-NEXT: [[PARAMS:%.*]] = llvm.alloca [[NUM_PARAMS]] x !llvm.ptr // CHECK: [[EXTRA_PARAMS:%.*]] = llvm.mlir.null : !llvm.ptr> diff --git a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir --- a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir +++ b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir @@ -5,31 +5,31 @@ // NVVM-LABEL: llvm.func @private gpu.func @private(%arg0: f32) private(%arg1: memref<4xf32, 5>) { // Allocate private memory inside the function. - // NVVM: %[[size:.*]] = llvm.mlir.constant(4 : i64) : !llvm.i64 - // NVVM: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (!llvm.i64) -> !llvm.ptr + // NVVM: %[[size:.*]] = llvm.mlir.constant(4 : i64) : i64 + // NVVM: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (i64) -> !llvm.ptr - // ROCDL: %[[size:.*]] = llvm.mlir.constant(4 : i64) : !llvm.i64 - // ROCDL: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (!llvm.i64) -> !llvm.ptr + // ROCDL: %[[size:.*]] = llvm.mlir.constant(4 : i64) : i64 + // ROCDL: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (i64) -> !llvm.ptr // Populate the memref descriptor. 
// NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0] // NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1] - // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // NVVM: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2] - // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64 + // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64 // NVVM: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0] - // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64 // NVVM: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0] // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0] // ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1] - // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // ROCDL: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2] - // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64 + // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64 // ROCDL: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0] - // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64 // ROCDL: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0] // "Store" lowering should work just as any other memref, only check that @@ -67,12 +67,12 @@ // ROCDL-SAME: { gpu.func @workgroup(%arg0: f32) workgroup(%arg1: memref<4xf32, 3>) { // Get the address of the first element in the global array. 
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 + // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr, 3> // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]] // NVVM-SAME: !llvm.ptr - // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 + // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr, 3> // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]] // ROCDL-SAME: !llvm.ptr @@ -81,21 +81,21 @@ // NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0] // NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1] - // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // NVVM: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2] - // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64 + // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64 // NVVM: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0] - // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64 // NVVM: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0] // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0] // ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1] - // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // ROCDL: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2] - // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64 + // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64 // ROCDL: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0] - // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64 // ROCDL: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0] // "Store" lowering should work just as any other memref, only check that @@ -130,12 +130,12 @@ // ROCDL-LABEL: llvm.func @workgroup3d gpu.func @workgroup3d(%arg0: f32) workgroup(%arg1: memref<4x2x6xf32, 3>) { // Get the address of the first element in the global array. 
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 + // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr, 3> // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]] // NVVM-SAME: !llvm.ptr - // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 + // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr, 3> // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]] // ROCDL-SAME: !llvm.ptr @@ -144,37 +144,37 @@ // NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0] // NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1] - // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // NVVM: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2] - // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64 + // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64 // NVVM: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0] - // NVVM: %[[c12:.*]] = llvm.mlir.constant(12 : index) : !llvm.i64 + // NVVM: %[[c12:.*]] = llvm.mlir.constant(12 : index) : i64 // NVVM: %[[descr6:.*]] = llvm.insertvalue %[[c12]], %[[descr5]][4, 0] - // NVVM: %[[c2:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64 + // NVVM: %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64 // NVVM: %[[descr7:.*]] = llvm.insertvalue %[[c2]], %[[descr6]][3, 1] - // NVVM: %[[c6:.*]] = llvm.mlir.constant(6 : index) : !llvm.i64 + // NVVM: %[[c6:.*]] = llvm.mlir.constant(6 : index) : i64 // NVVM: %[[descr8:.*]] = llvm.insertvalue %[[c6]], %[[descr7]][4, 1] - // NVVM: %[[c6:.*]] = llvm.mlir.constant(6 : index) : !llvm.i64 + // NVVM: %[[c6:.*]] = llvm.mlir.constant(6 : index) : i64 // NVVM: %[[descr9:.*]] = llvm.insertvalue %[[c6]], %[[descr8]][3, 2] - // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64 // NVVM: %[[descr10:.*]] = llvm.insertvalue %[[c1]], %[[descr9]][4, 2] // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0] // ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1] - // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // ROCDL: %[[descr4:.*]] = llvm.insertvalue %[[c0]], %[[descr3]][2] - // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64 + // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64 // ROCDL: %[[descr5:.*]] = llvm.insertvalue %[[c4]], %[[descr4]][3, 0] - // ROCDL: %[[c12:.*]] = llvm.mlir.constant(12 : index) : !llvm.i64 + // ROCDL: %[[c12:.*]] = llvm.mlir.constant(12 : index) : i64 // ROCDL: %[[descr6:.*]] = llvm.insertvalue %[[c12]], %[[descr5]][4, 0] - // ROCDL: %[[c2:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64 + // ROCDL: %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64 // ROCDL: %[[descr7:.*]] = llvm.insertvalue %[[c2]], %[[descr6]][3, 1] - // ROCDL: %[[c6:.*]] = llvm.mlir.constant(6 : index) : !llvm.i64 + // ROCDL: %[[c6:.*]] = llvm.mlir.constant(6 : index) : i64 // ROCDL: %[[descr8:.*]] = llvm.insertvalue %[[c6]], %[[descr7]][4, 1] - // ROCDL: %[[c6:.*]] = llvm.mlir.constant(6 : index) : !llvm.i64 + // ROCDL: %[[c6:.*]] 
= llvm.mlir.constant(6 : index) : i64 // ROCDL: %[[descr9:.*]] = llvm.insertvalue %[[c6]], %[[descr8]][3, 2] - // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // ROCDL: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64 // ROCDL: %[[descr10:.*]] = llvm.insertvalue %[[c1]], %[[descr9]][4, 2] %c0 = constant 0 : index @@ -212,14 +212,14 @@ // Private buffers. // NVVM: %[[c3:.*]] = llvm.mlir.constant(3 : i64) - // NVVM: llvm.alloca %[[c3]] x !llvm.float : (!llvm.i64) -> !llvm.ptr + // NVVM: llvm.alloca %[[c3]] x !llvm.float : (i64) -> !llvm.ptr // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : i64) - // NVVM: llvm.alloca %[[c4]] x !llvm.float : (!llvm.i64) -> !llvm.ptr + // NVVM: llvm.alloca %[[c4]] x !llvm.float : (i64) -> !llvm.ptr // ROCDL: %[[c3:.*]] = llvm.mlir.constant(3 : i64) - // ROCDL: llvm.alloca %[[c3]] x !llvm.float : (!llvm.i64) -> !llvm.ptr + // ROCDL: llvm.alloca %[[c3]] x !llvm.float : (i64) -> !llvm.ptr // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : i64) - // ROCDL: llvm.alloca %[[c4]] x !llvm.float : (!llvm.i64) -> !llvm.ptr + // ROCDL: llvm.alloca %[[c4]] x !llvm.float : (i64) -> !llvm.ptr %c0 = constant 0 : index store %arg0, %arg1[%c0] : memref<1xf32, 3> diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir --- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir +++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir @@ -7,46 +7,46 @@ func @gpu_index_ops() -> (index, index, index, index, index, index, index, index, index, index, index, index) { - // CHECK32-NOT: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK32-NOT: = llvm.sext %{{.*}} : i32 to i64 - // CHECK: = nvvm.read.ptx.sreg.tid.x : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.tid.x : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %tIdX = "gpu.thread_id"() {dimension = "x"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.tid.y : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.tid.y : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %tIdY = "gpu.thread_id"() {dimension = "y"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.tid.z : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.tid.z : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %tIdZ = "gpu.thread_id"() {dimension = "z"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.ntid.x : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.ntid.x : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bDimX = "gpu.block_dim"() {dimension = "x"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.ntid.y : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.ntid.y : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bDimY = "gpu.block_dim"() {dimension = "y"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.ntid.z : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.ntid.z : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bDimZ = "gpu.block_dim"() {dimension = "z"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.ctaid.x : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.ctaid.x : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bIdX = "gpu.block_id"() {dimension = "x"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.ctaid.y : !llvm.i32 - // CHECK: = 
llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.ctaid.y : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bIdY = "gpu.block_id"() {dimension = "y"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.ctaid.z : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.ctaid.z : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bIdZ = "gpu.block_id"() {dimension = "z"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.nctaid.x : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.nctaid.x : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %gDimX = "gpu.grid_dim"() {dimension = "x"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.nctaid.y : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.nctaid.y : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %gDimY = "gpu.grid_dim"() {dimension = "y"} : () -> (index) - // CHECK: = nvvm.read.ptx.sreg.nctaid.z : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: = nvvm.read.ptx.sreg.nctaid.z : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %gDimZ = "gpu.grid_dim"() {dimension = "z"} : () -> (index) std.return %tIdX, %tIdY, %tIdZ, %bDimX, %bDimY, %bDimZ, @@ -62,11 +62,11 @@ // CHECK-LABEL: func @gpu_index_comp // CHECK32-LABEL: func @gpu_index_comp func @gpu_index_comp(%idx : index) -> index { - // CHECK: = llvm.add %{{.*}}, %{{.*}} : !llvm.i64 - // CHECK32: = llvm.add %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: = llvm.add %{{.*}}, %{{.*}} : i64 + // CHECK32: = llvm.add %{{.*}}, %{{.*}} : i32 %0 = addi %idx, %idx : index - // CHECK: llvm.return %{{.*}} : !llvm.i64 - // CHECK32: llvm.return %{{.*}} : !llvm.i32 + // CHECK: llvm.return %{{.*}} : i64 + // CHECK32: llvm.return %{{.*}} : i32 std.return %0 : index } } @@ -112,14 +112,14 @@ func @gpu_shuffle() -> (f32) { // CHECK: %[[#VALUE:]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float %arg0 = constant 1.0 : f32 - // CHECK: %[[#OFFSET:]] = llvm.mlir.constant(4 : i32) : !llvm.i32 + // CHECK: %[[#OFFSET:]] = llvm.mlir.constant(4 : i32) : i32 %arg1 = constant 4 : i32 - // CHECK: %[[#WIDTH:]] = llvm.mlir.constant(23 : i32) : !llvm.i32 + // CHECK: %[[#WIDTH:]] = llvm.mlir.constant(23 : i32) : i32 %arg2 = constant 23 : i32 - // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[#SHL:]] = llvm.shl %[[#ONE]], %[[#WIDTH]] : !llvm.i32 - // CHECK: %[[#MASK:]] = llvm.sub %[[#SHL]], %[[#ONE]] : !llvm.i32 - // CHECK: %[[#CLAMP:]] = llvm.sub %[[#WIDTH]], %[[#ONE]] : !llvm.i32 + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[#SHL:]] = llvm.shl %[[#ONE]], %[[#WIDTH]] : i32 + // CHECK: %[[#MASK:]] = llvm.sub %[[#SHL]], %[[#ONE]] : i32 + // CHECK: %[[#CLAMP:]] = llvm.sub %[[#WIDTH]], %[[#ONE]] : i32 // CHECK: %[[#SHFL:]] = nvvm.shfl.sync.bfly %[[#MASK]], %[[#VALUE]], %[[#OFFSET]], %[[#CLAMP]] : !llvm.struct<(float, i1)> // CHECK: llvm.extractvalue %[[#SHFL]][0 : index] : !llvm.struct<(float, i1)> // CHECK: llvm.extractvalue %[[#SHFL]][1 : index] : !llvm.struct<(float, i1)> diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir --- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir +++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir @@ -7,46 +7,46 @@ func @gpu_index_ops() -> (index, index, index, index, index, index, index, index, index, index, index, index) { - // CHECK32-NOT: = llvm.sext %{{.*}} : !llvm.i32 to 
!llvm.i64 + // CHECK32-NOT: = llvm.sext %{{.*}} : i32 to i64 - // CHECK: rocdl.workitem.id.x : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workitem.id.x : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %tIdX = "gpu.thread_id"() {dimension = "x"} : () -> (index) - // CHECK: rocdl.workitem.id.y : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workitem.id.y : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %tIdY = "gpu.thread_id"() {dimension = "y"} : () -> (index) - // CHECK: rocdl.workitem.id.z : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workitem.id.z : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %tIdZ = "gpu.thread_id"() {dimension = "z"} : () -> (index) - // CHECK: rocdl.workgroup.dim.x : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workgroup.dim.x : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bDimX = "gpu.block_dim"() {dimension = "x"} : () -> (index) - // CHECK: rocdl.workgroup.dim.y : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workgroup.dim.y : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bDimY = "gpu.block_dim"() {dimension = "y"} : () -> (index) - // CHECK: rocdl.workgroup.dim.z : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workgroup.dim.z : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bDimZ = "gpu.block_dim"() {dimension = "z"} : () -> (index) - // CHECK: rocdl.workgroup.id.x : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workgroup.id.x : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bIdX = "gpu.block_id"() {dimension = "x"} : () -> (index) - // CHECK: rocdl.workgroup.id.y : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workgroup.id.y : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bIdY = "gpu.block_id"() {dimension = "y"} : () -> (index) - // CHECK: rocdl.workgroup.id.z : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.workgroup.id.z : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %bIdZ = "gpu.block_id"() {dimension = "z"} : () -> (index) - // CHECK: rocdl.grid.dim.x : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.grid.dim.x : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %gDimX = "gpu.grid_dim"() {dimension = "x"} : () -> (index) - // CHECK: rocdl.grid.dim.y : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.grid.dim.y : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %gDimY = "gpu.grid_dim"() {dimension = "y"} : () -> (index) - // CHECK: rocdl.grid.dim.z : !llvm.i32 - // CHECK: = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: rocdl.grid.dim.z : i32 + // CHECK: = llvm.sext %{{.*}} : i32 to i64 %gDimZ = "gpu.grid_dim"() {dimension = "z"} : () -> (index) std.return %tIdX, %tIdY, %tIdZ, %bDimX, %bDimY, %bDimZ, @@ -62,11 +62,11 @@ // CHECK-LABEL: func @gpu_index_comp // CHECK32-LABEL: func @gpu_index_comp func @gpu_index_comp(%idx : index) -> index { - // CHECK: = llvm.add %{{.*}}, %{{.*}} : !llvm.i64 - // CHECK32: = llvm.add %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: = llvm.add %{{.*}}, %{{.*}} : i64 + // CHECK32: = llvm.add %{{.*}}, %{{.*}} : i32 %0 = addi %idx, %idx : index - // CHECK: llvm.return %{{.*}} : !llvm.i64 - // CHECK32: llvm.return %{{.*}} : !llvm.i32 + // CHECK: llvm.return %{{.*}} : i64 + // 
CHECK32: llvm.return %{{.*}} : i32 std.return %0 : index } } diff --git a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir --- a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir +++ b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir @@ -6,58 +6,58 @@ // CHECK: %[[addressof_SPIRV_BIN:.*]] = llvm.mlir.addressof @SPIRV_BIN // CHECK: %[[SPIRV_BIN_ptr:.*]] = llvm.getelementptr %[[addressof_SPIRV_BIN]] // CHECK: %[[SPIRV_BIN_size:.*]] = llvm.mlir.constant -// CHECK: llvm.call @bindMemRef1DFloat(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) -> !llvm.void -// CHECK: llvm.call @setBinaryShader(%[[Vulkan_Runtime_ptr]], %[[SPIRV_BIN_ptr]], %[[SPIRV_BIN_size]]) : (!llvm.ptr, !llvm.ptr, !llvm.i32) -> !llvm.void +// CHECK: llvm.call @bindMemRef1DFloat(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, i32, i32, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) -> !llvm.void +// CHECK: llvm.call @setBinaryShader(%[[Vulkan_Runtime_ptr]], %[[SPIRV_BIN_ptr]], %[[SPIRV_BIN_size]]) : (!llvm.ptr, !llvm.ptr, i32) -> !llvm.void // CHECK: %[[addressof_entry_point:.*]] = llvm.mlir.addressof @kernel_spv_entry_point_name // CHECK: %[[entry_point_ptr:.*]] = llvm.getelementptr %[[addressof_entry_point]] // CHECK: llvm.call @setEntryPoint(%[[Vulkan_Runtime_ptr]], %[[entry_point_ptr]]) : (!llvm.ptr, !llvm.ptr) -> !llvm.void -// CHECK: llvm.call @setNumWorkGroups(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.i64, !llvm.i64, !llvm.i64) -> !llvm.void +// CHECK: llvm.call @setNumWorkGroups(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, i64, i64, i64) -> !llvm.void // CHECK: llvm.call @runOnVulkan(%[[Vulkan_Runtime_ptr]]) : (!llvm.ptr) -> !llvm.void // CHECK: llvm.call @deinitVulkan(%[[Vulkan_Runtime_ptr]]) : (!llvm.ptr) -> !llvm.void -// CHECK: llvm.func @bindMemRef1DHalf(!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) +// CHECK: llvm.func @bindMemRef1DHalf(!llvm.ptr, i32, i32, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) module attributes {gpu.container_module} { - llvm.func @malloc(!llvm.i64) -> !llvm.ptr + llvm.func @malloc(i64) -> !llvm.ptr llvm.func @foo() { - %0 = llvm.mlir.constant(12 : index) : !llvm.i64 + %0 = llvm.mlir.constant(12 : index) : i64 %1 = llvm.mlir.null : !llvm.ptr - %2 = llvm.mlir.constant(1 : index) : !llvm.i64 - %3 = llvm.getelementptr %1[%2] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr - %4 = llvm.ptrtoint %3 : !llvm.ptr to !llvm.i64 - %5 = llvm.mul %0, %4 : !llvm.i64 - %6 = llvm.call @malloc(%5) : (!llvm.i64) -> !llvm.ptr + %2 = llvm.mlir.constant(1 : index) : i64 + %3 = llvm.getelementptr %1[%2] : (!llvm.ptr, i64) -> !llvm.ptr + %4 = llvm.ptrtoint %3 : !llvm.ptr to i64 + %5 = llvm.mul %0, %4 : i64 + %6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr %7 = llvm.bitcast %6 : !llvm.ptr to !llvm.ptr %8 = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %9 = llvm.insertvalue %7, %8[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %10 = llvm.insertvalue %7, %9[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - %11 = llvm.mlir.constant(0 : index) : !llvm.i64 + %11 = llvm.mlir.constant(0 : index) : i64 %12 = llvm.insertvalue %11, %10[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - %13 = llvm.mlir.constant(1 : index) : !llvm.i64 + %13 = 
llvm.mlir.constant(1 : index) : i64 %14 = llvm.insertvalue %0, %12[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %15 = llvm.insertvalue %13, %14[4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - %16 = llvm.mlir.constant(1 : index) : !llvm.i64 + %16 = llvm.mlir.constant(1 : index) : i64 %17 = llvm.extractvalue %15[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %18 = llvm.extractvalue %15[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %19 = llvm.extractvalue %15[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %20 = llvm.extractvalue %15[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %21 = llvm.extractvalue %15[4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> llvm.call @vulkanLaunch(%16, %16, %16, %17, %18, %19, %20, %21) {spirv_blob = "\03\02#\07\00", spirv_entry_point = "kernel"} - : (!llvm.i64, !llvm.i64, !llvm.i64, !llvm.ptr, !llvm.ptr, !llvm.i64, !llvm.i64, !llvm.i64) -> () + : (i64, i64, i64, !llvm.ptr, !llvm.ptr, i64, i64, i64) -> () llvm.return } - llvm.func @vulkanLaunch(%arg0: !llvm.i64, %arg1: !llvm.i64, %arg2: !llvm.i64, %arg6: !llvm.ptr, %arg7: !llvm.ptr, %arg8: !llvm.i64, %arg9: !llvm.i64, %arg10: !llvm.i64) { + llvm.func @vulkanLaunch(%arg0: i64, %arg1: i64, %arg2: i64, %arg6: !llvm.ptr, %arg7: !llvm.ptr, %arg8: i64, %arg9: i64, %arg10: i64) { %0 = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %1 = llvm.insertvalue %arg6, %0[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %2 = llvm.insertvalue %arg7, %1[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %3 = llvm.insertvalue %arg8, %2[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %4 = llvm.insertvalue %arg9, %3[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %5 = llvm.insertvalue %arg10, %4[4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - %6 = llvm.mlir.constant(1 : index) : !llvm.i64 - %7 = llvm.alloca %6 x !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> : (!llvm.i64) -> !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>> + %6 = llvm.mlir.constant(1 : index) : i64 + %7 = llvm.alloca %6 x !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> : (i64) -> !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>> llvm.store %5, %7 : !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>> - llvm.call @_mlir_ciface_vulkanLaunch(%arg0, %arg1, %arg2, %7) : (!llvm.i64, !llvm.i64, !llvm.i64, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) -> () + llvm.call @_mlir_ciface_vulkanLaunch(%arg0, %arg1, %arg2, %7) : (i64, i64, i64, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) -> () llvm.return } - llvm.func @_mlir_ciface_vulkanLaunch(!llvm.i64, !llvm.i64, !llvm.i64, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) + llvm.func @_mlir_ciface_vulkanLaunch(i64, i64, i64, !llvm.ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>>) } diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir --- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir +++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir @@ -6,17 +6,17 @@ %end = constant 0 : index // CHECK: omp.parallel omp.parallel { - // CHECK-NEXT: llvm.br ^[[BB1:.*]](%{{[0-9]+}}, %{{[0-9]+}} : !llvm.i64, !llvm.i64 + // CHECK-NEXT: llvm.br ^[[BB1:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64 br 
^bb1(%start, %end : index, index) - // CHECK-NEXT: ^[[BB1]](%[[ARG1:[0-9]+]]: !llvm.i64, %[[ARG2:[0-9]+]]: !llvm.i64):{{.*}} + // CHECK-NEXT: ^[[BB1]](%[[ARG1:[0-9]+]]: i64, %[[ARG2:[0-9]+]]: i64):{{.*}} ^bb1(%0: index, %1: index): - // CHECK-NEXT: %[[CMP:[0-9]+]] = llvm.icmp "slt" %[[ARG1]], %[[ARG2]] : !llvm.i64 + // CHECK-NEXT: %[[CMP:[0-9]+]] = llvm.icmp "slt" %[[ARG1]], %[[ARG2]] : i64 %2 = cmpi "slt", %0, %1 : index - // CHECK-NEXT: llvm.cond_br %[[CMP]], ^[[BB2:.*]](%{{[0-9]+}}, %{{[0-9]+}} : !llvm.i64, !llvm.i64), ^[[BB3:.*]] + // CHECK-NEXT: llvm.cond_br %[[CMP]], ^[[BB2:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64), ^[[BB3:.*]] cond_br %2, ^bb2(%end, %end : index, index), ^bb3 - // CHECK-NEXT: ^[[BB2]](%[[ARG3:[0-9]+]]: !llvm.i64, %[[ARG4:[0-9]+]]: !llvm.i64): + // CHECK-NEXT: ^[[BB2]](%[[ARG3:[0-9]+]]: i64, %[[ARG4:[0-9]+]]: i64): ^bb2(%3: index, %4: index): - // CHECK-NEXT: llvm.br ^[[BB1]](%[[ARG3]], %[[ARG4]] : !llvm.i64, !llvm.i64) + // CHECK-NEXT: llvm.br ^[[BB1]](%[[ARG3]], %[[ARG4]] : i64, i64) br ^bb1(%3, %4 : index, index) // CHECK-NEXT: ^[[BB3]]: ^bb3: @@ -30,16 +30,16 @@ } // CHECK-LABEL: @wsloop -// CHECK: (%[[ARG0:.*]]: !llvm.i64, %[[ARG1:.*]]: !llvm.i64, %[[ARG2:.*]]: !llvm.i64, %[[ARG3:.*]]: !llvm.i64, %[[ARG4:.*]]: !llvm.i64, %[[ARG5:.*]]: !llvm.i64) +// CHECK: (%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64, %[[ARG3:.*]]: i64, %[[ARG4:.*]]: i64, %[[ARG5:.*]]: i64) func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) { // CHECK: omp.parallel omp.parallel { // CHECK: omp.wsloop // CHECK: (%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]], %[[ARG5]]) "omp.wsloop"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) ( { - // CHECK: ^{{.*}}(%[[ARG6:.*]]: !llvm.i64, %[[ARG7:.*]]: !llvm.i64): + // CHECK: ^{{.*}}(%[[ARG6:.*]]: i64, %[[ARG7:.*]]: i64): ^bb0(%arg6: index, %arg7: index): // no predecessors - // CHECK: "test.payload"(%[[ARG6]], %[[ARG7]]) : (!llvm.i64, !llvm.i64) -> () + // CHECK: "test.payload"(%[[ARG6]], %[[ARG7]]) : (i64, i64) -> () "test.payload"(%arg6, %arg7) : (index, index) -> () omp.yield }) {operand_segment_sizes = dense<[2, 2, 2, 0, 0, 0, 0, 0, 0]> : vector<9xi32>} : (index, index, index, index, index, index) -> () diff --git a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir @@ -6,7 +6,7 @@ // CHECK-LABEL: @iadd_scalar spv.func @iadd_scalar(%arg0: i32, %arg1: i32) "None" { - // CHECK: llvm.add %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.add %{{.*}}, %{{.*}} : i32 %0 = spv.IAdd %arg0, %arg1 : i32 spv.Return } @@ -24,7 +24,7 @@ // CHECK-LABEL: @isub_scalar spv.func @isub_scalar(%arg0: i8, %arg1: i8) "None" { - // CHECK: llvm.sub %{{.*}}, %{{.*}} : !llvm.i8 + // CHECK: llvm.sub %{{.*}}, %{{.*}} : i8 %0 = spv.ISub %arg0, %arg1 : i8 spv.Return } @@ -42,7 +42,7 @@ // CHECK-LABEL: @imul_scalar spv.func @imul_scalar(%arg0: i32, %arg1: i32) "None" { - // CHECK: llvm.mul %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.mul %{{.*}}, %{{.*}} : i32 %0 = spv.IMul %arg0, %arg1 : i32 spv.Return } @@ -168,7 +168,7 @@ // CHECK-LABEL: @udiv_scalar spv.func @udiv_scalar(%arg0: i32, %arg1: i32) "None" { - // CHECK: llvm.udiv %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.udiv %{{.*}}, %{{.*}} : i32 %0 = spv.UDiv %arg0, %arg1 : i32 spv.Return } @@ -186,7 +186,7 @@ // CHECK-LABEL: @umod_scalar spv.func @umod_scalar(%arg0: 
i32, %arg1: i32) "None" { - // CHECK: llvm.urem %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.urem %{{.*}}, %{{.*}} : i32 %0 = spv.UMod %arg0, %arg1 : i32 spv.Return } @@ -204,7 +204,7 @@ // CHECK-LABEL: @sdiv_scalar spv.func @sdiv_scalar(%arg0: i16, %arg1: i16) "None" { - // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : !llvm.i16 + // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : i16 %0 = spv.SDiv %arg0, %arg1 : i16 spv.Return } @@ -222,7 +222,7 @@ // CHECK-LABEL: @srem_scalar spv.func @srem_scalar(%arg0: i32, %arg1: i32) "None" { - // CHECK: llvm.srem %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.srem %{{.*}}, %{{.*}} : i32 %0 = spv.SRem %arg0, %arg1 : i32 spv.Return } diff --git a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir @@ -6,7 +6,7 @@ // CHECK-LABEL: @bitcount_scalar spv.func @bitcount_scalar(%arg0: i16) "None" { - // CHECK: "llvm.intr.ctpop"(%{{.*}}) : (!llvm.i16) -> !llvm.i16 + // CHECK: "llvm.intr.ctpop"(%{{.*}}) : (i16) -> i16 %0 = spv.BitCount %arg0: i16 spv.Return } @@ -24,7 +24,7 @@ // CHECK-LABEL: @bitreverse_scalar spv.func @bitreverse_scalar(%arg0: i64) "None" { - // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (!llvm.i64) -> !llvm.i64 + // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (i64) -> i64 %0 = spv.BitReverse %arg0: i64 spv.Return } @@ -41,67 +41,67 @@ //===----------------------------------------------------------------------===// // CHECK-LABEL: @bitfield_insert_scalar_same_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[INSERT:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32 +// CHECK-SAME: %[[BASE:.*]]: i32, %[[INSERT:.*]]: i32, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32 spv.func @bitfield_insert_scalar_same_bit_width(%base: i32, %insert: i32, %offset: i32, %count: i32) "None" { - // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32 - // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32 - // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32 - // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[OFFSET]] : !llvm.i32 - // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i32 - // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i32 - // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET]] : !llvm.i32 - // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i32 + // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32 + // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i32 + // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32 + // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[OFFSET]] : i32 + // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : i32 + // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i32 + // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET]] : i32 + // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i32 %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32 spv.Return } // CHECK-LABEL: @bitfield_insert_scalar_smaller_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i64, %[[INSERT:.*]]: !llvm.i64, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i8 +// CHECK-SAME: %[[BASE:.*]]: i64, %[[INSERT:.*]]: i64, %[[OFFSET:.*]]: i8, %[[COUNT:.*]]: i8 spv.func @bitfield_insert_scalar_smaller_bit_width(%base: i64, %insert: i64, %offset: i8, %count: i8) "None" { - // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext 
%[[OFFSET]] : !llvm.i8 to !llvm.i64 - // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i64 - // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i64) : !llvm.i64 - // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i64 - // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i64 - // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[EXT_OFFSET]] : !llvm.i64 - // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i64 - // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i64 - // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[EXT_OFFSET]] : !llvm.i64 - // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i64 + // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i8 to i64 + // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i64 + // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i64) : i64 + // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : i64 + // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i64 + // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[EXT_OFFSET]] : i64 + // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : i64 + // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i64 + // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[EXT_OFFSET]] : i64 + // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i64 %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i64, i8, i8 spv.Return } // CHECK-LABEL: @bitfield_insert_scalar_greater_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i16, %[[INSERT:.*]]: !llvm.i16, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i64 +// CHECK-SAME: %[[BASE:.*]]: i16, %[[INSERT:.*]]: i16, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i64 spv.func @bitfield_insert_scalar_greater_bit_width(%base: i16, %insert: i16, %offset: i32, %count: i64) "None" { - // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i32 to !llvm.i16 - // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i16 - // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i16) : !llvm.i16 - // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[TRUNC_COUNT]] : !llvm.i16 - // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i16 - // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[TRUNC_OFFSET]] : !llvm.i16 - // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i16 - // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i16 - // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[TRUNC_OFFSET]] : !llvm.i16 - // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i16 + // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i32 to i16 + // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : i64 to i16 + // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i16) : i16 + // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[TRUNC_COUNT]] : i16 + // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i16 + // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[TRUNC_OFFSET]] : i16 + // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : i16 + // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i16 + // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[TRUNC_OFFSET]] : i16 + // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i16 %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i16, i32, i64 spv.Return } // CHECK-LABEL: @bitfield_insert_vector -// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[INSERT:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: 
!llvm.i32 +// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[INSERT:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32 spv.func @bitfield_insert_vector(%base: vector<2xi32>, %insert: vector<2xi32>, %offset: i32, %count: i32) "None" { // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32> - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32> + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32> + // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32> // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32> - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32> + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32> + // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32> // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm.vec<2 x i32> // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm.vec<2 x i32> // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.vec<2 x i32> @@ -119,61 +119,61 @@ //===----------------------------------------------------------------------===// // CHECK-LABEL: @bitfield_sextract_scalar_same_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i64, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64 +// CHECK-SAME: %[[BASE:.*]]: i64, %[[OFFSET:.*]]: i64, %[[COUNT:.*]]: i64 spv.func @bitfield_sextract_scalar_same_bit_width(%base: i64, %offset: i64, %count: i64) "None" { - // CHECK: %[[SIZE:.]] = llvm.mlir.constant(64 : i64) : !llvm.i64 - // CHECK: %[[T0:.*]] = llvm.add %[[COUNT]], %[[OFFSET]] : !llvm.i64 - // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i64 - // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i64 - // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET]], %[[T1]] : !llvm.i64 - // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i64 + // CHECK: %[[SIZE:.]] = llvm.mlir.constant(64 : i64) : i64 + // CHECK: %[[T0:.*]] = llvm.add %[[COUNT]], %[[OFFSET]] : i64 + // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : i64 + // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i64 + // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET]], %[[T1]] : i64 + // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i64 %0 = spv.BitFieldSExtract %base, %offset, %count : i64, i64, i64 spv.Return } // CHECK-LABEL: @bitfield_sextract_scalar_smaller_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i8 +// CHECK-SAME: %[[BASE:.*]]: 
i32, %[[OFFSET:.*]]: i8, %[[COUNT:.*]]: i8 spv.func @bitfield_sextract_scalar_smaller_bit_width(%base: i32, %offset: i8, %count: i8) "None" { - // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i8 to !llvm.i32 - // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32 - // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : !llvm.i32 - // CHECK: %[[T0:.*]] = llvm.add %[[EXT_COUNT]], %[[EXT_OFFSET]] : !llvm.i32 - // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32 - // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32 - // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : !llvm.i32 - // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32 + // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i8 to i32 + // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i32 + // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : i32 + // CHECK: %[[T0:.*]] = llvm.add %[[EXT_COUNT]], %[[EXT_OFFSET]] : i32 + // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : i32 + // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i32 + // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : i32 + // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i32 %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i8, i8 spv.Return } // CHECK-LABEL: @bitfield_sextract_scalar_greater_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64 +// CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i64, %[[COUNT:.*]]: i64 spv.func @bitfield_sextract_scalar_greater_bit_width(%base: i32, %offset: i64, %count: i64) "None" { - // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i64 to !llvm.i32 - // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i32 - // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : !llvm.i32 - // CHECK: %[[T0:.*]] = llvm.add %[[TRUNC_COUNT]], %[[TRUNC_OFFSET]] : !llvm.i32 - // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32 - // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32 - // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : !llvm.i32 - // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32 + // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i64 to i32 + // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : i64 to i32 + // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : i32 + // CHECK: %[[T0:.*]] = llvm.add %[[TRUNC_COUNT]], %[[TRUNC_OFFSET]] : i32 + // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : i32 + // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i32 + // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : i32 + // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i32 %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i64, i64 spv.Return } // CHECK-LABEL: @bitfield_sextract_vector -// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32 +// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32 spv.func @bitfield_sextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) "None" { // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32> - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], 
%[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32> + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32> + // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32> // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32> - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32> + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32> + // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32> // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(dense<32> : vector<2xi32>) : !llvm.vec<2 x i32> // CHECK: %[[T0:.*]] = llvm.add %[[COUNT_V2]], %[[OFFSET_V2]] : !llvm.vec<2 x i32> // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.vec<2 x i32> @@ -189,57 +189,57 @@ //===----------------------------------------------------------------------===// // CHECK-LABEL: @bitfield_uextract_scalar_same_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32 +// CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32 spv.func @bitfield_uextract_scalar_same_bit_width(%base: i32, %offset: i32, %count: i32) "None" { - // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32 - // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32 - // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32 - // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET]] : !llvm.i32 - // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32 + // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32 + // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i32 + // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32 + // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET]] : i32 + // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i32 %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32 spv.Return } // CHECK-LABEL: @bitfield_uextract_scalar_smaller_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i16, %[[COUNT:.*]]: !llvm.i8 +// CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i16, %[[COUNT:.*]]: i8 spv.func @bitfield_uextract_scalar_smaller_bit_width(%base: i32, %offset: i16, %count: i8) "None" { - // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i16 to !llvm.i32 - // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32 - // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32 - // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i32 - // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32 - // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : !llvm.i32 - // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32 + // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i16 to 
i32 + // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i32 + // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32 + // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : i32 + // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32 + // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : i32 + // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i32 %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i16, i8 spv.Return } // CHECK-LABEL: @bitfield_uextract_scalar_greater_bit_width -// CHECK-SAME: %[[BASE:.*]]: !llvm.i8, %[[OFFSET:.*]]: !llvm.i16, %[[COUNT:.*]]: !llvm.i8 +// CHECK-SAME: %[[BASE:.*]]: i8, %[[OFFSET:.*]]: i16, %[[COUNT:.*]]: i8 spv.func @bitfield_uextract_scalar_greater_bit_width(%base: i8, %offset: i16, %count: i8) "None" { - // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i16 to !llvm.i8 - // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i8) : !llvm.i8 - // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i8 - // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i8 - // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[TRUNC_OFFSET]] : !llvm.i8 - // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i8 + // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i16 to i8 + // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i8) : i8 + // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i8 + // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i8 + // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[TRUNC_OFFSET]] : i8 + // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i8 %0 = spv.BitFieldUExtract %base, %offset, %count : i8, i16, i8 spv.Return } // CHECK-LABEL: @bitfield_uextract_vector -// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32 +// CHECK-SAME: %[[BASE:.*]]: !llvm.vec<2 x i32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32 spv.func @bitfield_uextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) "None" { // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32> - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32> + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32> + // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32> // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm.vec<2 x i32> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm.vec<2 x i32> - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm.vec<2 x i32> + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : i32] : !llvm.vec<2 x i32> + // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[COUNT_V2:.*]] = 
llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : i32] : !llvm.vec<2 x i32> // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm.vec<2 x i32> // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm.vec<2 x i32> // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.vec<2 x i32> @@ -255,7 +255,7 @@ // CHECK-LABEL: @bitwise_and_scalar spv.func @bitwise_and_scalar(%arg0: i32, %arg1: i32) "None" { - // CHECK: llvm.and %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.and %{{.*}}, %{{.*}} : i32 %0 = spv.BitwiseAnd %arg0, %arg1 : i32 spv.Return } @@ -273,7 +273,7 @@ // CHECK-LABEL: @bitwise_or_scalar spv.func @bitwise_or_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.or %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.or %{{.*}}, %{{.*}} : i64 %0 = spv.BitwiseOr %arg0, %arg1 : i64 spv.Return } @@ -291,7 +291,7 @@ // CHECK-LABEL: @bitwise_xor_scalar spv.func @bitwise_xor_scalar(%arg0: i32, %arg1: i32) "None" { - // CHECK: llvm.xor %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.xor %{{.*}}, %{{.*}} : i32 %0 = spv.BitwiseXor %arg0, %arg1 : i32 spv.Return } @@ -309,8 +309,8 @@ // CHECK-LABEL: @not_scalar spv.func @not_scalar(%arg0: i32) "None" { - // CHECK: %[[CONST:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32 - // CHECK: llvm.xor %{{.*}}, %[[CONST]] : !llvm.i32 + // CHECK: %[[CONST:.*]] = llvm.mlir.constant(-1 : i32) : i32 + // CHECK: llvm.xor %{{.*}}, %[[CONST]] : i32 %0 = spv.Not %arg0 : i32 spv.Return } diff --git a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir @@ -6,7 +6,7 @@ // CHECK-LABEL: @bitcast_float_to_integer_scalar spv.func @bitcast_float_to_integer_scalar(%arg0 : f32) "None" { - // CHECK: llvm.bitcast {{.*}} : !llvm.float to !llvm.i32 + // CHECK: llvm.bitcast {{.*}} : !llvm.float to i32 %0 = spv.Bitcast %arg0: f32 to i32 spv.Return } @@ -20,7 +20,7 @@ // CHECK-LABEL: @bitcast_vector_to_scalar spv.func @bitcast_vector_to_scalar(%arg0 : vector<2xf32>) "None" { - // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<2 x float> to !llvm.i64 + // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<2 x float> to i64 %0 = spv.Bitcast %arg0: vector<2xf32> to i64 spv.Return } @@ -52,7 +52,7 @@ // CHECK-LABEL: @convert_float_to_signed_scalar spv.func @convert_float_to_signed_scalar(%arg0: f32) "None" { - // CHECK: llvm.fptosi %{{.*}} : !llvm.float to !llvm.i32 + // CHECK: llvm.fptosi %{{.*}} : !llvm.float to i32 %0 = spv.ConvertFToS %arg0: f32 to i32 spv.Return } @@ -70,7 +70,7 @@ // CHECK-LABEL: @convert_float_to_unsigned_scalar spv.func @convert_float_to_unsigned_scalar(%arg0: f32) "None" { - // CHECK: llvm.fptoui %{{.*}} : !llvm.float to !llvm.i32 + // CHECK: llvm.fptoui %{{.*}} : !llvm.float to i32 %0 = spv.ConvertFToU %arg0: f32 to i32 spv.Return } @@ -88,7 +88,7 @@ // CHECK-LABEL: @convert_signed_to_float_scalar spv.func @convert_signed_to_float_scalar(%arg0: i32) "None" { - // CHECK: llvm.sitofp %{{.*}} : !llvm.i32 to !llvm.float + // CHECK: llvm.sitofp %{{.*}} : i32 to !llvm.float %0 = spv.ConvertSToF %arg0: i32 to f32 spv.Return } @@ -106,7 +106,7 @@ // CHECK-LABEL: @convert_unsigned_to_float_scalar spv.func @convert_unsigned_to_float_scalar(%arg0: i32) "None" { - // CHECK: llvm.uitofp %{{.*}} : !llvm.i32 to !llvm.float + // CHECK: llvm.uitofp %{{.*}} : i32 to !llvm.float %0 = spv.ConvertUToF %arg0: i32 to f32 spv.Return } @@ -148,10 +148,10 
@@ // CHECK-LABEL: @sconvert_scalar spv.func @sconvert_scalar(%arg0: i32, %arg1: i64) "None" { - // CHECK: llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: llvm.sext %{{.*}} : i32 to i64 %0 = spv.SConvert %arg0: i32 to i64 - // CHECK: llvm.trunc %{{.*}} : !llvm.i64 to !llvm.i32 + // CHECK: llvm.trunc %{{.*}} : i64 to i32 %1 = spv.SConvert %arg1: i64 to i32 spv.Return } @@ -172,10 +172,10 @@ // CHECK-LABEL: @uconvert_scalar spv.func @uconvert_scalar(%arg0: i32, %arg1: i64) "None" { - // CHECK: llvm.zext %{{.*}} : !llvm.i32 to !llvm.i64 + // CHECK: llvm.zext %{{.*}} : i32 to i64 %0 = spv.UConvert %arg0: i32 to i64 - // CHECK: llvm.trunc %{{.*}} : !llvm.i64 to !llvm.i32 + // CHECK: llvm.trunc %{{.*}} : i64 to i32 %1 = spv.UConvert %arg1: i64 to i32 spv.Return } diff --git a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir @@ -6,7 +6,7 @@ // CHECK-LABEL: @i_equal_scalar spv.func @i_equal_scalar(%arg0: i32, %arg1: i32) "None" { - // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : i32 %0 = spv.IEqual %arg0, %arg1 : i32 spv.Return } @@ -24,7 +24,7 @@ // CHECK-LABEL: @i_not_equal_scalar spv.func @i_not_equal_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : i64 %0 = spv.INotEqual %arg0, %arg1 : i64 spv.Return } @@ -42,7 +42,7 @@ // CHECK-LABEL: @s_greater_than_equal_scalar spv.func @s_greater_than_equal_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "sge" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "sge" %{{.*}}, %{{.*}} : i64 %0 = spv.SGreaterThanEqual %arg0, %arg1 : i64 spv.Return } @@ -60,7 +60,7 @@ // CHECK-LABEL: @s_greater_than_scalar spv.func @s_greater_than_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "sgt" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "sgt" %{{.*}}, %{{.*}} : i64 %0 = spv.SGreaterThan %arg0, %arg1 : i64 spv.Return } @@ -78,7 +78,7 @@ // CHECK-LABEL: @s_less_than_equal_scalar spv.func @s_less_than_equal_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "sle" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "sle" %{{.*}}, %{{.*}} : i64 %0 = spv.SLessThanEqual %arg0, %arg1 : i64 spv.Return } @@ -96,7 +96,7 @@ // CHECK-LABEL: @s_less_than_scalar spv.func @s_less_than_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "slt" %{{.*}}, %{{.*}} : i64 %0 = spv.SLessThan %arg0, %arg1 : i64 spv.Return } @@ -114,7 +114,7 @@ // CHECK-LABEL: @u_greater_than_equal_scalar spv.func @u_greater_than_equal_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "uge" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "uge" %{{.*}}, %{{.*}} : i64 %0 = spv.UGreaterThanEqual %arg0, %arg1 : i64 spv.Return } @@ -132,7 +132,7 @@ // CHECK-LABEL: @u_greater_than_scalar spv.func @u_greater_than_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "ugt" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "ugt" %{{.*}}, %{{.*}} : i64 %0 = spv.UGreaterThan %arg0, %arg1 : i64 spv.Return } @@ -150,7 +150,7 @@ // CHECK-LABEL: @u_less_than_equal_scalar spv.func @u_less_than_equal_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "ule" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "ule" %{{.*}}, %{{.*}} : 
i64 %0 = spv.ULessThanEqual %arg0, %arg1 : i64 spv.Return } @@ -168,7 +168,7 @@ // CHECK-LABEL: @u_less_than_scalar spv.func @u_less_than_scalar(%arg0: i64, %arg1: i64) "None" { - // CHECK: llvm.icmp "ult" %{{.*}}, %{{.*}} : !llvm.i64 + // CHECK: llvm.icmp "ult" %{{.*}}, %{{.*}} : i64 %0 = spv.ULessThan %arg0, %arg1 : i64 spv.Return } diff --git a/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir @@ -6,9 +6,9 @@ // CHECK-LABEL: @bool_constant_scalar spv.func @bool_constant_scalar() "None" { - // CHECK: llvm.mlir.constant(true) : !llvm.i1 + // CHECK: llvm.mlir.constant(true) : i1 %0 = spv.constant true - // CHECK: llvm.mlir.constant(false) : !llvm.i1 + // CHECK: llvm.mlir.constant(false) : i1 %1 = spv.constant false spv.Return } @@ -24,11 +24,11 @@ // CHECK-LABEL: @integer_constant_scalar spv.func @integer_constant_scalar() "None" { - // CHECK: llvm.mlir.constant(0 : i8) : !llvm.i8 + // CHECK: llvm.mlir.constant(0 : i8) : i8 %0 = spv.constant 0 : i8 - // CHECK: llvm.mlir.constant(-5 : i64) : !llvm.i64 + // CHECK: llvm.mlir.constant(-5 : i64) : i64 %1 = spv.constant -5 : si64 - // CHECK: llvm.mlir.constant(10 : i16) : !llvm.i16 + // CHECK: llvm.mlir.constant(10 : i16) : i16 %2 = spv.constant 10 : ui16 spv.Return } diff --git a/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir @@ -16,9 +16,9 @@ spv.func @branch_with_arguments() -> () "None" { %0 = spv.constant 0 : i32 %1 = spv.constant true - // CHECK: llvm.br ^bb1(%{{.*}}, %{{.*}} : !llvm.i32, !llvm.i1) + // CHECK: llvm.br ^bb1(%{{.*}}, %{{.*}} : i32, i1) spv.Branch ^label(%0, %1: i32, i1) - // CHECK: ^bb1(%{{.*}}: !llvm.i32, %{{.*}}: !llvm.i1) + // CHECK: ^bb1(%{{.*}}: i32, %{{.*}}: i1) ^label(%arg0: i32, %arg1: i1): spv.Return } @@ -32,7 +32,7 @@ spv.module Logical GLSL450 { spv.func @cond_branch_without_arguments() -> () "None" { - // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : !llvm.i1 + // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1 %cond = spv.constant true // CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2 spv.BranchConditional %cond, ^true, ^false @@ -45,16 +45,16 @@ } spv.func @cond_branch_with_arguments_nested() -> () "None" { - // CHECK: %[[COND1:.*]] = llvm.mlir.constant(true) : !llvm.i1 + // CHECK: %[[COND1:.*]] = llvm.mlir.constant(true) : i1 %cond = spv.constant true %0 = spv.constant 0 : i32 - // CHECK: %[[COND2:.*]] = llvm.mlir.constant(false) : !llvm.i1 + // CHECK: %[[COND2:.*]] = llvm.mlir.constant(false) : i1 %false = spv.constant false - // CHECK: llvm.cond_br %[[COND1]], ^bb1(%{{.*}}, %[[COND2]] : !llvm.i32, !llvm.i1), ^bb2 + // CHECK: llvm.cond_br %[[COND1]], ^bb1(%{{.*}}, %[[COND2]] : i32, i1), ^bb2 spv.BranchConditional %cond, ^outer_true(%0, %false: i32, i1), ^outer_false - // CHECK: ^bb1(%{{.*}}: !llvm.i32, %[[COND:.*]]: !llvm.i1): + // CHECK: ^bb1(%{{.*}}: i32, %[[COND:.*]]: i1): ^outer_true(%arg0: i32, %arg1: i1): - // CHECK: llvm.cond_br %[[COND]], ^bb3, ^bb4(%{{.*}}, %{{.*}} : !llvm.i32, !llvm.i32) + // CHECK: llvm.cond_br %[[COND]], ^bb3, ^bb4(%{{.*}}, %{{.*}} : i32, i32) spv.BranchConditional %arg1, ^inner_true, ^inner_false(%arg0, %arg0: i32, i32) // CHECK: ^bb2: ^outer_false: @@ -62,7 +62,7 @@ //
CHECK: ^bb3: ^inner_true: spv.Return - // CHECK: ^bb4(%{{.*}}: !llvm.i32, %{{.*}}: !llvm.i32): + // CHECK: ^bb4(%{{.*}}: i32, %{{.*}}: i32): ^inner_false(%arg3: i32, %arg4: i32): spv.Return } @@ -90,7 +90,7 @@ spv.func @infinite_loop(%count : i32) -> () "None" { // CHECK: llvm.br ^[[BB1:.*]] // CHECK: ^[[BB1]]: - // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : !llvm.i1 + // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1 // CHECK: llvm.cond_br %[[COND]], ^[[BB2:.*]], ^[[BB4:.*]] // CHECK: ^[[BB2]]: // CHECK: llvm.br ^[[BB3:.*]] @@ -144,7 +144,7 @@ } spv.func @selection_with_true_block_only() -> () "None" { - // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : !llvm.i1 + // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1 %cond = spv.constant true // CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2 spv.selection { @@ -164,7 +164,7 @@ } spv.func @selection_with_both_true_and_false_block() -> () "None" { - // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : !llvm.i1 + // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1 %cond = spv.constant true // CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2 spv.selection { @@ -188,14 +188,14 @@ } spv.func @selection_with_early_return(%arg0: i1) -> i32 "None" { - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 %0 = spv.constant 0 : i32 - // CHECK: llvm.cond_br %{{.*}}, ^bb1(%[[ZERO]] : !llvm.i32), ^bb2 + // CHECK: llvm.cond_br %{{.*}}, ^bb1(%[[ZERO]] : i32), ^bb2 spv.selection { spv.BranchConditional %arg0, ^true(%0 : i32), ^merge - // CHECK: ^bb1(%[[ARG:.*]]: !llvm.i32): + // CHECK: ^bb1(%[[ARG:.*]]: i32): ^true(%arg1: i32): - // CHECK: llvm.return %[[ARG]] : !llvm.i32 + // CHECK: llvm.return %[[ARG]] : i32 spv.ReturnValue %arg1 : i32 // CHECK: ^bb2: ^merge: diff --git a/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir @@ -16,7 +16,7 @@ // CHECK-LABEL: @return_value spv.func @return_value(%arg: i32) -> i32 "None" { - // CHECK: llvm.return %{{.*}} : !llvm.i32 + // CHECK: llvm.return %{{.*}} : i32 spv.ReturnValue %arg : i32 } @@ -49,7 +49,7 @@ spv.Return } -// CHECK-LABEL: llvm.func @scalar_types(%arg0: !llvm.i32, %arg1: !llvm.i1, %arg2: !llvm.double, %arg3: !llvm.float) +// CHECK-LABEL: llvm.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: !llvm.double, %arg3: !llvm.float) spv.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: f32) "None" { spv.Return } @@ -65,11 +65,11 @@ //===----------------------------------------------------------------------===// // CHECK-LABEL: llvm.func @function_calls -// CHECK-SAME: %[[ARG0:.*]]: !llvm.i32, %[[ARG1:.*]]: !llvm.i1, %[[ARG2:.*]]: !llvm.double, %[[ARG3:.*]]: !llvm.vec<2 x i64>, %[[ARG4:.*]]: !llvm.vec<2 x float> +// CHECK-SAME: %[[ARG0:.*]]: i32, %[[ARG1:.*]]: i1, %[[ARG2:.*]]: !llvm.double, %[[ARG3:.*]]: !llvm.vec<2 x i64>, %[[ARG4:.*]]: !llvm.vec<2 x float> spv.func @function_calls(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: vector<2xi64>, %arg4: vector<2xf32>) "None" { // CHECK: llvm.call @void_1() : () -> () // CHECK: llvm.call @void_2(%[[ARG3]]) : (!llvm.vec<2 x i64>) -> () - // CHECK: llvm.call @value_scalar(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!llvm.i32, !llvm.i1, !llvm.double) -> !llvm.i32 + // CHECK: llvm.call @value_scalar(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (i32, i1, !llvm.double) -> i32 // CHECK: llvm.call @value_vector(%[[ARG3]], 
%[[ARG4]]) : (!llvm.vec<2 x i64>, !llvm.vec<2 x float>) -> !llvm.vec<2 x float> spv.FunctionCall @void_1() : () -> () spv.FunctionCall @void_2(%arg3) : (vector<2xi64>) -> () diff --git a/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir @@ -123,7 +123,7 @@ // CHECK-LABEL: @smax spv.func @smax(%arg0: i16, %arg1: vector<3xi32>) "None" { - // CHECK: "llvm.intr.smax"(%{{.*}}, %{{.*}}) : (!llvm.i16, !llvm.i16) -> !llvm.i16 + // CHECK: "llvm.intr.smax"(%{{.*}}, %{{.*}}) : (i16, i16) -> i16 %0 = spv.GLSL.SMax %arg0, %arg0 : i16 // CHECK: "llvm.intr.smax"(%{{.*}}, %{{.*}}) : (!llvm.vec<3 x i32>, !llvm.vec<3 x i32>) -> !llvm.vec<3 x i32> %1 = spv.GLSL.SMax %arg1, %arg1 : vector<3xi32> @@ -136,7 +136,7 @@ // CHECK-LABEL: @smin spv.func @smin(%arg0: i16, %arg1: vector<3xi32>) "None" { - // CHECK: "llvm.intr.smin"(%{{.*}}, %{{.*}}) : (!llvm.i16, !llvm.i16) -> !llvm.i16 + // CHECK: "llvm.intr.smin"(%{{.*}}, %{{.*}}) : (i16, i16) -> i16 %0 = spv.GLSL.SMin %arg0, %arg0 : i16 // CHECK: "llvm.intr.smin"(%{{.*}}, %{{.*}}) : (!llvm.vec<3 x i32>, !llvm.vec<3 x i32>) -> !llvm.vec<3 x i32> %1 = spv.GLSL.SMin %arg1, %arg1 : vector<3xi32> diff --git a/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir @@ -6,7 +6,7 @@ // CHECK-LABEL: @logical_equal_scalar spv.func @logical_equal_scalar(%arg0: i1, %arg1: i1) "None" { - // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm.i1 + // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : i1 %0 = spv.LogicalEqual %arg0, %arg0 : i1 spv.Return } @@ -24,7 +24,7 @@ // CHECK-LABEL: @logical_not_equal_scalar spv.func @logical_not_equal_scalar(%arg0: i1, %arg1: i1) "None" { - // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm.i1 + // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : i1 %0 = spv.LogicalNotEqual %arg0, %arg0 : i1 spv.Return } @@ -42,8 +42,8 @@ // CHECK-LABEL: @logical_not_scalar spv.func @logical_not_scalar(%arg0: i1) "None" { - // CHECK: %[[CONST:.*]] = llvm.mlir.constant(true) : !llvm.i1 - // CHECK: llvm.xor %{{.*}}, %[[CONST]] : !llvm.i1 + // CHECK: %[[CONST:.*]] = llvm.mlir.constant(true) : i1 + // CHECK: llvm.xor %{{.*}}, %[[CONST]] : i1 %0 = spv.LogicalNot %arg0 : i1 spv.Return } @@ -62,7 +62,7 @@ // CHECK-LABEL: @logical_and_scalar spv.func @logical_and_scalar(%arg0: i1, %arg1: i1) "None" { - // CHECK: llvm.and %{{.*}}, %{{.*}} : !llvm.i1 + // CHECK: llvm.and %{{.*}}, %{{.*}} : i1 %0 = spv.LogicalAnd %arg0, %arg0 : i1 spv.Return } @@ -80,7 +80,7 @@ // CHECK-LABEL: @logical_or_scalar spv.func @logical_or_scalar(%arg0: i1, %arg1: i1) "None" { - // CHECK: llvm.or %{{.*}}, %{{.*}} : !llvm.i1 + // CHECK: llvm.or %{{.*}}, %{{.*}} : i1 %0 = spv.LogicalOr %arg0, %arg0 : i1 spv.Return } diff --git a/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir b/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir @@ -15,11 +15,11 @@ // CHECK-LABEL: @main // CHECK: %[[SRC:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK-NEXT: %[[DEST:.*]] = llvm.mlir.addressof @__spv__foo_bar_arg_0_descriptor_set0_binding0 : 
!llvm.ptr)>> - // CHECK-NEXT: llvm.mlir.constant(false) : !llvm.i1 - // CHECK-NEXT: "llvm.intr.memcpy"(%[[DEST]], %[[SRC]], %[[SIZE:.*]], %{{.*}}) : (!llvm.ptr)>>, !llvm.ptr, !llvm.i64, !llvm.i1) -> () + // CHECK-NEXT: llvm.mlir.constant(false) : i1 + // CHECK-NEXT: "llvm.intr.memcpy"(%[[DEST]], %[[SRC]], %[[SIZE:.*]], %{{.*}}) : (!llvm.ptr)>>, !llvm.ptr, i64, i1) -> () // CHECK-NEXT: llvm.call @__spv__foo_bar() : () -> () - // CHECK-NEXT: llvm.mlir.constant(false) : !llvm.i1 - // CHECK-NEXT: "llvm.intr.memcpy"(%[[SRC]], %[[DEST]], %[[SIZE]], %{{.*}}) : (!llvm.ptr, !llvm.ptr)>>, !llvm.i64, !llvm.i1) -> () + // CHECK-NEXT: llvm.mlir.constant(false) : i1 + // CHECK-NEXT: "llvm.intr.memcpy"(%[[SRC]], %[[DEST]], %[[SIZE]], %{{.*}}) : (!llvm.ptr, !llvm.ptr)>>, i64, i1) -> () spv.module @__spv__foo Logical GLSL450 requires #spv.vce { spv.globalVariable @bar_arg_0 bind(0, 0) : !spv.ptr [0])>, StorageBuffer> diff --git a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir @@ -6,11 +6,11 @@ // CHECK-LABEL: @access_chain spv.func @access_chain() "None" { - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 + // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32 %0 = spv.constant 1: i32 %1 = spv.Variable : !spv.ptr)>, Function> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %[[ONE]], %[[ONE]]] : (!llvm.ptr)>>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.ptr + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %[[ONE]], %[[ONE]]] : (!llvm.ptr)>>, i32, i32, i32) -> !llvm.ptr %2 = spv.AccessChain %1[%0, %0] : !spv.ptr)>, Function>, i32, i32 spv.Return } @@ -18,8 +18,8 @@ // CHECK-LABEL: @access_chain_array spv.func @access_chain_array(%arg0 : i32) "None" { %0 = spv.Variable : !spv.ptr>, Function> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %{{.*}}] : (!llvm.ptr>>, !llvm.i32, !llvm.i32) -> !llvm.ptr> + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %{{.*}}] : (!llvm.ptr>>, i32, i32) -> !llvm.ptr> %1 = spv.AccessChain %0[%arg0] : !spv.ptr>, Function>, i32 %2 = spv.Load "Function" %1 ["Volatile"] : !spv.array<4xf32> spv.Return @@ -46,7 +46,7 @@ } spv.module Logical GLSL450 { - // CHECK: llvm.mlir.global external @bar_descriptor_set0_binding0() : !llvm.i32 + // CHECK: llvm.mlir.global external @bar_descriptor_set0_binding0() : i32 // CHECK-LABEL: @foo // CHECK: llvm.mlir.addressof @bar_descriptor_set0_binding0 : !llvm.ptr spv.globalVariable @bar bind(0, 0) : !spv.ptr @@ -57,7 +57,7 @@ } spv.module @name Logical GLSL450 { - // CHECK: llvm.mlir.global external @name_bar_descriptor_set0_binding0() : !llvm.i32 + // CHECK: llvm.mlir.global external @name_bar_descriptor_set0_binding0() : i32 // CHECK-LABEL: @foo // CHECK: llvm.mlir.addressof @name_bar_descriptor_set0_binding0 : !llvm.ptr spv.globalVariable @bar bind(0, 0) : !spv.ptr @@ -161,20 +161,20 @@ // CHECK-LABEL: @variable_scalar spv.func @variable_scalar() "None" { - // CHECK: %[[SIZE1:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: llvm.alloca %[[SIZE1]] x !llvm.float : (!llvm.i32) -> !llvm.ptr + // CHECK: %[[SIZE1:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: llvm.alloca 
%[[SIZE1]] x !llvm.float : (i32) -> !llvm.ptr %0 = spv.Variable : !spv.ptr - // CHECK: %[[SIZE2:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: llvm.alloca %[[SIZE2]] x !llvm.i8 : (!llvm.i32) -> !llvm.ptr + // CHECK: %[[SIZE2:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: llvm.alloca %[[SIZE2]] x i8 : (i32) -> !llvm.ptr %1 = spv.Variable : !spv.ptr spv.Return } // CHECK-LABEL: @variable_scalar_with_initialization spv.func @variable_scalar_with_initialization() "None" { - // CHECK: %[[VALUE:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64 - // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x !llvm.i64 : (!llvm.i32) -> !llvm.ptr + // CHECK: %[[VALUE:.*]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x i64 : (i32) -> !llvm.ptr // CHECK: llvm.store %[[VALUE]], %[[ALLOCATED]] : !llvm.ptr %c = spv.constant 0 : i64 %0 = spv.Variable init(%c) : !spv.ptr @@ -183,8 +183,8 @@ // CHECK-LABEL: @variable_vector spv.func @variable_vector() "None" { - // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: llvm.alloca %[[SIZE]] x !llvm.vec<3 x float> : (!llvm.i32) -> !llvm.ptr> + // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: llvm.alloca %[[SIZE]] x !llvm.vec<3 x float> : (i32) -> !llvm.ptr> %0 = spv.Variable : !spv.ptr, Function> spv.Return } @@ -192,8 +192,8 @@ // CHECK-LABEL: @variable_vector_with_initialization spv.func @variable_vector_with_initialization() "None" { // CHECK: %[[VALUE:.*]] = llvm.mlir.constant(dense : vector<3xi1>) : !llvm.vec<3 x i1> - // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x !llvm.vec<3 x i1> : (!llvm.i32) -> !llvm.ptr> + // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x !llvm.vec<3 x i1> : (i32) -> !llvm.ptr> // CHECK: llvm.store %[[VALUE]], %[[ALLOCATED]] : !llvm.ptr> %c = spv.constant dense : vector<3xi1> %0 = spv.Variable init(%c) : !spv.ptr, Function> @@ -202,8 +202,8 @@ // CHECK-LABEL: @variable_array spv.func @variable_array() "None" { - // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // CHECK: llvm.alloca %[[SIZE]] x !llvm.array<10 x i32> : (!llvm.i32) -> !llvm.ptr> + // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: llvm.alloca %[[SIZE]] x !llvm.array<10 x i32> : (i32) -> !llvm.ptr> %0 = spv.Variable : !spv.ptr, Function> spv.Return } diff --git a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir @@ -13,8 +13,8 @@ // CHECK-LABEL: @composite_extract_vector spv.func @composite_extract_vector(%arg: vector<3xf32>) "None" { - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 - // CHECK: llvm.extractelement %{{.*}}[%[[ZERO]] : !llvm.i32] : !llvm.vec<3 x float> + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: llvm.extractelement %{{.*}}[%[[ZERO]] : i32] : !llvm.vec<3 x float> %0 = spv.CompositeExtract %arg[0 : i32] : vector<3xf32> spv.Return } @@ -32,8 +32,8 @@ // CHECK-LABEL: @composite_insert_vector spv.func @composite_insert_vector(%arg0: vector<3xf32>, %arg1: f32) "None" { - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 - // 
CHECK: llvm.insertelement %{{.*}}, %{{.*}}[%[[ONE]] : !llvm.i32] : !llvm.vec<3 x float> + // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: llvm.insertelement %{{.*}}, %{{.*}}[%[[ONE]] : i32] : !llvm.vec<3 x float> %0 = spv.CompositeInsert %arg1, %arg0[1 : i32] : f32 into vector<3xf32> spv.Return } @@ -44,9 +44,9 @@ // CHECK-LABEL: @select_scalar spv.func @select_scalar(%arg0: i1, %arg1: vector<3xi32>, %arg2: f32) "None" { - // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.vec<3 x i32> + // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, !llvm.vec<3 x i32> %0 = spv.Select %arg0, %arg1, %arg1 : i1, vector<3xi32> - // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.float + // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, !llvm.float %1 = spv.Select %arg0, %arg2, %arg2 : i1, f32 spv.Return } @@ -65,7 +65,7 @@ // CHECK: module { // CHECK-NEXT: llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32)> { // CHECK-NEXT: %[[UNDEF:.*]] = llvm.mlir.undef : !llvm.struct<(i32)> -// CHECK-NEXT: %[[VAL:.*]] = llvm.mlir.constant(31 : i32) : !llvm.i32 +// CHECK-NEXT: %[[VAL:.*]] = llvm.mlir.constant(31 : i32) : i32 // CHECK-NEXT: %[[RET:.*]] = llvm.insertvalue %[[VAL]], %[[UNDEF]][0 : i32] : !llvm.struct<(i32)> // CHECK-NEXT: llvm.return %[[RET]] : !llvm.struct<(i32)> // CHECK-NEXT: } @@ -84,13 +84,13 @@ // CHECK: module { // CHECK-NEXT: llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32, array<3 x i32>)> { // CHECK-NEXT: %[[UNDEF:.*]] = llvm.mlir.undef : !llvm.struct<(i32, array<3 x i32>)> -// CHECK-NEXT: %[[EM:.*]] = llvm.mlir.constant(18 : i32) : !llvm.i32 +// CHECK-NEXT: %[[EM:.*]] = llvm.mlir.constant(18 : i32) : i32 // CHECK-NEXT: %[[T0:.*]] = llvm.insertvalue %[[EM]], %[[UNDEF]][0 : i32] : !llvm.struct<(i32, array<3 x i32>)> -// CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(32 : i32) : !llvm.i32 +// CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(32 : i32) : i32 // CHECK-NEXT: %[[T1:.*]] = llvm.insertvalue %[[C0]], %[[T0]][1 : i32, 0 : i32] : !llvm.struct<(i32, array<3 x i32>)> -// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 +// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK-NEXT: %[[T2:.*]] = llvm.insertvalue %[[C1]], %[[T1]][1 : i32, 1 : i32] : !llvm.struct<(i32, array<3 x i32>)> -// CHECK-NEXT: %[[C2:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 +// CHECK-NEXT: %[[C2:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK-NEXT: %[[RET:.*]] = llvm.insertvalue %[[C2]], %[[T2]][1 : i32, 2 : i32] : !llvm.struct<(i32, array<3 x i32>)> // CHECK-NEXT: llvm.return %[[RET]] : !llvm.struct<(i32, array<3 x i32>)> // CHECK-NEXT: } diff --git a/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir --- a/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir +++ b/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir @@ -6,18 +6,18 @@ // CHECK-LABEL: @shift_right_arithmetic_scalar spv.func @shift_right_arithmetic_scalar(%arg0: i32, %arg1: si32, %arg2 : i16, %arg3 : ui16) "None" { - // CHECK: llvm.ashr %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.ashr %{{.*}}, %{{.*}} : i32 %0 = spv.ShiftRightArithmetic %arg0, %arg0 : i32, i32 - // CHECK: llvm.ashr %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.ashr %{{.*}}, %{{.*}} : i32 %1 = spv.ShiftRightArithmetic %arg0, %arg1 : i32, si32 - // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32 - // CHECK: llvm.ashr %{{.*}}, %[[SEXT]] : !llvm.i32 + // CHECK: %[[SEXT:.*]] = llvm.sext 
%{{.*}} : i16 to i32 + // CHECK: llvm.ashr %{{.*}}, %[[SEXT]] : i32 %2 = spv.ShiftRightArithmetic %arg0, %arg2 : i32, i16 - // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : !llvm.i16 to !llvm.i32 - // CHECK: llvm.ashr %{{.*}}, %[[ZEXT]] : !llvm.i32 + // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32 + // CHECK: llvm.ashr %{{.*}}, %[[ZEXT]] : i32 %3 = spv.ShiftRightArithmetic %arg0, %arg3 : i32, ui16 spv.Return } @@ -46,18 +46,18 @@ // CHECK-LABEL: @shift_right_logical_scalar spv.func @shift_right_logical_scalar(%arg0: i32, %arg1: si32, %arg2 : si16, %arg3 : ui16) "None" { - // CHECK: llvm.lshr %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.lshr %{{.*}}, %{{.*}} : i32 %0 = spv.ShiftRightLogical %arg0, %arg0 : i32, i32 - // CHECK: llvm.lshr %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.lshr %{{.*}}, %{{.*}} : i32 %1 = spv.ShiftRightLogical %arg0, %arg1 : i32, si32 - // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32 - // CHECK: llvm.lshr %{{.*}}, %[[SEXT]] : !llvm.i32 + // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : i16 to i32 + // CHECK: llvm.lshr %{{.*}}, %[[SEXT]] : i32 %2 = spv.ShiftRightLogical %arg0, %arg2 : i32, si16 - // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : !llvm.i16 to !llvm.i32 - // CHECK: llvm.lshr %{{.*}}, %[[ZEXT]] : !llvm.i32 + // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32 + // CHECK: llvm.lshr %{{.*}}, %[[ZEXT]] : i32 %3 = spv.ShiftRightLogical %arg0, %arg3 : i32, ui16 spv.Return } @@ -86,18 +86,18 @@ // CHECK-LABEL: @shift_left_logical_scalar spv.func @shift_left_logical_scalar(%arg0: i32, %arg1: si32, %arg2 : i16, %arg3 : ui16) "None" { - // CHECK: llvm.shl %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.shl %{{.*}}, %{{.*}} : i32 %0 = spv.ShiftLeftLogical %arg0, %arg0 : i32, i32 - // CHECK: llvm.shl %{{.*}}, %{{.*}} : !llvm.i32 + // CHECK: llvm.shl %{{.*}}, %{{.*}} : i32 %1 = spv.ShiftLeftLogical %arg0, %arg1 : i32, si32 - // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : !llvm.i16 to !llvm.i32 - // CHECK: llvm.shl %{{.*}}, %[[SEXT]] : !llvm.i32 + // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : i16 to i32 + // CHECK: llvm.shl %{{.*}}, %[[SEXT]] : i32 %2 = spv.ShiftLeftLogical %arg0, %arg2 : i32, i16 - // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : !llvm.i16 to !llvm.i32 - // CHECK: llvm.shl %{{.*}}, %[[ZEXT]] : !llvm.i32 + // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32 + // CHECK: llvm.shl %{{.*}}, %[[ZEXT]] : i32 %3 = spv.ShiftLeftLogical %arg0, %arg3 : i32, ui16 spv.Return } diff --git a/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir b/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir --- a/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir +++ b/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir @@ -8,8 +8,8 @@ // An external function is transformed into the glue around calling an interface function. // CHECK-LABEL: @external -// CHECK: %[[ALLOC0:.*]]: !llvm.ptr, %[[ALIGN0:.*]]: !llvm.ptr, %[[OFFSET0:.*]]: !llvm.i64, %[[SIZE00:.*]]: !llvm.i64, %[[SIZE01:.*]]: !llvm.i64, %[[STRIDE00:.*]]: !llvm.i64, %[[STRIDE01:.*]]: !llvm.i64, -// CHECK: %[[ALLOC1:.*]]: !llvm.ptr, %[[ALIGN1:.*]]: !llvm.ptr, %[[OFFSET1:.*]]: !llvm.i64) +// CHECK: %[[ALLOC0:.*]]: !llvm.ptr, %[[ALIGN0:.*]]: !llvm.ptr, %[[OFFSET0:.*]]: i64, %[[SIZE00:.*]]: i64, %[[SIZE01:.*]]: i64, %[[STRIDE00:.*]]: i64, %[[STRIDE01:.*]]: i64, +// CHECK: %[[ALLOC1:.*]]: !llvm.ptr, %[[ALIGN1:.*]]: !llvm.ptr, %[[OFFSET1:.*]]: i64) func private @external(%arg0: memref, %arg1: memref) // Populate the descriptor for arg0. 
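// Editorial note on the FileCheck syntax used below (and throughout this
// patch): a pattern such as %[[DESC00:.*]] matches a literal '%' and
// captures the text matched by the regex '.*' into the variable DESC00;
// later uses of %[[DESC00]] must then match the same text, while %{{.*}}
// matches without capturing. A minimal, hypothetical example:
//   // CHECK: %[[D:.*]] = llvm.mlir.undef : !llvm.struct<(i32)>
//   // CHECK: llvm.insertvalue %{{.*}}, %[[D]][0] : !llvm.struct<(i32)>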
// CHECK: %[[DESC00:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> @@ -67,7 +67,7 @@ // CHECK: %[[OFFSET1:.*]] = llvm.extractvalue %[[DESC1]][2] // Forward the values to the call. - // CHECK: llvm.call @external(%[[ALLOC0]], %[[ALIGN0]], %[[OFFSET0]], %[[SIZE00]], %[[SIZE01]], %[[STRIDE00]], %[[STRIDE01]], %[[ALLOC1]], %[[ALIGN1]], %[[OFFSET1]]) : (!llvm.ptr, !llvm.ptr, !llvm.i64, !llvm.i64, !llvm.i64, !llvm.i64, !llvm.i64, !llvm.ptr, !llvm.ptr, !llvm.i64) -> () + // CHECK: llvm.call @external(%[[ALLOC0]], %[[ALIGN0]], %[[OFFSET0]], %[[SIZE00]], %[[SIZE01]], %[[STRIDE00]], %[[STRIDE01]], %[[ALLOC1]], %[[ALIGN1]], %[[OFFSET1]]) : (!llvm.ptr, !llvm.ptr, i64, i64, i64, i64, i64, !llvm.ptr, !llvm.ptr, i64) -> () call @external(%0#0, %0#1) : (memref, memref) -> () return } @@ -93,7 +93,7 @@ // CHECK: %[[STRIDE:.*]] = llvm.extractvalue %[[DESC]][4, 0] // Forward the descriptor components to the call. - // CHECK: llvm.call @callee(%[[ALLOC]], %[[ALIGN]], %[[OFFSET]], %[[SIZE]], %[[STRIDE]], %{{.*}}) : (!llvm.ptr, !llvm.ptr, !llvm.i64, !llvm.i64, !llvm.i64, !llvm.i64) -> () + // CHECK: llvm.call @callee(%[[ALLOC]], %[[ALIGN]], %[[OFFSET]], %[[SIZE]], %[[STRIDE]], %{{.*}}) : (!llvm.ptr, !llvm.ptr, i64, i64, i64, i64) -> () // EMIT_C_ATTRIBUTE-NOT: @mlir_ciface_callee @@ -132,7 +132,7 @@ // CHECK: %[[TABLES_SIZE:.*]] = llvm.mul %[[DOUBLE_RANK_INC]], %[[IDX_SIZE]] // CHECK: %[[ALLOC_SIZE:.*]] = llvm.add %[[DOUBLE_PTR_SIZE]], %[[TABLES_SIZE]] // CHECK: %[[FALSE:.*]] = llvm.mlir.constant(false) - // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOC_SIZE]] x !llvm.i8 + // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOC_SIZE]] x i8 // CHECK: %[[SOURCE:.*]] = llvm.extractvalue %[[CALL_RES]][1] // CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[SOURCE]], %[[ALLOC_SIZE]], %[[FALSE]]) // CHECK: llvm.call @free(%[[SOURCE]]) @@ -187,7 +187,7 @@ // CHECK: %[[RES_2:.*]] = llvm.extractvalue %[[CALL_RES]][1] %0:2 = call @return_two_var_memref(%arg0) : (memref<4x3xf32>) -> (memref<*xf32>, memref<*xf32>) - // CHECK: %[[ALLOCA_1:.*]] = llvm.alloca %{{.*}} x !llvm.i8 + // CHECK: %[[ALLOCA_1:.*]] = llvm.alloca %{{.*}} x i8 // CHECK: %[[SOURCE_1:.*]] = llvm.extractvalue %[[RES_1:.*]][1] : ![[DESC_TYPE:.*]] // CHECK: "llvm.intr.memcpy"(%[[ALLOCA_1]], %[[SOURCE_1]], %{{.*}}, %[[FALSE:.*]]) // CHECK: llvm.call @free(%[[SOURCE_1]]) @@ -195,7 +195,7 @@ // CHECK: %[[DESC_11:.*]] = llvm.insertvalue %{{.*}}, %[[DESC_1]][0] // CHECK: llvm.insertvalue %[[ALLOCA_1]], %[[DESC_11]][1] - // CHECK: %[[ALLOCA_2:.*]] = llvm.alloca %{{.*}} x !llvm.i8 + // CHECK: %[[ALLOCA_2:.*]] = llvm.alloca %{{.*}} x i8 // CHECK: %[[SOURCE_2:.*]] = llvm.extractvalue %[[RES_2:.*]][1] // CHECK: "llvm.intr.memcpy"(%[[ALLOCA_2]], %[[SOURCE_2]], %{{.*}}, %[[FALSE]]) // CHECK: llvm.call @free(%[[SOURCE_2]]) diff --git a/mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir b/mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-argattrs.mlir @@ -12,8 +12,8 @@ // CHECK-LABEL: func @check_multiple // Make sure argument attributes are attached to the right argument. We match // commas in the argument list for this purpose.
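// Editorial sketch (hypothetical function and attribute, not part of this
// test): because each memref argument is expanded into several LLVM
// arguments (allocated pointer, aligned pointer, offset, then sizes and
// strides), an attribute written on the memref is replicated onto every
// argument it expands to, e.g.
//   func @f(%m: memref<8xf32> {my.attr = true})
// lowers approximately to
//   llvm.func @f(%a0: !llvm.ptr<f32> {my.attr = true},
//                %a1: !llvm.ptr<f32> {my.attr = true},
//                %a2: i64 {my.attr = true}, %a3: i64 {my.attr = true},
//                %a4: i64 {my.attr = true})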
-// CHECK: %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: !llvm{{.*}} {first.arg = true}, -// CHECK-SAME: %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}) +// CHECK: %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: i{{.*}} {first.arg = true}, +// CHECK-SAME: %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: i{{.*}} {second.arg = 42 : i32}) func @check_multiple(%first: memref {first.arg = true}, %second: memref {second.arg = 42 : i32}) { return } diff --git a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir @@ -3,11 +3,11 @@ // CHECK-LABEL: func @check_strided_memref_arguments( // CHECK-COUNT-2: !llvm.ptr -// CHECK-COUNT-5: !llvm.i64 +// CHECK-COUNT-5: i64 // CHECK-COUNT-2: !llvm.ptr -// CHECK-COUNT-5: !llvm.i64 +// CHECK-COUNT-5: i64 // CHECK-COUNT-2: !llvm.ptr -// CHECK-COUNT-5: !llvm.i64 +// CHECK-COUNT-5: i64 func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)->(20 * i + j + 1)>>, %dynamic : memref(M * i + j + 1)>>, %mixed : memref<10x?xf32, affine_map<(i,j)[M]->(M * i + j + 1)>>) { @@ -16,31 +16,31 @@ // CHECK-LABEL: func @check_arguments // CHECK-COUNT-2: !llvm.ptr -// CHECK-COUNT-5: !llvm.i64 +// CHECK-COUNT-5: i64 // CHECK-COUNT-2: !llvm.ptr -// CHECK-COUNT-5: !llvm.i64 +// CHECK-COUNT-5: i64 // CHECK-COUNT-2: !llvm.ptr -// CHECK-COUNT-5: !llvm.i64 +// CHECK-COUNT-5: i64 func @check_arguments(%static: memref<10x20xf32>, %dynamic : memref, %mixed : memref<10x?xf32>) { return } // CHECK-LABEL: func @mixed_alloc( -// CHECK: %[[M:.*]]: !llvm.i64, %[[N:.*]]: !llvm.i64) -> !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> { +// CHECK: %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> { func @mixed_alloc(%arg0: index, %arg1: index) -> memref { -// CHECK: %[[c42:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64 -// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : !llvm.i64 -// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : !llvm.i64 +// CHECK: %[[c42:.*]] = llvm.mlir.constant(42 : index) : i64 +// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : i64 +// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : i64 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr +// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr // CHECK-NEXT: llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr, ptr, 
i64, array<3 x i64>, array<3 x i64>)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[c42]], %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> @@ -64,19 +64,19 @@ } // CHECK-LABEL: func @dynamic_alloc( -// CHECK: %[[M:.*]]: !llvm.i64, %[[N:.*]]: !llvm.i64) -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> { +// CHECK: %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> { func @dynamic_alloc(%arg0: index, %arg1: index) -> memref { -// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : !llvm.i64 +// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr +// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr // CHECK-NEXT: llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> @@ -90,18 +90,18 @@ // ----- // CHECK-LABEL: func @dynamic_alloca -// CHECK: %[[M:.*]]: !llvm.i64, %[[N:.*]]: !llvm.i64) -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> { +// CHECK: %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> { func @dynamic_alloca(%arg0: index, %arg1: index) -> memref { -// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: %[[num_elems:.*]] = llvm.mul %[[N]], %[[M]] : !llvm.i64 +// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: %[[num_elems:.*]] = llvm.mul %[[N]], %[[M]] : i64 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// CHECK-NEXT: %[[sz_bytes:.*]] = 
llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: %[[allocated:.*]] = llvm.alloca %[[sz_bytes]] x !llvm.float : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr +// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// CHECK-NEXT: %[[allocated:.*]] = llvm.alloca %[[sz_bytes]] x !llvm.float : (i64) -> !llvm.ptr // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[allocated]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[allocated]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> @@ -112,7 +112,7 @@ // Test with explicitly specified alignment. llvm.alloca takes care of the // alignment. The same pointer is thus used for allocation and aligned // accesses. -// CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (!llvm.i64) -> !llvm.ptr +// CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (i64) -> !llvm.ptr // CHECK: %[[desc:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[desc1:.*]] = llvm.insertvalue %[[alloca_aligned]], %[[desc]][0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: llvm.insertvalue %[[alloca_aligned]], %[[desc1]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> @@ -132,15 +132,15 @@ // CHECK-LABEL: func @stdlib_aligned_alloc({{.*}}) -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> { // ALIGNED-ALLOC-LABEL: func @stdlib_aligned_alloc({{.*}}) -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> { func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> { -// ALIGNED-ALLOC-NEXT: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64 -// ALIGNED-ALLOC-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64 -// ALIGNED-ALLOC-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// ALIGNED-ALLOC-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64 +// ALIGNED-ALLOC-NEXT: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : i64 +// ALIGNED-ALLOC-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64 +// ALIGNED-ALLOC-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 +// ALIGNED-ALLOC-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64 // ALIGNED-ALLOC-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// ALIGNED-ALLOC-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// ALIGNED-ALLOC-NEXT: %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// ALIGNED-ALLOC-NEXT: %[[alignment:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64 -// ALIGNED-ALLOC-NEXT: %[[allocated:.*]] = llvm.call @aligned_alloc(%[[alignment]], %[[bytes]]) : (!llvm.i64, !llvm.i64) -> !llvm.ptr +// ALIGNED-ALLOC-NEXT: %[[gep:.*]] = 
llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr +// ALIGNED-ALLOC-NEXT: %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// ALIGNED-ALLOC-NEXT: %[[alignment:.*]] = llvm.mlir.constant(32 : index) : i64 +// ALIGNED-ALLOC-NEXT: %[[allocated:.*]] = llvm.call @aligned_alloc(%[[alignment]], %[[bytes]]) : (i64, i64) -> !llvm.ptr // ALIGNED-ALLOC-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr to !llvm.ptr %0 = alloc() {alignment = 32} : memref<32x18xf32> // Do another alloc just to test that we have a unique declaration for @@ -149,19 +149,19 @@ %1 = alloc() {alignment = 64} : memref<4096xf32> // Alignment is to element type boundaries (minimum 16 bytes). - // ALIGNED-ALLOC: %[[c32:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64 + // ALIGNED-ALLOC: %[[c32:.*]] = llvm.mlir.constant(32 : index) : i64 // ALIGNED-ALLOC-NEXT: llvm.call @aligned_alloc(%[[c32]] %2 = alloc() : memref<4096xvector<8xf32>> // The minimum alignment is 16 bytes unless explicitly specified. - // ALIGNED-ALLOC: %[[c16:.*]] = llvm.mlir.constant(16 : index) : !llvm.i64 + // ALIGNED-ALLOC: %[[c16:.*]] = llvm.mlir.constant(16 : index) : i64 // ALIGNED-ALLOC-NEXT: llvm.call @aligned_alloc(%[[c16]], %3 = alloc() : memref<4096xvector<2xf32>> - // ALIGNED-ALLOC: %[[c8:.*]] = llvm.mlir.constant(8 : index) : !llvm.i64 + // ALIGNED-ALLOC: %[[c8:.*]] = llvm.mlir.constant(8 : index) : i64 // ALIGNED-ALLOC-NEXT: llvm.call @aligned_alloc(%[[c8]], %4 = alloc() {alignment = 8} : memref<1024xvector<4xf32>> // Bump the memref allocation size if its size is not a multiple of alignment. - // ALIGNED-ALLOC: %[[c32:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64 - // ALIGNED-ALLOC-NEXT: llvm.mlir.constant(1 : index) : !llvm.i64 + // ALIGNED-ALLOC: %[[c32:.*]] = llvm.mlir.constant(32 : index) : i64 + // ALIGNED-ALLOC-NEXT: llvm.mlir.constant(1 : index) : i64 // ALIGNED-ALLOC-NEXT: llvm.sub // ALIGNED-ALLOC-NEXT: llvm.add // ALIGNED-ALLOC-NEXT: llvm.urem @@ -169,7 +169,7 @@ // ALIGNED-ALLOC-NEXT: llvm.call @aligned_alloc(%[[c32]], %[[SIZE_ALIGNED]]) %5 = alloc() {alignment = 32} : memref<100xf32> // Bump alignment to the next power of two if it isn't. 
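// Editorial worked example (not checked by the test): the sub/add/urem
// sequence above rounds the allocation size up to a multiple of the
// alignment, i.e. aligned = (size + alignment - 1) - ((size + alignment - 1)
// urem alignment). For %5, memref<100xf32> needs 100 * 4 = 400 bytes, so
// with 32-byte alignment: 400 + 31 = 431, 431 urem 32 = 15, and 431 - 15 =
// 416 bytes are requested from aligned_alloc. Separately, a natural
// alignment that is not already a power of two is rounded up to one, which
// is where the constant 128 checked below comes from.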
- // ALIGNED-ALLOC: %[[c128:.*]] = llvm.mlir.constant(128 : index) : !llvm.i64 + // ALIGNED-ALLOC: %[[c128:.*]] = llvm.mlir.constant(128 : index) : i64 // ALIGNED-ALLOC: llvm.call @aligned_alloc(%[[c128]] %6 = alloc(%N) : memref> return %0 : memref<32x18xf32> @@ -177,15 +177,15 @@ // CHECK-LABEL: func @mixed_load( // CHECK-COUNT-2: !llvm.ptr, -// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64 -// CHECK: %[[I:.*]]: !llvm.i64, -// CHECK: %[[J:.*]]: !llvm.i64) +// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64 +// CHECK: %[[I:.*]]: i64, +// CHECK: %[[J:.*]]: i64) func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64 +// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: llvm.load %[[addr]] : !llvm.ptr %0 = load %mixed[%i, %j] : memref<42x?xf32> return @@ -194,19 +194,19 @@ // CHECK-LABEL: func @dynamic_load( // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr -// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64 +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64 func @dynamic_load(%dynamic : memref, %i : index, %j : index) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64 +// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: llvm.load %[[addr]] : !llvm.ptr %0 = load %dynamic[%i, %j] : memref return @@ -215,33 +215,33 @@ // CHECK-LABEL: func @prefetch // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr -// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: 
%[[I:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64 +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64 func @prefetch(%A : memref, %i : index, %j : index) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// CHECK-NEXT: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 -// CHECK-NEXT: [[C3:%.*]] = llvm.mlir.constant(3 : i32) : !llvm.i32 -// CHECK-NEXT: [[C1_1:%.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 -// CHECK-NEXT: "llvm.intr.prefetch"(%[[addr]], [[C1]], [[C3]], [[C1_1]]) : (!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.i32) -> () +// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64 +// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr +// CHECK-NEXT: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: [[C3:%.*]] = llvm.mlir.constant(3 : i32) : i32 +// CHECK-NEXT: [[C1_1:%.*]] = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: "llvm.intr.prefetch"(%[[addr]], [[C1]], [[C3]], [[C1_1]]) : (!llvm.ptr, i32, i32, i32) -> () prefetch %A[%i, %j], write, locality<3>, data : memref -// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 -// CHECK: [[C0_1:%.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 -// CHECK: [[C1_2:%.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32 -// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0]], [[C0_1]], [[C1_2]]) : (!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.i32) -> () +// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32 +// CHECK: [[C0_1:%.*]] = llvm.mlir.constant(0 : i32) : i32 +// CHECK: [[C1_2:%.*]] = llvm.mlir.constant(1 : i32) : i32 +// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0]], [[C0_1]], [[C1_2]]) : (!llvm.ptr, i32, i32, i32) -> () prefetch %A[%i, %j], read, locality<0>, data : memref -// CHECK: [[C0_2:%.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 -// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : !llvm.i32 -// CHECK: [[C0_3:%.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32 -// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0_2]], [[C2]], [[C0_3]]) : (!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.i32) -> () +// CHECK: [[C0_2:%.*]] = llvm.mlir.constant(0 : i32) : i32 +// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32 +// CHECK: [[C0_3:%.*]] = llvm.mlir.constant(0 : i32) : i32 +// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0_2]], [[C2]], [[C0_3]]) : (!llvm.ptr, i32, i32, i32) -> () prefetch %A[%i, %j], read, locality<2>, instr : memref return } @@ -249,19 +249,19 @@ // CHECK-LABEL: func @dynamic_store // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr -// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: 
%[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64 +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64 func @dynamic_store(%dynamic : memref, %i : index, %j : index, %val : f32) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64 +// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr store %val, %dynamic[%i, %j] : memref return @@ -270,19 +270,19 @@ // CHECK-LABEL: func @mixed_store // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr -// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64 +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64 func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64 +// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr store %val, %mixed[%i, %j] : memref<42x?xf32> return @@ -339,11 +339,11 @@ // CHECK-LABEL: func @memref_cast_ranked_to_unranked func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) { -// CHECK-DAG: %[[c:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> : (!llvm.i64) -> !llvm.ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>> +// CHECK-DAG: %[[c:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK-DAG: %[[p:.*]] = llvm.alloca %[[c]] x 
!llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> : (i64) -> !llvm.ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>> // CHECK-DAG: llvm.store %{{.*}}, %[[p]] : !llvm.ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>> // CHECK-DAG: %[[p2:.*]] = llvm.bitcast %[[p]] : !llvm.ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>> to !llvm.ptr -// CHECK-DAG: %[[r:.*]] = llvm.mlir.constant(3 : i64) : !llvm.i64 +// CHECK-DAG: %[[r:.*]] = llvm.mlir.constant(3 : i64) : i64 // CHECK : llvm.mlir.undef : !llvm.struct<(i64, ptr)> // CHECK-DAG: llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i64, ptr)> // CHECK-DAG: llvm.insertvalue %[[p2]], %{{.*}}[1] : !llvm.struct<(i64, ptr)> @@ -361,7 +361,7 @@ // CHECK-LABEL: func @mixed_memref_dim func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) { -// CHECK: llvm.mlir.constant(42 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(42 : index) : i64 %c0 = constant 0 : index %0 = dim %mixed, %c0 : memref<42x?x?x13x?xf32> // CHECK: llvm.extractvalue %[[ld:.*]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> @@ -370,7 +370,7 @@ // CHECK: llvm.extractvalue %[[ld]][3, 2] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> %c2 = constant 2 : index %2 = dim %mixed, %c2 : memref<42x?x?x13x?xf32> -// CHECK: llvm.mlir.constant(13 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(13 : index) : i64 %c3 = constant 3 : index %3 = dim %mixed, %c3 : memref<42x?x?x13x?xf32> // CHECK: llvm.extractvalue %[[ld]][3, 4] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> @@ -380,7 +380,7 @@ } // CHECK-LABEL: @memref_dim_with_dyn_index -// CHECK-SAME: %[[ALLOC_PTR:.*]]: !llvm.ptr, %[[ALIGN_PTR:.*]]: !llvm.ptr, %[[OFFSET:.*]]: !llvm.i64, %[[SIZE0:.*]]: !llvm.i64, %[[SIZE1:.*]]: !llvm.i64, %[[STRIDE0:.*]]: !llvm.i64, %[[STRIDE1:.*]]: !llvm.i64, %[[IDX:.*]]: !llvm.i64) -> !llvm.i64 +// CHECK-SAME: %[[ALLOC_PTR:.*]]: !llvm.ptr, %[[ALIGN_PTR:.*]]: !llvm.ptr, %[[OFFSET:.*]]: i64, %[[SIZE0:.*]]: i64, %[[SIZE1:.*]]: i64, %[[STRIDE0:.*]]: i64, %[[STRIDE1:.*]]: i64, %[[IDX:.*]]: i64) -> i64 func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index { // CHECK-NEXT: %[[DESCR0:.*]] = llvm.mlir.undef : [[DESCR_TY:!llvm.struct<\(ptr, ptr, i64, array<2 x i64>, array<2 x i64>\)>]] // CHECK-NEXT: %[[DESCR1:.*]] = llvm.insertvalue %[[ALLOC_PTR]], %[[DESCR0]][0] : [[DESCR_TY]] @@ -390,14 +390,14 @@ // CHECK-NEXT: %[[DESCR5:.*]] = llvm.insertvalue %[[STRIDE0]], %[[DESCR4]][4, 0] : [[DESCR_TY]] // CHECK-NEXT: %[[DESCR6:.*]] = llvm.insertvalue %[[SIZE1]], %[[DESCR5]][3, 1] : [[DESCR_TY]] // CHECK-NEXT: %[[DESCR7:.*]] = llvm.insertvalue %[[STRIDE1]], %[[DESCR6]][4, 1] : [[DESCR_TY]] - // CHECK-DAG: %[[C0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 - // CHECK-DAG: %[[C1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // CHECK-DAG: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 + // CHECK-DAG: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK-DAG: %[[SIZES:.*]] = llvm.extractvalue %[[DESCR7]][3] : [[DESCR_TY]] - // CHECK-DAG: %[[SIZES_PTR:.*]] = llvm.alloca %[[C1]] x !llvm.array<2 x i64> : (!llvm.i64) -> !llvm.ptr> + // CHECK-DAG: %[[SIZES_PTR:.*]] = llvm.alloca %[[C1]] x !llvm.array<2 x i64> : (i64) -> !llvm.ptr> // CHECK-DAG: llvm.store %[[SIZES]], %[[SIZES_PTR]] : !llvm.ptr> - // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][%[[C0]], %[[IDX]]] : (!llvm.ptr>, !llvm.i64, !llvm.i64) -> !llvm.ptr + // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][%[[C0]], %[[IDX]]] : 
(!llvm.ptr>, i64, i64) -> !llvm.ptr // CHECK-DAG: %[[RESULT:.*]] = llvm.load %[[RESULT_PTR]] : !llvm.ptr - // CHECK-DAG: llvm.return %[[RESULT]] : !llvm.i64 + // CHECK-DAG: llvm.return %[[RESULT]] : i64 %result = dim %arg, %idx : memref<3x?xf32> return %result : index } @@ -415,15 +415,15 @@ // CHECK: [[ALIGNED_PTR:%.*]] = llvm.extractvalue [[INPUT]][1] : [[TY]] // CHECK: [[OUT_1:%.*]] = llvm.insertvalue [[BASE_PTR]], [[OUT_0]][0] : [[TY]] // CHECK: [[OUT_2:%.*]] = llvm.insertvalue [[ALIGNED_PTR]], [[OUT_1]][1] : [[TY]] -// CHECK: [[OFFSET:%.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// CHECK: [[OFFSET:%.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK: [[OUT_3:%.*]] = llvm.insertvalue [[OFFSET]], [[OUT_2]][2] : [[TY]] -// CHECK: [[SIZE_0:%.*]] = llvm.mlir.constant(6 : index) : !llvm.i64 +// CHECK: [[SIZE_0:%.*]] = llvm.mlir.constant(6 : index) : i64 // CHECK: [[OUT_4:%.*]] = llvm.insertvalue [[SIZE_0]], [[OUT_3]][3, 0] : [[TY]] -// CHECK: [[SIZE_1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: [[SIZE_1:%.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: [[OUT_5:%.*]] = llvm.insertvalue [[SIZE_1]], [[OUT_4]][4, 0] : [[TY]] -// CHECK: [[STRIDE_0:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: [[STRIDE_0:%.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: [[OUT_6:%.*]] = llvm.insertvalue [[STRIDE_0]], [[OUT_5]][3, 1] : [[TY]] -// CHECK: [[STRIDE_1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: [[STRIDE_1:%.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: [[OUT_7:%.*]] = llvm.insertvalue [[STRIDE_1]], [[OUT_6]][4, 1] : [[TY]] // CHECK-LABEL: @memref_reinterpret_cast_unranked_to_dynamic_shape @@ -439,18 +439,18 @@ : memref<*xf32> to memref return } -// CHECK-SAME: ([[OFFSET:%[a-z,0-9]+]]: !llvm.i64, -// CHECK-SAME: [[SIZE_0:%[a-z,0-9]+]]: !llvm.i64, [[SIZE_1:%[a-z,0-9]+]]: !llvm.i64, -// CHECK-SAME: [[STRIDE_0:%[a-z,0-9]+]]: !llvm.i64, [[STRIDE_1:%[a-z,0-9]+]]: !llvm.i64, +// CHECK-SAME: ([[OFFSET:%[a-z,0-9]+]]: i64, +// CHECK-SAME: [[SIZE_0:%[a-z,0-9]+]]: i64, [[SIZE_1:%[a-z,0-9]+]]: i64, +// CHECK-SAME: [[STRIDE_0:%[a-z,0-9]+]]: i64, [[STRIDE_1:%[a-z,0-9]+]]: i64, // CHECK: [[INPUT:%.*]] = llvm.insertvalue {{.*}}[1] : !llvm.struct<(i64, ptr)> // CHECK: [[OUT_0:%.*]] = llvm.mlir.undef : [[TY:!.*]] // CHECK: [[DESCRIPTOR:%.*]] = llvm.extractvalue [[INPUT]][1] : !llvm.struct<(i64, ptr)> // CHECK: [[BASE_PTR_PTR:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr to !llvm.ptr> // CHECK: [[BASE_PTR:%.*]] = llvm.load [[BASE_PTR_PTR]] : !llvm.ptr> // CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr to !llvm.ptr> -// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]] -// CHECK-SAME: : (!llvm.ptr>, !llvm.i64) -> !llvm.ptr> +// CHECK-SAME: : (!llvm.ptr>, i64) -> !llvm.ptr> // CHECK: [[ALIGNED_PTR:%.*]] = llvm.load [[ALIGNED_PTR_PTR]] : !llvm.ptr> // CHECK: [[OUT_1:%.*]] = llvm.insertvalue [[BASE_PTR]], [[OUT_0]][0] : [[TY]] // CHECK: [[OUT_2:%.*]] = llvm.insertvalue [[ALIGNED_PTR]], [[OUT_1]][1] : [[TY]] @@ -473,13 +473,13 @@ // CHECK: [[UNRANKED_OUT_1:%.*]] = llvm.insertvalue [[RANK]], [[UNRANKED_OUT_O]][0] : !llvm.struct<(i64, ptr)> // Compute size in bytes to allocate result ranked descriptor -// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : !llvm.i64 -// CHECK: [[PTR_SIZE:%.*]] = llvm.mlir.constant(8 
: index) : !llvm.i64 -// CHECK: [[INDEX_SIZE:%.*]] = llvm.mlir.constant(8 : index) : !llvm.i64 -// CHECK: [[DOUBLE_PTR_SIZE:%.*]] = llvm.mul [[C2]], [[PTR_SIZE]] : !llvm.i64 +// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : i64 +// CHECK: [[PTR_SIZE:%.*]] = llvm.mlir.constant(8 : index) : i64 +// CHECK: [[INDEX_SIZE:%.*]] = llvm.mlir.constant(8 : index) : i64 +// CHECK: [[DOUBLE_PTR_SIZE:%.*]] = llvm.mul [[C2]], [[PTR_SIZE]] : i64 // CHECK: [[DESC_ALLOC_SIZE:%.*]] = llvm.add [[DOUBLE_PTR_SIZE]], %{{.*}} -// CHECK: [[UNDERLYING_DESC:%.*]] = llvm.alloca [[DESC_ALLOC_SIZE]] x !llvm.i8 +// CHECK: [[UNDERLYING_DESC:%.*]] = llvm.alloca [[DESC_ALLOC_SIZE]] x i8 // CHECK: llvm.insertvalue [[UNDERLYING_DESC]], [[UNRANKED_OUT_1]][1] // Set allocated, aligned pointers and offset. @@ -490,11 +490,11 @@ // CHECK-SAME: !llvm.ptr to !llvm.ptr> // CHECK: llvm.store [[ALLOC_PTR]], [[BASE_PTR_PTR]] : !llvm.ptr> // CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr to !llvm.ptr> -// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]] // CHECK: llvm.store [[ALIGN_PTR]], [[ALIGNED_PTR_PTR]] : !llvm.ptr> // CHECK: [[BASE_PTR_PTR__:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr to !llvm.ptr> -// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : !llvm.i64 +// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : i64 // CHECK: [[OFFSET_PTR_:%.*]] = llvm.getelementptr [[BASE_PTR_PTR__]]{{\[}}[[C2]]] // CHECK: [[OFFSET_PTR:%.*]] = llvm.bitcast [[OFFSET_PTR_]] // CHECK: llvm.store [[OFFSET]], [[OFFSET_PTR]] : !llvm.ptr @@ -502,18 +502,18 @@ // Iterate over shape operand in reverse order and set sizes and strides. 
// CHECK: [[STRUCT_PTR:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] // CHECK-SAME: !llvm.ptr to !llvm.ptr, ptr, i64, i64)>> -// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 -// CHECK: [[C3_I32:%.*]] = llvm.mlir.constant(3 : i32) : !llvm.i32 +// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) : i64 +// CHECK: [[C3_I32:%.*]] = llvm.mlir.constant(3 : i32) : i32 // CHECK: [[SIZES_PTR:%.*]] = llvm.getelementptr [[STRUCT_PTR]]{{\[}}[[C0]], [[C3_I32]]] // CHECK: [[STRIDES_PTR:%.*]] = llvm.getelementptr [[SIZES_PTR]]{{\[}}[[RANK]]] // CHECK: [[SHAPE_IN_PTR:%.*]] = llvm.extractvalue [[SHAPE]][1] : [[SHAPE_TY]] -// CHECK: [[C1_:%.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK: [[RANK_MIN_1:%.*]] = llvm.sub [[RANK]], [[C1_]] : !llvm.i64 -// CHECK: llvm.br ^bb1([[RANK_MIN_1]], [[C1_]] : !llvm.i64, !llvm.i64) +// CHECK: [[C1_:%.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK: [[RANK_MIN_1:%.*]] = llvm.sub [[RANK]], [[C1_]] : i64 +// CHECK: llvm.br ^bb1([[RANK_MIN_1]], [[C1_]] : i64, i64) -// CHECK: ^bb1([[DIM:%.*]]: !llvm.i64, [[CUR_STRIDE:%.*]]: !llvm.i64): -// CHECK: [[C0_:%.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 -// CHECK: [[COND:%.*]] = llvm.icmp "sge" [[DIM]], [[C0_]] : !llvm.i64 +// CHECK: ^bb1([[DIM:%.*]]: i64, [[CUR_STRIDE:%.*]]: i64): +// CHECK: [[C0_:%.*]] = llvm.mlir.constant(0 : index) : i64 +// CHECK: [[COND:%.*]] = llvm.icmp "sge" [[DIM]], [[C0_]] : i64 // CHECK: llvm.cond_br [[COND]], ^bb2, ^bb3 // CHECK: ^bb2: @@ -523,9 +523,9 @@ // CHECK: llvm.store [[SIZE]], [[TARGET_SIZE_PTR]] : !llvm.ptr // CHECK: [[TARGET_STRIDE_PTR:%.*]] = llvm.getelementptr [[STRIDES_PTR]]{{\[}}[[DIM]]] // CHECK: llvm.store [[CUR_STRIDE]], [[TARGET_STRIDE_PTR]] : !llvm.ptr -// CHECK: [[UPDATE_STRIDE:%.*]] = llvm.mul [[CUR_STRIDE]], [[SIZE]] : !llvm.i64 -// CHECK: [[STRIDE_COND:%.*]] = llvm.sub [[DIM]], [[C1_]] : !llvm.i64 -// CHECK: llvm.br ^bb1([[STRIDE_COND]], [[UPDATE_STRIDE]] : !llvm.i64, !llvm.i64) +// CHECK: [[UPDATE_STRIDE:%.*]] = llvm.mul [[CUR_STRIDE]], [[SIZE]] : i64 +// CHECK: [[STRIDE_COND:%.*]] = llvm.sub [[DIM]], [[C1_]] : i64 +// CHECK: llvm.br ^bb1([[STRIDE_COND]], [[UPDATE_STRIDE]] : i64, i64) // CHECK: ^bb3: // CHECK: llvm.return diff --git a/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir b/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir @@ -19,7 +19,7 @@ func private @fifth_order_right(%arg0: () -> (() -> (() -> (() -> ())))) // Check that memrefs are converted to argument packs if appear as function arguments. -// CHECK: llvm.func @memref_call_conv(!llvm.ptr, !llvm.ptr, !llvm.i64, !llvm.i64, !llvm.i64) +// CHECK: llvm.func @memref_call_conv(!llvm.ptr, !llvm.ptr, i64, i64, i64) func private @memref_call_conv(%arg0: memref) // Same in nested functions. 
@@ -37,25 +37,25 @@ return %bbarg : () -> () } -// CHECK-LABEL: llvm.func @body(!llvm.i32) +// CHECK-LABEL: llvm.func @body(i32) func private @body(i32) // CHECK-LABEL: llvm.func @indirect_const_call -// CHECK-SAME: (%[[ARG0:.*]]: !llvm.i32) { +// CHECK-SAME: (%[[ARG0:.*]]: i32) { func @indirect_const_call(%arg0: i32) { // CHECK-NEXT: %[[ADDR:.*]] = llvm.mlir.addressof @body : !llvm.ptr> %0 = constant @body : (i32) -> () -// CHECK-NEXT: llvm.call %[[ADDR]](%[[ARG0:.*]]) : (!llvm.i32) -> () +// CHECK-NEXT: llvm.call %[[ADDR]](%[[ARG0:.*]]) : (i32) -> () call_indirect %0(%arg0) : (i32) -> () // CHECK-NEXT: llvm.return return } -// CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr>, %arg1: !llvm.float) -> !llvm.i32 { +// CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr>, %arg1: !llvm.float) -> i32 { func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 { -// CHECK-NEXT: %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> !llvm.i32 +// CHECK-NEXT: %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> i32 %0 = call_indirect %arg0(%arg1) : (f32) -> i32 -// CHECK-NEXT: llvm.return %0 : !llvm.i32 +// CHECK-NEXT: llvm.return %0 : i32 return %0 : i32 } diff --git a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir @@ -11,7 +11,7 @@ // CHECK-LABEL: func @check_static_return // CHECK-COUNT-2: !llvm.ptr -// CHECK-COUNT-5: !llvm.i64 +// CHECK-COUNT-5: i64 // CHECK-SAME: -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-LABEL: func @check_static_return // BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr) -> !llvm.ptr { @@ -21,15 +21,15 @@ // BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: %[[base0:.*]] = llvm.insertvalue %[[arg]], %[[udf]][0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: %[[aligned:.*]] = llvm.insertvalue %[[arg]], %[[base0]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(0 : index) : i64 // BAREPTR-NEXT: %[[ins0:.*]] = llvm.insertvalue %[[val0]], %[[aligned]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : i64 // BAREPTR-NEXT: %[[ins1:.*]] = llvm.insertvalue %[[val1]], %[[ins0]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(18 : index) : i64 // BAREPTR-NEXT: %[[ins2:.*]] = llvm.insertvalue %[[val2]], %[[ins1]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : i64 // BAREPTR-NEXT: %[[ins3:.*]] = llvm.insertvalue %[[val3]], %[[ins2]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : i64 // BAREPTR-NEXT: %[[ins4:.*]] = llvm.insertvalue %[[val4]], %[[ins3]][4, 1] : 
!llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: %[[base1:.*]] = llvm.extractvalue %[[ins4]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: llvm.return %[[base1]] : !llvm.ptr @@ -40,7 +40,7 @@ // CHECK-LABEL: func @check_static_return_with_offset // CHECK-COUNT-2: !llvm.ptr -// CHECK-COUNT-5: !llvm.i64 +// CHECK-COUNT-5: i64 // CHECK-SAME: -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-LABEL: func @check_static_return_with_offset // BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr) -> !llvm.ptr { @@ -50,15 +50,15 @@ // BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: %[[base0:.*]] = llvm.insertvalue %[[arg]], %[[udf]][0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: %[[aligned:.*]] = llvm.insertvalue %[[arg]], %[[base0]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(7 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(7 : index) : i64 // BAREPTR-NEXT: %[[ins0:.*]] = llvm.insertvalue %[[val0]], %[[aligned]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : i64 // BAREPTR-NEXT: %[[ins1:.*]] = llvm.insertvalue %[[val1]], %[[ins0]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(22 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(22 : index) : i64 // BAREPTR-NEXT: %[[ins2:.*]] = llvm.insertvalue %[[val2]], %[[ins1]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : i64 // BAREPTR-NEXT: %[[ins3:.*]] = llvm.insertvalue %[[val3]], %[[ins2]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : i64 // BAREPTR-NEXT: %[[ins4:.*]] = llvm.insertvalue %[[val4]], %[[ins3]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: %[[base1:.*]] = llvm.extractvalue %[[ins4]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // BAREPTR-NEXT: llvm.return %[[base1]] : !llvm.ptr @@ -70,28 +70,28 @@ // CHECK-LABEL: func @zero_d_alloc() -> !llvm.struct<(ptr, ptr, i64)> { // BAREPTR-LABEL: func @zero_d_alloc() -> !llvm.ptr { func @zero_d_alloc() -> memref { -// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, i64) -> !llvm.ptr +// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// CHECK-NEXT: llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr // CHECK-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr 
to !llvm.ptr // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)> // CHECK-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64)> // CHECK-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64)> -// CHECK-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// CHECK-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK-NEXT: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64)> -// BAREPTR-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 // BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// BAREPTR-NEXT: llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// BAREPTR-NEXT: llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr // BAREPTR-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr // BAREPTR-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)> // BAREPTR-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64)> // BAREPTR-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64)> -// BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // BAREPTR-NEXT: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64)> %0 = alloc() : memref return %0 : memref @@ -118,48 +118,48 @@ // CHECK-LABEL: func @aligned_1d_alloc( // BAREPTR-LABEL: func @aligned_1d_alloc( func @aligned_1d_alloc() -> memref<42xf32> { -// CHECK-NEXT: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64 -// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK-NEXT: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64 +// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : !llvm.i64 -// CHECK-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : !llvm.i64 -// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr, i64) -> !llvm.ptr +// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// CHECK-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64 +// CHECK-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : i64 +// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (i64) -> !llvm.ptr // CHECK-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr -// CHECK-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: %[[one_1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_1]] : !llvm.i64 -// CHECK-NEXT: %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : !llvm.i64 
-// CHECK-NEXT: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : !llvm.i64 -// CHECK-NEXT: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : !llvm.i64 -// CHECK-NEXT: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : !llvm.i64 to !llvm.ptr +// CHECK-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr to i64 +// CHECK-NEXT: %[[one_1:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_1]] : i64 +// CHECK-NEXT: %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : i64 +// CHECK-NEXT: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : i64 +// CHECK-NEXT: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : i64 +// CHECK-NEXT: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK-NEXT: llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> -// CHECK-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// CHECK-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK-NEXT: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> -// BAREPTR-NEXT: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64 -// BAREPTR-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64 +// BAREPTR-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64 // BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// BAREPTR-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : !llvm.i64 -// BAREPTR-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : !llvm.i64 -// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (!llvm.i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr, i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// BAREPTR-NEXT: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64 +// BAREPTR-NEXT: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : i64 +// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (i64) -> !llvm.ptr // BAREPTR-NEXT: %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr -// BAREPTR-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr to !llvm.i64 -// BAREPTR-NEXT: %[[one_2:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// BAREPTR-NEXT: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_2]] : !llvm.i64 -// BAREPTR-NEXT: %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : !llvm.i64 -// BAREPTR-NEXT: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : !llvm.i64 -// BAREPTR-NEXT: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : !llvm.i64 -// BAREPTR-NEXT: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : !llvm.i64 to !llvm.ptr +// BAREPTR-NEXT: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr to i64 +// BAREPTR-NEXT: %[[one_2:.*]] = llvm.mlir.constant(1 : index) : i64 +// BAREPTR-NEXT: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_2]] : i64 +// BAREPTR-NEXT: %[[bumped:.*]] 
= llvm.add %[[allocatedAsInt]], %[[bump]] : i64 +// BAREPTR-NEXT: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : i64 +// BAREPTR-NEXT: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : i64 +// BAREPTR-NEXT: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr // BAREPTR-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // BAREPTR-NEXT: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // BAREPTR-NEXT: llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> -// BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // BAREPTR-NEXT: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %0 = alloc() {alignment = 8} : memref<42xf32> return %0 : memref<42xf32> @@ -170,18 +170,18 @@ // CHECK-LABEL: func @static_alloc() -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> { // BAREPTR-LABEL: func @static_alloc() -> !llvm.ptr { func @static_alloc() -> memref<32x18xf32> { -// CHECK: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64 +// CHECK: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr +// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// CHECK-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr // CHECK-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr to !llvm.ptr -// BAREPTR: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64 +// BAREPTR: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64 // BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (!llvm.i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr // BAREPTR-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr to !llvm.ptr %0 = alloc() : memref<32x18xf32> return %0 : memref<32x18xf32> @@ -191,20 +191,20 @@ // CHECK-LABEL: func @static_alloca() -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> { func @static_alloca() -> memref<32x18xf32> { -// CHECK-NEXT: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : !llvm.i64 -// CHECK-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : !llvm.i64 -// CHECK-NEXT: %[[st2:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : !llvm.i64 +// CHECK-NEXT: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : i64 +// CHECK-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) 
: i64 +// CHECK-NEXT: %[[st2:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr -// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr -// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to !llvm.i64 -// CHECK-NEXT: %[[allocated:.*]] = llvm.alloca %[[size_bytes]] x !llvm.float : (!llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr +// CHECK-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 +// CHECK-NEXT: %[[allocated:.*]] = llvm.alloca %[[size_bytes]] x !llvm.float : (i64) -> !llvm.ptr %0 = alloca() : memref<32x18xf32> // Test with explicitly specified alignment. llvm.alloca takes care of the // alignment. The same pointer is thus used for allocation and aligned // accesses. - // CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (!llvm.i64) -> !llvm.ptr + // CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (i64) -> !llvm.ptr // CHECK: %[[desc:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[desc1:.*]] = llvm.insertvalue %[[alloca_aligned]], %[[desc]][0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: llvm.insertvalue %[[alloca_aligned]], %[[desc1]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> @@ -246,24 +246,24 @@ // CHECK-LABEL: func @static_load( // CHECK-COUNT-2: !llvm.ptr, -// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64 -// CHECK: %[[I:.*]]: !llvm.i64, -// CHECK: %[[J:.*]]: !llvm.i64) +// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64 +// CHECK: %[[I:.*]]: i64, +// CHECK: %[[J:.*]]: i64) // BAREPTR-LABEL: func @static_load -// BAREPTR-SAME: (%[[A:.*]]: !llvm.ptr, %[[I:.*]]: !llvm.i64, %[[J:.*]]: !llvm.i64) { +// BAREPTR-SAME: (%[[A:.*]]: !llvm.ptr, %[[I:.*]]: i64, %[[J:.*]]: i64) { func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64 -// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64 +// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64 +// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: llvm.load %[[addr]] : !llvm.ptr // BAREPTR: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64 -// BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64 +// BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// BAREPTR-NEXT: %[[off1:.*]] = 
llvm.add %[[offI]], %[[J]] : i64 +// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr // BAREPTR-NEXT: llvm.load %[[addr]] : !llvm.ptr %0 = load %static[%i, %j] : memref<10x42xf32> return @@ -289,30 +289,30 @@ // CHECK-LABEL: func @static_store // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr -// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64 +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[I:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[J:[a-zA-Z0-9]*]]: i64 // BAREPTR-LABEL: func @static_store // BAREPTR-SAME: %[[A:.*]]: !llvm.ptr -// BAREPTR-SAME: %[[I:[a-zA-Z0-9]*]]: !llvm.i64 -// BAREPTR-SAME: %[[J:[a-zA-Z0-9]*]]: !llvm.i64 +// BAREPTR-SAME: %[[I:[a-zA-Z0-9]*]]: i64 +// BAREPTR-SAME: %[[J:[a-zA-Z0-9]*]]: i64 func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f32) { // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64 -// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// CHECK-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64 +// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64 +// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr // BAREPTR: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : !llvm.i64 -// BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : !llvm.i64 -// BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : !llvm.i64 -// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64 +// BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64 +// BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64 +// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr // BAREPTR-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr store %val, %static[%i, %j] : memref<10x42xf32> return @@ -323,25 +323,25 @@ // CHECK-LABEL: func @static_memref_dim // BAREPTR-LABEL: func @static_memref_dim(%{{.*}}: !llvm.ptr) { func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) { -// CHECK: llvm.mlir.constant(42 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(42 : index) : i64 // BAREPTR: llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// BAREPTR: llvm.mlir.constant(42 : index) : !llvm.i64 +// BAREPTR: llvm.mlir.constant(42 : index) : i64 
%c0 = constant 0 : index %0 = dim %static, %c0 : memref<42x32x15x13x27xf32> -// CHECK: llvm.mlir.constant(32 : index) : !llvm.i64 -// BAREPTR: llvm.mlir.constant(32 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(32 : index) : i64 +// BAREPTR: llvm.mlir.constant(32 : index) : i64 %c1 = constant 1 : index %1 = dim %static, %c1 : memref<42x32x15x13x27xf32> -// CHECK: llvm.mlir.constant(15 : index) : !llvm.i64 -// BAREPTR: llvm.mlir.constant(15 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(15 : index) : i64 +// BAREPTR: llvm.mlir.constant(15 : index) : i64 %c2 = constant 2 : index %2 = dim %static, %c2 : memref<42x32x15x13x27xf32> -// CHECK: llvm.mlir.constant(13 : index) : !llvm.i64 -// BAREPTR: llvm.mlir.constant(13 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(13 : index) : i64 +// BAREPTR: llvm.mlir.constant(13 : index) : i64 %c3 = constant 3 : index %3 = dim %static, %c3 : memref<42x32x15x13x27xf32> -// CHECK: llvm.mlir.constant(27 : index) : !llvm.i64 -// BAREPTR: llvm.mlir.constant(27 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(27 : index) : i64 +// BAREPTR: llvm.mlir.constant(27 : index) : i64 %c4 = constant 4 : index %4 = dim %static, %c4 : memref<42x32x15x13x27xf32> return @@ -361,11 +361,11 @@ // BAREPTR-NEXT: %[[desc0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // BAREPTR-NEXT: %[[desc1:.*]] = llvm.insertvalue %[[call]], %[[desc0]][0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // BAREPTR-NEXT: %[[desc2:.*]] = llvm.insertvalue %[[call]], %[[desc1]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - // BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // BAREPTR-NEXT: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64 // BAREPTR-NEXT: %[[desc4:.*]] = llvm.insertvalue %[[c0]], %[[desc2]][2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - // BAREPTR-NEXT: %[[c20:.*]] = llvm.mlir.constant(20 : index) : !llvm.i64 + // BAREPTR-NEXT: %[[c20:.*]] = llvm.mlir.constant(20 : index) : i64 // BAREPTR-NEXT: %[[desc6:.*]] = llvm.insertvalue %[[c20]], %[[desc4]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - // BAREPTR-NEXT: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // BAREPTR-NEXT: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64 // BAREPTR-NEXT: %[[outDesc:.*]] = llvm.insertvalue %[[c1]], %[[desc6]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %res = call @foo(%in) : (memref<10xi8>) -> (memref<20xi8>) // BAREPTR-NEXT: %[[res:.*]] = llvm.extractvalue %[[outDesc]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> diff --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir @@ -9,7 +9,7 @@ return } -// CHECK-LABEL: llvm.func @body(!llvm.i64) +// CHECK-LABEL: llvm.func @body(i64) func private @body(index) // CHECK-LABEL: func @simple_loop() { @@ -21,38 +21,38 @@ br ^bb1 // CHECK-NEXT: ^bb1: // pred: ^bb0 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64) +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i64 +// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64) // CHECK32-NEXT: ^bb1: // pred: ^bb0 
-// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i32 -// CHECK32-NEXT: llvm.br ^bb2({{.*}} : !llvm.i32) +// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i32 +// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i32 +// CHECK32-NEXT: llvm.br ^bb2({{.*}} : i32) ^bb1: // pred: ^bb0 %c1 = constant 1 : index %c42 = constant 42 : index br ^bb2(%c1 : index) -// CHECK: ^bb2({{.*}}: !llvm.i64): // 2 preds: ^bb1, ^bb3 -// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64 +// CHECK: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb3 +// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4 -// CHECK32: ^bb2({{.*}}: !llvm.i32): // 2 preds: ^bb1, ^bb3 -// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i32 +// CHECK32: ^bb2({{.*}}: i32): // 2 preds: ^bb1, ^bb3 +// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i32 // CHECK32-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4 ^bb2(%0: index): // 2 preds: ^bb1, ^bb3 %1 = cmpi "slt", %0, %c42 : index cond_br %1, ^bb3, ^bb4 // CHECK: ^bb3: // pred: ^bb2 -// CHECK-NEXT: llvm.call @body({{.*}}) : (!llvm.i64) -> () -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64) +// CHECK-NEXT: llvm.call @body({{.*}}) : (i64) -> () +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64 +// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64) // CHECK32: ^bb3: // pred: ^bb2 -// CHECK32-NEXT: llvm.call @body({{.*}}) : (!llvm.i32) -> () -// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i32 -// CHECK32-NEXT: llvm.br ^bb2({{.*}} : !llvm.i32) +// CHECK32-NEXT: llvm.call @body({{.*}}) : (i32) -> () +// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i32 +// CHECK32-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i32 +// CHECK32-NEXT: llvm.br ^bb2({{.*}} : i32) ^bb3: // pred: ^bb2 call @body(%0) : (index) -> () %c1_0 = constant 1 : index @@ -155,18 +155,18 @@ return } -// CHECK-LABEL: llvm.func @body_args(!llvm.i64) -> !llvm.i64 -// CHECK32-LABEL: llvm.func @body_args(!llvm.i32) -> !llvm.i32 +// CHECK-LABEL: llvm.func @body_args(i64) -> i64 +// CHECK32-LABEL: llvm.func @body_args(i32) -> i32 func private @body_args(index) -> index -// CHECK-LABEL: llvm.func @other(!llvm.i64, !llvm.i32) -> !llvm.i32 -// CHECK32-LABEL: llvm.func @other(!llvm.i32, !llvm.i32) -> !llvm.i32 +// CHECK-LABEL: llvm.func @other(i64, i32) -> i32 +// CHECK32-LABEL: llvm.func @other(i32, i32) -> i32 func private @other(index, i32) -> i32 -// CHECK-LABEL: func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 { -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : !llvm.i32 +// CHECK-LABEL: func @func_args(%arg0: i32, %arg1: i32) -> i32 { +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : i32 // CHECK-NEXT: llvm.br ^bb1 -// CHECK32-LABEL: func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 { -// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : !llvm.i32 +// CHECK32-LABEL: func @func_args(%arg0: i32, %arg1: i32) -> i32 { +// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : i32 // CHECK32-NEXT: llvm.br ^bb1 func @func_args(i32, i32) -> i32 { ^bb0(%arg0: i32, %arg1: i32): @@ -174,44 +174,44 @@ br ^bb1 // CHECK-NEXT: ^bb1: // pred: ^bb0 -// CHECK-NEXT: {{.*}} = 
llvm.mlir.constant(0 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64) +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i64 +// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64) // CHECK32-NEXT: ^bb1: // pred: ^bb0 -// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i32 -// CHECK32-NEXT: llvm.br ^bb2({{.*}} : !llvm.i32) +// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i32 +// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i32 +// CHECK32-NEXT: llvm.br ^bb2({{.*}} : i32) ^bb1: // pred: ^bb0 %c0 = constant 0 : index %c42 = constant 42 : index br ^bb2(%c0 : index) -// CHECK-NEXT: ^bb2({{.*}}: !llvm.i64): // 2 preds: ^bb1, ^bb3 -// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64 +// CHECK-NEXT: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb3 +// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4 -// CHECK32-NEXT: ^bb2({{.*}}: !llvm.i32): // 2 preds: ^bb1, ^bb3 -// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i32 +// CHECK32-NEXT: ^bb2({{.*}}: i32): // 2 preds: ^bb1, ^bb3 +// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i32 // CHECK32-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4 ^bb2(%0: index): // 2 preds: ^bb1, ^bb3 %1 = cmpi "slt", %0, %c42 : index cond_br %1, ^bb3, ^bb4 // CHECK-NEXT: ^bb3: // pred: ^bb2 -// CHECK-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (!llvm.i64) -> !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg0) : (!llvm.i64, !llvm.i32) -> !llvm.i32 -// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i32) -> !llvm.i32 -// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg1) : (!llvm.i64, !llvm.i32) -> !llvm.i32 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64) +// CHECK-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (i64) -> i64 +// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg0) : (i64, i32) -> i32 +// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (i64, i32) -> i32 +// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg1) : (i64, i32) -> i32 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64 +// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64) // CHECK32-NEXT: ^bb3: // pred: ^bb2 -// CHECK32-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (!llvm.i32) -> !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg0) : (!llvm.i32, !llvm.i32) -> !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (!llvm.i32, !llvm.i32) -> !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i32 -// CHECK32-NEXT: llvm.br ^bb2({{.*}} : !llvm.i32) +// CHECK32-NEXT: {{.*}} = llvm.call @body_args({{.*}}) : (i32) -> i32 +// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg0) : (i32, i32) -> i32 +// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (i32, i32) -> i32 +// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, %arg1) : (i32, i32) -> i32 +// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i32 +// CHECK32-NEXT: {{.*}} = 
llvm.add {{.*}}, {{.*}} : i32 +// CHECK32-NEXT: llvm.br ^bb2({{.*}} : i32) ^bb3: // pred: ^bb2 %2 = call @body_args(%0) : (index) -> index %3 = call @other(%2, %arg0) : (index, i32) -> i32 @@ -222,29 +222,29 @@ br ^bb2(%6 : index) // CHECK-NEXT: ^bb4: // pred: ^bb2 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i32) -> !llvm.i32 -// CHECK-NEXT: llvm.return {{.*}} : !llvm.i32 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (i64, i32) -> i32 +// CHECK-NEXT: llvm.return {{.*}} : i32 // CHECK32-NEXT: ^bb4: // pred: ^bb2 -// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i32 -// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (!llvm.i32, !llvm.i32) -> !llvm.i32 -// CHECK32-NEXT: llvm.return {{.*}} : !llvm.i32 +// CHECK32-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i32 +// CHECK32-NEXT: {{.*}} = llvm.call @other({{.*}}, {{.*}}) : (i32, i32) -> i32 +// CHECK32-NEXT: llvm.return {{.*}} : i32 ^bb4: // pred: ^bb2 %c0_0 = constant 0 : index %7 = call @other(%c0_0, %c0_i32) : (index, i32) -> i32 return %7 : i32 } -// CHECK-LABEL: llvm.func @pre(!llvm.i64) -// CHECK32-LABEL: llvm.func @pre(!llvm.i32) +// CHECK-LABEL: llvm.func @pre(i64) +// CHECK32-LABEL: llvm.func @pre(i32) func private @pre(index) -// CHECK-LABEL: llvm.func @body2(!llvm.i64, !llvm.i64) -// CHECK32-LABEL: llvm.func @body2(!llvm.i32, !llvm.i32) +// CHECK-LABEL: llvm.func @body2(i64, i64) +// CHECK32-LABEL: llvm.func @body2(i32, i32) func private @body2(index, index) -// CHECK-LABEL: llvm.func @post(!llvm.i64) -// CHECK32-LABEL: llvm.func @post(!llvm.i32) +// CHECK-LABEL: llvm.func @post(i64) +// CHECK32-LABEL: llvm.func @post(i32) func private @post(index) // CHECK-LABEL: func @imperfectly_nested_loops() { @@ -254,49 +254,49 @@ br ^bb1 // CHECK-NEXT: ^bb1: // pred: ^bb0 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64) +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i64 +// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64) ^bb1: // pred: ^bb0 %c0 = constant 0 : index %c42 = constant 42 : index br ^bb2(%c0 : index) -// CHECK-NEXT: ^bb2({{.*}}: !llvm.i64): // 2 preds: ^bb1, ^bb7 -// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64 +// CHECK-NEXT: ^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb7 +// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb8 ^bb2(%0: index): // 2 preds: ^bb1, ^bb7 %1 = cmpi "slt", %0, %c42 : index cond_br %1, ^bb3, ^bb8 // CHECK-NEXT: ^bb3: -// CHECK-NEXT: llvm.call @pre({{.*}}) : (!llvm.i64) -> () +// CHECK-NEXT: llvm.call @pre({{.*}}) : (i64) -> () // CHECK-NEXT: llvm.br ^bb4 ^bb3: // pred: ^bb2 call @pre(%0) : (index) -> () br ^bb4 // CHECK-NEXT: ^bb4: // pred: ^bb3 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(56 : index) : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb5({{.*}} : !llvm.i64) +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(56 : index) : i64 +// CHECK-NEXT: llvm.br ^bb5({{.*}} : i64) ^bb4: // pred: ^bb3 %c7 = constant 7 : index %c56 = constant 56 : index br ^bb5(%c7 : index) -// CHECK-NEXT: ^bb5({{.*}}: !llvm.i64): // 2 preds: ^bb4, ^bb6 
-// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64 +// CHECK-NEXT: ^bb5({{.*}}: i64): // 2 preds: ^bb4, ^bb6 +// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb6, ^bb7 ^bb5(%2: index): // 2 preds: ^bb4, ^bb6 %3 = cmpi "slt", %2, %c56 : index cond_br %3, ^bb6, ^bb7 // CHECK-NEXT: ^bb6: // pred: ^bb5 -// CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i64) -> () -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(2 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb5({{.*}} : !llvm.i64) +// CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (i64, i64) -> () +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(2 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64 +// CHECK-NEXT: llvm.br ^bb5({{.*}} : i64) ^bb6: // pred: ^bb5 call @body2(%0, %2) : (index, index) -> () %c2 = constant 2 : index @@ -304,10 +304,10 @@ br ^bb5(%4 : index) // CHECK-NEXT: ^bb7: // pred: ^bb5 -// CHECK-NEXT: llvm.call @post({{.*}}) : (!llvm.i64) -> () -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64) +// CHECK-NEXT: llvm.call @post({{.*}}) : (i64) -> () +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64 +// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64) ^bb7: // pred: ^bb5 call @post(%0) : (index) -> () %c1 = constant 1 : index @@ -320,57 +320,57 @@ return } -// CHECK-LABEL: llvm.func @mid(!llvm.i64) +// CHECK-LABEL: llvm.func @mid(i64) func private @mid(index) -// CHECK-LABEL: llvm.func @body3(!llvm.i64, !llvm.i64) +// CHECK-LABEL: llvm.func @body3(i64, i64) func private @body3(index, index) // A complete function transformation check. 
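Before the complete-function transcript that follows, it is worth restating the one rewrite every hunk in this patch performs: the LLVM dialect now reuses MLIR's built-in integer types, so the old dialect-local spelling `!llvm.iN` becomes plain `iN` in operation signatures, constants, and block arguments alike. A minimal before/after sketch, using the loop-bound pattern from the hunks above:

```mlir
// Old spelling: LLVM dialect integer types carried the `!llvm.` prefix.
%c42 = llvm.mlir.constant(42 : index) : !llvm.i64
llvm.br ^bb2(%c42 : !llvm.i64)

// New spelling: the dialect reuses the built-in integer types directly.
%c42 = llvm.mlir.constant(42 : index) : i64
llvm.br ^bb2(%c42 : i64)
```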
// CHECK-LABEL: func @more_imperfectly_nested_loops() { // CHECK-NEXT: llvm.br ^bb1 // CHECK-NEXT:^bb1: // pred: ^bb0 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64) -// CHECK-NEXT:^bb2({{.*}}: !llvm.i64): // 2 preds: ^bb1, ^bb11 -// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(0 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(42 : index) : i64 +// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64) +// CHECK-NEXT:^bb2({{.*}}: i64): // 2 preds: ^bb1, ^bb11 +// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb12 // CHECK-NEXT:^bb3: // pred: ^bb2 -// CHECK-NEXT: llvm.call @pre({{.*}}) : (!llvm.i64) -> () +// CHECK-NEXT: llvm.call @pre({{.*}}) : (i64) -> () // CHECK-NEXT: llvm.br ^bb4 // CHECK-NEXT:^bb4: // pred: ^bb3 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(56 : index) : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb5({{.*}} : !llvm.i64) -// CHECK-NEXT:^bb5({{.*}}: !llvm.i64): // 2 preds: ^bb4, ^bb6 -// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(7 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(56 : index) : i64 +// CHECK-NEXT: llvm.br ^bb5({{.*}} : i64) +// CHECK-NEXT:^bb5({{.*}}: i64): // 2 preds: ^bb4, ^bb6 +// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb6, ^bb7 // CHECK-NEXT:^bb6: // pred: ^bb5 -// CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i64) -> () -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(2 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb5({{.*}} : !llvm.i64) +// CHECK-NEXT: llvm.call @body2({{.*}}, {{.*}}) : (i64, i64) -> () +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(2 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64 +// CHECK-NEXT: llvm.br ^bb5({{.*}} : i64) // CHECK-NEXT:^bb7: // pred: ^bb5 -// CHECK-NEXT: llvm.call @mid({{.*}}) : (!llvm.i64) -> () +// CHECK-NEXT: llvm.call @mid({{.*}}) : (i64) -> () // CHECK-NEXT: llvm.br ^bb8 // CHECK-NEXT:^bb8: // pred: ^bb7 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(18 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(37 : index) : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb9({{.*}} : !llvm.i64) -// CHECK-NEXT:^bb9({{.*}}: !llvm.i64): // 2 preds: ^bb8, ^bb10 -// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : !llvm.i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(18 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(37 : index) : i64 +// CHECK-NEXT: llvm.br ^bb9({{.*}} : i64) +// CHECK-NEXT:^bb9({{.*}}: i64): // 2 preds: ^bb8, ^bb10 +// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64 // CHECK-NEXT: llvm.cond_br {{.*}}, ^bb10, ^bb11 // CHECK-NEXT:^bb10: // pred: ^bb9 -// CHECK-NEXT: llvm.call @body3({{.*}}, {{.*}}) : (!llvm.i64, !llvm.i64) -> () -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(3 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb9({{.*}} : !llvm.i64) +// CHECK-NEXT: llvm.call @body3({{.*}}, {{.*}}) : (i64, i64) -> () +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(3 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64 +// CHECK-NEXT: llvm.br ^bb9({{.*}} : i64) // CHECK-NEXT:^bb11: // pred: 
^bb9 -// CHECK-NEXT: llvm.call @post({{.*}}) : (!llvm.i64) -> () -// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.br ^bb2({{.*}} : !llvm.i64) +// CHECK-NEXT: llvm.call @post({{.*}}) : (i64) -> () +// CHECK-NEXT: {{.*}} = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: {{.*}} = llvm.add {{.*}}, {{.*}} : i64 +// CHECK-NEXT: llvm.br ^bb2({{.*}} : i64) // CHECK-NEXT:^bb12: // pred: ^bb2 // CHECK-NEXT: llvm.return // CHECK-NEXT: } @@ -423,7 +423,7 @@ return } -// CHECK-LABEL: llvm.func @get_i64() -> !llvm.i64 +// CHECK-LABEL: llvm.func @get_i64() -> i64 func private @get_i64() -> (i64) // CHECK-LABEL: llvm.func @get_f32() -> !llvm.float func private @get_f32() -> (f32) @@ -441,10 +441,10 @@ // CHECK32-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, float, struct<(ptr, ptr, i32, array<4 x i32>, array<4 x i32>)>)> { func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) { ^bb0: -// CHECK-NEXT: {{.*}} = llvm.call @get_i64() : () -> !llvm.i64 +// CHECK-NEXT: {{.*}} = llvm.call @get_i64() : () -> i64 // CHECK-NEXT: {{.*}} = llvm.call @get_f32() : () -> !llvm.float // CHECK-NEXT: {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr, ptr, i64, array<4 x i64>, array<4 x i64>)> -// CHECK32-NEXT: {{.*}} = llvm.call @get_i64() : () -> !llvm.i64 +// CHECK32-NEXT: {{.*}} = llvm.call @get_i64() : () -> i64 // CHECK32-NEXT: {{.*}} = llvm.call @get_f32() : () -> !llvm.float // CHECK32-NEXT: {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr, ptr, i32, array<4 x i32>, array<4 x i32>)> %0 = call @get_i64() : () -> (i64) @@ -478,7 +478,7 @@ // CHECK32-NEXT: {{.*}} = llvm.extractvalue {{.*}}[2] : !llvm.struct<(i64, float, struct<(ptr, ptr, i32, array<4 x i32>, array<4 x i32>)>)> %0:3 = call @multireturn() : () -> (i64, f32, memref<42x?x10x?xf32>) %1 = constant 42 : i64 -// CHECK: {{.*}} = llvm.add {{.*}}, {{.*}} : !llvm.i64 +// CHECK: {{.*}} = llvm.add {{.*}}, {{.*}} : i64 %2 = addi %0#0, %1 : i64 %3 = constant 42.0 : f32 // CHECK: {{.*}} = llvm.fadd {{.*}}, {{.*}} : !llvm.float @@ -525,29 +525,29 @@ ^bb0(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64): // CHECK-NEXT: %0 = llvm.fsub %arg0, %arg1 : !llvm.float %0 = subf %arg0, %arg1: f32 -// CHECK-NEXT: %1 = llvm.sub %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %1 = llvm.sub %arg2, %arg3 : i32 %1 = subi %arg2, %arg3: i32 -// CHECK-NEXT: %2 = llvm.icmp "slt" %arg2, %1 : !llvm.i32 +// CHECK-NEXT: %2 = llvm.icmp "slt" %arg2, %1 : i32 %2 = cmpi "slt", %arg2, %1 : i32 -// CHECK-NEXT: %3 = llvm.sdiv %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %3 = llvm.sdiv %arg2, %arg3 : i32 %3 = divi_signed %arg2, %arg3 : i32 -// CHECK-NEXT: %4 = llvm.udiv %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %4 = llvm.udiv %arg2, %arg3 : i32 %4 = divi_unsigned %arg2, %arg3 : i32 -// CHECK-NEXT: %5 = llvm.srem %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %5 = llvm.srem %arg2, %arg3 : i32 %5 = remi_signed %arg2, %arg3 : i32 -// CHECK-NEXT: %6 = llvm.urem %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %6 = llvm.urem %arg2, %arg3 : i32 %6 = remi_unsigned %arg2, %arg3 : i32 -// CHECK-NEXT: %7 = llvm.select %2, %arg2, %arg3 : !llvm.i1, !llvm.i32 +// CHECK-NEXT: %7 = llvm.select %2, %arg2, %arg3 : i1, i32 %7 = select %2, %arg2, %arg3 : i32 // CHECK-NEXT: %8 = llvm.fdiv %arg0, %arg1 : !llvm.float %8 = divf %arg0, %arg1 : f32 // CHECK-NEXT: %9 = llvm.frem %arg0, %arg1 : !llvm.float %9 = remf %arg0, %arg1 : f32 -// CHECK-NEXT: %10 = llvm.and %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %10 = 
llvm.and %arg2, %arg3 : i32 %10 = and %arg2, %arg3 : i32 -// CHECK-NEXT: %11 = llvm.or %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %11 = llvm.or %arg2, %arg3 : i32 %11 = or %arg2, %arg3 : i32 -// CHECK-NEXT: %12 = llvm.xor %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %12 = llvm.xor %arg2, %arg3 : i32 %12 = xor %arg2, %arg3 : i32 // CHECK-NEXT: %13 = "llvm.intr.exp"(%arg0) : (!llvm.float) -> !llvm.float %13 = std.exp %arg0 : f32 @@ -555,11 +555,11 @@ %14 = std.exp2 %arg0 : f32 // CHECK-NEXT: %15 = llvm.mlir.constant(7.900000e-01 : f64) : !llvm.double %15 = constant 7.9e-01 : f64 -// CHECK-NEXT: %16 = llvm.shl %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %16 = llvm.shl %arg2, %arg3 : i32 %16 = shift_left %arg2, %arg3 : i32 -// CHECK-NEXT: %17 = llvm.ashr %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %17 = llvm.ashr %arg2, %arg3 : i32 %17 = shift_right_signed %arg2, %arg3 : i32 -// CHECK-NEXT: %18 = llvm.lshr %arg2, %arg3 : !llvm.i32 +// CHECK-NEXT: %18 = llvm.lshr %arg2, %arg3 : i32 %18 = shift_right_unsigned %arg2, %arg3 : i32 // CHECK-NEXT: %{{[0-9]+}} = "llvm.intr.sqrt"(%arg0) : (!llvm.float) -> !llvm.float %19 = std.sqrt %arg0 : f32 @@ -573,9 +573,9 @@ // make this test dependent on the pointer size on the target system. // CHECK-LABEL: @index_cast func @index_cast(%arg0: index, %arg1: i1) { -// CHECK-NEXT: = llvm.trunc %arg0 : !llvm.i{{.*}} to !llvm.i1 +// CHECK-NEXT: = llvm.trunc %arg0 : i{{.*}} to i1 %0 = index_cast %arg0: index to i1 -// CHECK-NEXT: = llvm.sext %arg1 : !llvm.i1 to !llvm.i{{.*}} +// CHECK-NEXT: = llvm.sext %arg1 : i1 to i{{.*}} %1 = index_cast %arg1: i1 to index return } @@ -583,13 +583,13 @@ // Checking conversion of signed integer types to floating point. // CHECK-LABEL: @sitofp func @sitofp(%arg0 : i32, %arg1 : i64) { -// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.i32 to !llvm.float +// CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to !llvm.float %0 = sitofp %arg0: i32 to f32 -// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.i32 to !llvm.double +// CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to !llvm.double %1 = sitofp %arg0: i32 to f64 -// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.i64 to !llvm.float +// CHECK-NEXT: = llvm.sitofp {{.*}} : i64 to !llvm.float %2 = sitofp %arg1: i64 to f32 -// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.i64 to !llvm.double +// CHECK-NEXT: = llvm.sitofp {{.*}} : i64 to !llvm.double %3 = sitofp %arg1: i64 to f64 return } @@ -615,13 +615,13 @@ // Checking conversion of unsigned integer types to floating point. // CHECK-LABEL: @uitofp func @uitofp(%arg0 : i32, %arg1 : i64) { -// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.i32 to !llvm.float +// CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to !llvm.float %0 = uitofp %arg0: i32 to f32 -// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.i32 to !llvm.double +// CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to !llvm.double %1 = uitofp %arg0: i32 to f64 -// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.i64 to !llvm.float +// CHECK-NEXT: = llvm.uitofp {{.*}} : i64 to !llvm.float %2 = uitofp %arg1: i64 to f32 -// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.i64 to !llvm.double +// CHECK-NEXT: = llvm.uitofp {{.*}} : i64 to !llvm.double %3 = uitofp %arg1: i64 to f64 return } @@ -653,13 +653,13 @@ // Checking conversion of floating point to integer types. 
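The `@fptosi` and `@fptoui` checks that follow show the same rule applied to conversion ops: only the integer half of the type pair changes spelling, while LLVM dialect float types keep their `!llvm.` prefix in this patch. A hedged sketch of one such lowering (SSA names are illustrative, not the names FileCheck captures):

```mlir
// Standard dialect input.
%0 = fptosi %arg0 : f32 to i32

// LLVM dialect output after this patch: the result type is the plain
// built-in `i32`, the operand type is still the dialect's `!llvm.float`.
%1 = llvm.fptosi %arg0 : !llvm.float to i32
```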
// CHECK-LABEL: @fptosi func @fptosi(%arg0 : f32, %arg1 : f64) { -// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to !llvm.i32 +// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to i32 %0 = fptosi %arg0: f32 to i32 -// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to !llvm.i64 +// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to i64 %1 = fptosi %arg0: f32 to i64 -// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to !llvm.i32 +// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to i32 %2 = fptosi %arg1: f64 to i32 -// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to !llvm.i64 +// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to i64 %3 = fptosi %arg1: f64 to i64 return } @@ -685,13 +685,13 @@ // Checking conversion of floating point to integer types. // CHECK-LABEL: @fptoui func @fptoui(%arg0 : f32, %arg1 : f64) { -// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to !llvm.i32 +// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to i32 %0 = fptoui %arg0: f32 to i32 -// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to !llvm.i64 +// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to i64 %1 = fptoui %arg0: f32 to i64 -// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to !llvm.i32 +// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to i32 %2 = fptoui %arg1: f64 to i32 -// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to !llvm.i64 +// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to i64 %3 = fptoui %arg1: f64 to i64 return } @@ -759,27 +759,27 @@ // Check sign and zero extension and truncation of integers. // CHECK-LABEL: @integer_extension_and_truncation func @integer_extension_and_truncation() { -// CHECK-NEXT: %0 = llvm.mlir.constant(-3 : i3) : !llvm.i3 +// CHECK-NEXT: %0 = llvm.mlir.constant(-3 : i3) : i3 %0 = constant 5 : i3 -// CHECK-NEXT: = llvm.sext %0 : !llvm.i3 to !llvm.i6 +// CHECK-NEXT: = llvm.sext %0 : i3 to i6 %1 = sexti %0 : i3 to i6 -// CHECK-NEXT: = llvm.zext %0 : !llvm.i3 to !llvm.i6 +// CHECK-NEXT: = llvm.zext %0 : i3 to i6 %2 = zexti %0 : i3 to i6 -// CHECK-NEXT: = llvm.trunc %0 : !llvm.i3 to !llvm.i2 +// CHECK-NEXT: = llvm.trunc %0 : i3 to i2 %3 = trunci %0 : i3 to i2 return } // CHECK-LABEL: @dfs_block_order func @dfs_block_order(%arg0: i32) -> (i32) { -// CHECK-NEXT: %[[CST:.*]] = llvm.mlir.constant(42 : i32) : !llvm.i32 +// CHECK-NEXT: %[[CST:.*]] = llvm.mlir.constant(42 : i32) : i32 %0 = constant 42 : i32 // CHECK-NEXT: llvm.br ^bb2 br ^bb2 // CHECK-NEXT: ^bb1: -// CHECK-NEXT: %[[ADD:.*]] = llvm.add %arg0, %[[CST]] : !llvm.i32 -// CHECK-NEXT: llvm.return %[[ADD]] : !llvm.i32 +// CHECK-NEXT: %[[ADD:.*]] = llvm.add %arg0, %[[CST]] : i32 +// CHECK-NEXT: llvm.return %[[ADD]] : i32 ^bb1: %2 = addi %arg0, %0 : i32 return %2 : i32 @@ -860,29 +860,29 @@ return %r : vector<4xf32> } // CHECK-NEXT: %[[UNDEF:[0-9]+]] = llvm.mlir.undef : !llvm.vec<4 x float> -// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : !llvm.i32 -// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : !llvm.i32] : !llvm.vec<4 x float> +// CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : i32] : !llvm.vec<4 x float> // CHECK-NEXT: %[[SPLAT:[0-9]+]] = llvm.shufflevector %[[V]], %[[UNDEF]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] // CHECK-NEXT: %[[SCALE:[0-9]+]] = llvm.fmul %[[A]], %[[SPLAT]] : !llvm.vec<4 x float> // CHECK-NEXT: llvm.return %[[SCALE]] : !llvm.vec<4 x float> // CHECK-LABEL: func @view( -// CHECK: %[[ARG0:.*]]: !llvm.i64, %[[ARG1:.*]]: !llvm.i64, 
%[[ARG2:.*]]: !llvm.i64 +// CHECK: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64 func @view(%arg0 : index, %arg1 : index, %arg2 : index) { - // CHECK: llvm.mlir.constant(2048 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(2048 : index) : i64 // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %0 = alloc() : memref<2048xi8> // Test two dynamic sizes. // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[BASE_PTR:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - // CHECK: %[[SHIFTED_BASE_PTR:.*]] = llvm.getelementptr %[[BASE_PTR]][%[[ARG2]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + // CHECK: %[[SHIFTED_BASE_PTR:.*]] = llvm.getelementptr %[[BASE_PTR]][%[[ARG2]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK: %[[CAST_SHIFTED_BASE_PTR:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR]] : !llvm.ptr to !llvm.ptr // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK: llvm.insertvalue %[[C0]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: llvm.insertvalue %[[ARG0]], %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: llvm.mul %{{.*}}, %[[ARG1]] @@ -892,15 +892,15 @@ // Test one dynamic size. // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[BASE_PTR_2:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - // CHECK: %[[SHIFTED_BASE_PTR_2:.*]] = llvm.getelementptr %[[BASE_PTR_2]][%[[ARG2]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + // CHECK: %[[SHIFTED_BASE_PTR_2:.*]] = llvm.getelementptr %[[BASE_PTR_2]][%[[ARG2]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK: %[[CAST_SHIFTED_BASE_PTR_2:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_2]] : !llvm.ptr to !llvm.ptr // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_2]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK: llvm.insertvalue %[[C0_2]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(4 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: llvm.mul %{{.*}}, %[[ARG1]] // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> @@ -909,40 +909,40 @@ // Test static sizes. 
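In the fully static `view` case below, every size and stride is known at compile time, so the checks expect only `llvm.mlir.constant`/`llvm.insertvalue` pairs, with all index values now spelled `i64`. A condensed, hedged sketch of the population those CHECK lines pin down (descriptor value names are illustrative, and the pointer element type is assumed to be `float`):

```mlir
// Offset 0 goes into slot [2]; sizes 64 and 4 into [3, 0] and [3, 1];
// strides 4 and 1 into [4, 0] and [4, 1]. One representative pair:
%c0 = llvm.mlir.constant(0 : index) : i64
%d1 = llvm.insertvalue %c0, %d0[2]
    : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
%c4 = llvm.mlir.constant(4 : index) : i64
%d2 = llvm.insertvalue %c4, %d1[3, 1]
    : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
```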
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[BASE_PTR_3:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - // CHECK: %[[SHIFTED_BASE_PTR_3:.*]] = llvm.getelementptr %[[BASE_PTR_3]][%[[ARG2]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + // CHECK: %[[SHIFTED_BASE_PTR_3:.*]] = llvm.getelementptr %[[BASE_PTR_3]][%[[ARG2]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK: %[[CAST_SHIFTED_BASE_PTR_3:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_3]] : !llvm.ptr to !llvm.ptr // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_3]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK: llvm.insertvalue %[[C0_3]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(4 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(64 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(64 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(4 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> %5 = view %0[%arg2][] : memref<2048xi8> to memref<64x4xf32> // Test view memory space. 
- // CHECK: llvm.mlir.constant(2048 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(2048 : index) : i64 // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> %6 = alloc() : memref<2048xi8, 4> // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[BASE_PTR_4:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - // CHECK: %[[SHIFTED_BASE_PTR_4:.*]] = llvm.getelementptr %[[BASE_PTR_4]][%[[ARG2]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + // CHECK: %[[SHIFTED_BASE_PTR_4:.*]] = llvm.getelementptr %[[BASE_PTR_4]][%[[ARG2]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK: %[[CAST_SHIFTED_BASE_PTR_4:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_4]] : !llvm.ptr to !llvm.ptr // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_4]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[C0_4:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // CHECK: %[[C0_4:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK: llvm.insertvalue %[[C0_4]], %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(4 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(64 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(64 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 + // CHECK: llvm.mlir.constant(4 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> %7 = view %6[%arg2][] : memref<2048xi8, 4> to memref<64x4xf32, 4> @@ -951,16 +951,16 @@ // CHECK-LABEL: func @subview( // CHECK-COUNT-2: !llvm.ptr, -// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64, -// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i64, -// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i64, -// CHECK: %[[ARG2:.*]]: !llvm.i64) +// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64, +// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: i64, +// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: i64, +// CHECK: %[[ARG2:.*]]: i64) // CHECK32-LABEL: func @subview( // CHECK32-COUNT-2: !llvm.ptr, -// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i32, -// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i32, -// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i32, -// CHECK32: %[[ARG2:.*]]: !llvm.i32) +// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32, +// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: i32, +// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: i32, +// CHECK32: %[[ARG2:.*]]: i32) func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) { // The last "insertvalue" that populates the memref descriptor from the function arguments. 
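For orientation in the `@subview` checks that follow: the new descriptor's offset is the source offset advanced by each dynamic index scaled by the corresponding source stride, and that arithmetic is now carried out on plain `i64` (or `i32` under CHECK32). A hedged sketch, with illustrative value names in place of the FileCheck captures:

```mlir
// off2 = off + %arg0 * stride0 + %arg1 * stride1
%offinc0 = llvm.mul %arg0, %stride0 : i64
%off1    = llvm.add %off, %offinc0 : i64
%offinc1 = llvm.mul %arg1, %stride1 : i64
%off2    = llvm.add %off1, %offinc1 : i64
```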
// CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] @@ -974,16 +974,16 @@ // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64 - // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64 - // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64 - // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64 + // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64 + // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64 + // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64 + // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64 // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64 + // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64 // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64 + // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64 // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr @@ -993,16 +993,16 @@ // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32 - // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32 - // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32 - // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32 + // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32 + // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32 + // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32 + // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32 // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32 + // CHECK32: %[[DESCSTRIDE1:.*]] = 
llvm.mul %[[ARG1]], %[[STRIDE1]] : i32 // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32 + // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32 %1 = subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] : memref<64x4xf32, offset: 0, strides: [4, 1]> @@ -1012,16 +1012,16 @@ // CHECK-LABEL: func @subview_non_zero_addrspace( // CHECK-COUNT-2: !llvm.ptr, -// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64, -// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i64, -// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i64, -// CHECK: %[[ARG2:.*]]: !llvm.i64) +// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64, +// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: i64, +// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: i64, +// CHECK: %[[ARG2:.*]]: i64) // CHECK32-LABEL: func @subview_non_zero_addrspace( // CHECK32-COUNT-2: !llvm.ptr, -// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i32, -// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i32, -// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i32, -// CHECK32: %[[ARG2:.*]]: !llvm.i32) +// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32, +// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: i32, +// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: i32, +// CHECK32: %[[ARG2:.*]]: i32) func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1], 3>, %arg0 : index, %arg1 : index, %arg2 : index) { // The last "insertvalue" that populates the memref descriptor from the function arguments. // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] @@ -1035,16 +1035,16 @@ // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64 - // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64 - // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64 - // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64 + // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64 + // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64 + // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64 + // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64 // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64 + // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64 // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64 + // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64 // CHECK: 
llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr @@ -1054,16 +1054,16 @@ // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32 - // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32 - // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32 - // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32 + // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32 + // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32 + // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32 + // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32 // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32 + // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32 // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32 + // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32 %1 = subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] : memref<64x4xf32, offset: 0, strides: [4, 1], 3> @@ -1074,25 +1074,25 @@ // CHECK-LABEL: func @subview_const_size( // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr, // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr, -// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i64 +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG7:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG8:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG9:[a-zA-Z0-9]*]]: i64 // CHECK32-LABEL: func @subview_const_size( // CHECK32-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr, // CHECK32-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr, -// CHECK32-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: 
%[[ARG6:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i32 +// CHECK32-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG7:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG8:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG9:[a-zA-Z0-9]*]]: i32 func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) { // The last "insertvalue" that populates the memref descriptor from the function arguments. // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] @@ -1106,18 +1106,18 @@ // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64 - // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64 - // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64 - // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64 + // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64 + // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64 + // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64 + // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64 // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i64) // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64 + // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64 // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[CST4:.*]] = llvm.mlir.constant(4 : i64) // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64 + // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64 // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr to !llvm.ptr @@ -1127,18 +1127,18 @@ // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32 - // 
CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32 - // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32 - // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32 + // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32 + // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32 + // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32 + // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32 // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[CST2:.*]] = llvm.mlir.constant(2 : i64) // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32 + // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32 // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[CST4:.*]] = llvm.mlir.constant(4 : i64) // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32 + // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32 // CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> %1 = subview %0[%arg0, %arg1][4, 2][%arg0, %arg1] : memref<64x4xf32, offset: 0, strides: [4, 1]> @@ -1149,25 +1149,25 @@ // CHECK-LABEL: func @subview_const_stride( // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr, // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr, -// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i64 -// CHECK-SAME: %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i64 +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG7:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG8:[a-zA-Z0-9]*]]: i64 +// CHECK-SAME: %[[ARG9:[a-zA-Z0-9]*]]: i64 // CHECK32-LABEL: func @subview_const_stride( // CHECK32-SAME: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr, // CHECK32-SAME: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr, -// CHECK32-SAME: %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i32 -// CHECK32-SAME: %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i32 +// CHECK32-SAME: %[[ARG2:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG3:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG4:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG5:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG6:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG7:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG8:[a-zA-Z0-9]*]]: i32 +// CHECK32-SAME: %[[ARG9:[a-zA-Z0-9]*]]: i32 func 
@subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) { // The last "insertvalue" that populates the memref descriptor from the function arguments. // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] @@ -1181,10 +1181,10 @@ // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64 - // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64 - // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64 - // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64 + // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64 + // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64 + // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64 + // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64 // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i64) @@ -1200,10 +1200,10 @@ // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32 - // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32 - // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32 - // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32 + // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32 + // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32 + // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32 + // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32 // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[CST2:.*]] = llvm.mlir.constant(2 : i64) @@ -1249,16 +1249,16 @@ // CHECK-LABEL: func @subview_mixed_static_dynamic( // CHECK-COUNT-2: !llvm.ptr, -// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i64, -// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i64, -// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i64, -// CHECK: %[[ARG2:.*]]: !llvm.i64) +// CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64, +// CHECK: %[[ARG0:[a-zA-Z0-9]*]]: i64, +// CHECK: %[[ARG1:[a-zA-Z0-9]*]]: i64, +// CHECK: %[[ARG2:.*]]: i64) // CHECK32-LABEL: func @subview_mixed_static_dynamic( // CHECK32-COUNT-2: !llvm.ptr, -// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: !llvm.i32, -// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: !llvm.i32, -// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: !llvm.i32, -// CHECK32: 
%[[ARG2:.*]]: !llvm.i32) +// CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32, +// CHECK32: %[[ARG0:[a-zA-Z0-9]*]]: i32, +// CHECK32: %[[ARG1:[a-zA-Z0-9]*]]: i32, +// CHECK32: %[[ARG2:.*]]: i32) func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) { // The last "insertvalue" that populates the memref descriptor from the function arguments. // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] @@ -1272,19 +1272,19 @@ // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[OFFM1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE0]] : !llvm.i32 - // CHECK32: %[[OFFA1:.*]] = llvm.add %[[OFF]], %[[OFFM1]] : !llvm.i32 - // CHECK32: %[[CST8:.*]] = llvm.mlir.constant(8 : i64) : !llvm.i32 - // CHECK32: %[[OFFM2:.*]] = llvm.mul %[[CST8]], %[[STRIDE1]] : !llvm.i32 - // CHECK32: %[[OFFA2:.*]] = llvm.add %[[OFFA1]], %[[OFFM2]] : !llvm.i32 + // CHECK32: %[[OFFM1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE0]] : i32 + // CHECK32: %[[OFFA1:.*]] = llvm.add %[[OFF]], %[[OFFM1]] : i32 + // CHECK32: %[[CST8:.*]] = llvm.mlir.constant(8 : i64) : i32 + // CHECK32: %[[OFFM2:.*]] = llvm.mul %[[CST8]], %[[STRIDE1]] : i32 + // CHECK32: %[[OFFA2:.*]] = llvm.add %[[OFFA1]], %[[OFFM2]] : i32 // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFFA2]], %[[DESC1]][2] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[CST1:.*]] = llvm.mlir.constant(1 : i64) : !llvm.i32 + // CHECK32: %[[CST1:.*]] = llvm.mlir.constant(1 : i64) : i32 // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[CST62:.*]] = llvm.mlir.constant(62 : i64) : !llvm.i32 + // CHECK32: %[[CST62:.*]] = llvm.mlir.constant(62 : i64) : i32 // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST62]], %[[DESC4]][3, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> - // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32 + // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32 // CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr, ptr, i32, array<2 x i32>, array<2 x i32>)> %1 = subview %0[%arg1, 8][62, %arg2][%arg0, 1] : memref<64x4xf32, offset: 0, strides: [4, 1]> @@ -1323,20 +1323,20 @@ atomic_yield %c1 : i32 } // CHECK: [[init:%.*]] = llvm.load %{{.*}} : !llvm.ptr - // CHECK-NEXT: llvm.br ^bb1([[init]] : !llvm.i32) - // CHECK-NEXT: ^bb1([[loaded:%.*]]: !llvm.i32): + // CHECK-NEXT: llvm.br ^bb1([[init]] : i32) + // CHECK-NEXT: ^bb1([[loaded:%.*]]: i32): // CHECK-NEXT: [[c1:%.*]] = llvm.mlir.constant(1 : i32) // CHECK-NEXT: [[pair:%.*]] = llvm.cmpxchg %{{.*}}, [[loaded]], [[c1]] - // CHECK-SAME: acq_rel monotonic : !llvm.i32 + // CHECK-SAME: acq_rel monotonic : i32 // CHECK-NEXT: [[new:%.*]] = llvm.extractvalue [[pair]][0] // CHECK-NEXT: [[ok:%.*]] = llvm.extractvalue [[pair]][1] - // CHECK-NEXT: llvm.cond_br [[ok]], ^bb2, ^bb1([[new]] : !llvm.i32) + // CHECK-NEXT: llvm.cond_br [[ok]], ^bb2, ^bb1([[new]] : i32) // CHECK-NEXT: 
^bb2: %c2 = constant 2 : i32 %add = addi %c2, %x : i32 return %add : i32 // CHECK-NEXT: [[c2:%.*]] = llvm.mlir.constant(2 : i32) - // CHECK-NEXT: [[add:%.*]] = llvm.add [[c2]], [[new]] : !llvm.i32 + // CHECK-NEXT: [[add:%.*]] = llvm.add [[c2]], [[new]] : i32 // CHECK-NEXT: llvm.return [[add]] } @@ -1345,12 +1345,12 @@ // CHECK-LABEL: func @assume_alignment func @assume_alignment(%0 : memref<4x4xf16>) { // CHECK: %[[PTR:.*]] = llvm.extractvalue %[[MEMREF:.*]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> - // CHECK-NEXT: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 - // CHECK-NEXT: %[[MASK:.*]] = llvm.mlir.constant(15 : index) : !llvm.i64 - // CHECK-NEXT: %[[INT:.*]] = llvm.ptrtoint %[[PTR]] : !llvm.ptr to !llvm.i64 - // CHECK-NEXT: %[[MASKED_PTR:.*]] = llvm.and %[[INT]], %[[MASK:.*]] : !llvm.i64 - // CHECK-NEXT: %[[CONDITION:.*]] = llvm.icmp "eq" %[[MASKED_PTR]], %[[ZERO]] : !llvm.i64 - // CHECK-NEXT: "llvm.intr.assume"(%[[CONDITION]]) : (!llvm.i1) -> () + // CHECK-NEXT: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64 + // CHECK-NEXT: %[[MASK:.*]] = llvm.mlir.constant(15 : index) : i64 + // CHECK-NEXT: %[[INT:.*]] = llvm.ptrtoint %[[PTR]] : !llvm.ptr to i64 + // CHECK-NEXT: %[[MASKED_PTR:.*]] = llvm.and %[[INT]], %[[MASK:.*]] : i64 + // CHECK-NEXT: %[[CONDITION:.*]] = llvm.icmp "eq" %[[MASKED_PTR]], %[[ZERO]] : i64 + // CHECK-NEXT: "llvm.intr.assume"(%[[CONDITION]]) : (i1) -> () assume_alignment %0, 16 : memref<4x4xf16> return } @@ -1404,11 +1404,11 @@ // CHECK-LABEL: func @memref_index // CHECK-SAME: %arg0: !llvm.ptr, %arg1: !llvm.ptr, -// CHECK-SAME: %arg2: !llvm.i64, %arg3: !llvm.i64, %arg4: !llvm.i64) +// CHECK-SAME: %arg2: i64, %arg3: i64, %arg4: i64) // CHECK-SAME: -> !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK32-LABEL: func @memref_index // CHECK32-SAME: %arg0: !llvm.ptr, %arg1: !llvm.ptr, -// CHECK32-SAME: %arg2: !llvm.i32, %arg3: !llvm.i32, %arg4: !llvm.i32) +// CHECK32-SAME: %arg2: i32, %arg3: i32, %arg4: i32) // CHECK32-SAME: -> !llvm.struct<(ptr, ptr, i32, array<1 x i32>, array<1 x i32>)> func @memref_index(%arg0: memref<32xindex>) -> memref<32xindex> { return %arg0 : memref<32xindex> @@ -1434,8 +1434,8 @@ %rank = rank %ranked : memref return } -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK32: llvm.mlir.constant(1 : index) : !llvm.i32 +// CHECK: llvm.mlir.constant(1 : index) : i64 +// CHECK32: llvm.mlir.constant(1 : index) : i32 // ----- @@ -1449,7 +1449,7 @@ // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(i64, ptr)> // CHECK-NEXT: llvm.insertvalue // CHECK-NEXT: %[[UNRANKED_DESC:.*]] = llvm.insertvalue -// CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// CHECK-NEXT: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK-NEXT: %[[RANKED_DESC:.*]] = llvm.extractvalue %[[UNRANKED_DESC]][1] // CHECK-SAME: : !llvm.struct<(i64, ptr)> @@ -1457,21 +1457,21 @@ // CHECK-NEXT: %[[ZERO_D_DESC:.*]] = llvm.bitcast %[[RANKED_DESC]] // CHECK-SAME: : !llvm.ptr to !llvm.ptr, ptr, i64)>> -// CHECK-NEXT: %[[C2_i32:.*]] = llvm.mlir.constant(2 : i32) : !llvm.i32 -// CHECK-NEXT: %[[C0_:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 +// CHECK-NEXT: %[[C2_i32:.*]] = llvm.mlir.constant(2 : i32) : i32 +// CHECK-NEXT: %[[C0_:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK-NEXT: %[[OFFSET_PTR:.*]] = llvm.getelementptr %[[ZERO_D_DESC]]{{\[}} // CHECK-SAME: %[[C0_]], %[[C2_i32]]] : (!llvm.ptr, ptr, -// CHECK-SAME: i64)>>, !llvm.i64, !llvm.i32) -> !llvm.ptr +// CHECK-SAME: i64)>>, i64, i32) -> 
!llvm.ptr -// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 -// CHECK-NEXT: %[[INDEX_INC:.*]] = llvm.add %[[C1]], %[[C0]] : !llvm.i64 +// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 +// CHECK-NEXT: %[[INDEX_INC:.*]] = llvm.add %[[C1]], %[[C0]] : i64 // CHECK-NEXT: %[[SIZE_PTR:.*]] = llvm.getelementptr %[[OFFSET_PTR]]{{\[}} -// CHECK-SAME: %[[INDEX_INC]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr +// CHECK-SAME: %[[INDEX_INC]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: %[[SIZE:.*]] = llvm.load %[[SIZE_PTR]] : !llvm.ptr -// CHECK-NEXT: llvm.return %[[SIZE]] : !llvm.i64 +// CHECK-NEXT: llvm.return %[[SIZE]] : i64 // CHECK32: %[[SIZE:.*]] = llvm.load %{{.*}} : !llvm.ptr -// CHECK32-NEXT: llvm.return %[[SIZE]] : !llvm.i32 +// CHECK32-NEXT: llvm.return %[[SIZE]] : i32 diff --git a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir --- a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir +++ b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir @@ -96,7 +96,7 @@ // Lowers `assert` to a function call to `abort` if the assertion is violated. // CHECK: llvm.func @abort() // CHECK-LABEL: @assert_test_function -// CHECK-SAME: (%[[ARG:.*]]: !llvm.i1) +// CHECK-SAME: (%[[ARG:.*]]: i1) func @assert_test_function(%arg : i1) { // CHECK: llvm.cond_br %[[ARG]], ^[[CONTINUATION_BLOCK:.*]], ^[[FAILURE_BLOCK:.*]] // CHECK: ^[[CONTINUATION_BLOCK]]: @@ -141,17 +141,17 @@ // CHECK-LABEL: func @get_gv0_memref func @get_gv0_memref() { %0 = get_global_memref @gv0 : memref<2xf32> - // CHECK: %[[DIM:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64 - // CHECK: %[[STRIDE:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 + // CHECK: %[[DIM:.*]] = llvm.mlir.constant(2 : index) : i64 + // CHECK: %[[STRIDE:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv0 : !llvm.ptr> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 - // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]]] : (!llvm.ptr>, !llvm.i64, !llvm.i64) -> !llvm.ptr - // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : !llvm.i64 - // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : !llvm.i64 to !llvm.ptr + // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64 + // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]]] : (!llvm.ptr>, i64, i64) -> !llvm.ptr + // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64 + // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> - // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK: llvm.insertvalue %[[DIM]], {{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK: llvm.insertvalue %[[STRIDE]], {{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> @@ -161,18 +161,18 @@ // Test 2D memref. 
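Recapping the `@get_gv0_memref` pattern above before the 2-D variant: the lowering takes the global's address, GEPs to the first element with zero indices (now plain `i64`), and fills the descriptor's allocated-pointer slot with a sentinel, presumably so that deallocating a global-backed memref fails loudly. A hedged sketch, with the pointer element types written out as assumptions:

```mlir
%addr = llvm.mlir.addressof @gv0 : !llvm.ptr<array<2 x float>>
%zero = llvm.mlir.constant(0 : index) : i64
%gep  = llvm.getelementptr %addr[%zero, %zero]
    : (!llvm.ptr<array<2 x float>>, i64, i64) -> !llvm.ptr<float>
// Sentinel for the "allocated" slot: 3735928559 == 0xDEADBEEF.
%dead = llvm.mlir.constant(3735928559 : index) : i64
%aptr = llvm.inttoptr %dead : i64 to !llvm.ptr<float>
```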
 // CHECK-LABEL: func @get_gv2_memref
 func @get_gv2_memref() {
-  // CHECK: %[[DIM0:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-  // CHECK: %[[DIM1:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-  // CHECK: %[[STRIDE1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
+  // CHECK: %[[DIM0:.*]] = llvm.mlir.constant(2 : index) : i64
+  // CHECK: %[[DIM1:.*]] = llvm.mlir.constant(3 : index) : i64
+  // CHECK: %[[STRIDE1:.*]] = llvm.mlir.constant(1 : index) : i64
   // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv2 : !llvm.ptr>>
-  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]], %[[ZERO]]] : (!llvm.ptr>>, !llvm.i64, !llvm.i64, !llvm.i64) -> !llvm.ptr
-  // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : !llvm.i64
-  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : !llvm.i64 to !llvm.ptr
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
+  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]], %[[ZERO]]] : (!llvm.ptr>>, i64, i64, i64) -> !llvm.ptr
+  // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
+  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+  // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64
   // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.insertvalue %[[DIM0]], {{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.insertvalue %[[DIM1]], {{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -190,14 +190,14 @@
 // CHECK-LABEL: func @get_gv3_memref
 func @get_gv3_memref() {
   // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv3 : !llvm.ptr
-  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr
-  // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : !llvm.i64
-  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : !llvm.i64 to !llvm.ptr
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
+  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr, i64) -> !llvm.ptr
+  // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
+  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)>
   // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64)>
   // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64)>
-  // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
+  // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64
   // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr, ptr, i64)>
   %0 = get_global_memref @gv3 : memref
   return
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
--- a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
@@ -2,22 +2,22 @@
 // RUN: mlir-opt %s --convert-vector-to-llvm='enable-index-optimizations=0' | FileCheck %s --check-prefix=CMP64

 // CMP32-LABEL: llvm.func @genbool_var_1d(
-// CMP32-SAME: %[[A:.*]]: !llvm.i64)
+// CMP32-SAME: %[[A:.*]]: i64)
 // CMP32: %[[T0:.*]] = llvm.mlir.constant(dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : vector<11xi32>) : !llvm.vec<11 x i32>
-// CMP32: %[[T1:.*]] = llvm.trunc %[[A]] : !llvm.i64 to !llvm.i32
+// CMP32: %[[T1:.*]] = llvm.trunc %[[A]] : i64 to i32
 // CMP32: %[[T2:.*]] = llvm.mlir.undef : !llvm.vec<11 x i32>
-// CMP32: %[[T3:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CMP32: %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%[[T3]] : !llvm.i32] : !llvm.vec<11 x i32>
+// CMP32: %[[T3:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CMP32: %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%[[T3]] : i32] : !llvm.vec<11 x i32>
 // CMP32: %[[T5:.*]] = llvm.shufflevector %[[T4]], %[[T2]] [0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<11 x i32>, !llvm.vec<11 x i32>
 // CMP32: %[[T6:.*]] = llvm.icmp "slt" %[[T0]], %[[T5]] : !llvm.vec<11 x i32>
 // CMP32: llvm.return %[[T6]] : !llvm.vec<11 x i1>

 // CMP64-LABEL: llvm.func @genbool_var_1d(
-// CMP64-SAME: %[[A:.*]]: !llvm.i64)
+// CMP64-SAME: %[[A:.*]]: i64)
 // CMP64: %[[T0:.*]] = llvm.mlir.constant(dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : vector<11xi64>) : !llvm.vec<11 x i64>
 // CMP64: %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<11 x i64>
-// CMP64: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CMP64: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : !llvm.i32] : !llvm.vec<11 x i64>
+// CMP64: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CMP64: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<11 x i64>
 // CMP64: %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T1]] [0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<11 x i64>, !llvm.vec<11 x i64>
 // CMP64: %[[T5:.*]] = llvm.icmp "slt" %[[T0]], %[[T4]] : !llvm.vec<11 x i64>
 // CMP64: llvm.return %[[T5]] : !llvm.vec<11 x i1>
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -7,8 +7,8 @@
 // CHECK-LABEL: llvm.func @broadcast_vec1d_from_scalar(
 // CHECK-SAME: %[[A:.*]]: !llvm.float)
 // CHECK: %[[T0:.*]] = llvm.mlir.undef : !llvm.vec<2 x float>
-// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T2:.*]] = llvm.insertelement %[[A]], %[[T0]][%[[T1]] : !llvm.i32] : !llvm.vec<2 x float>
+// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T2:.*]] = llvm.insertelement %[[A]], %[[T0]][%[[T1]] : i32] : !llvm.vec<2 x float>
 // CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T0]] [0 : i32, 0 : i32] : !llvm.vec<2 x float>, !llvm.vec<2 x float>
 // CHECK: llvm.return %[[T3]] : !llvm.vec<2 x float>
@@ -20,8 +20,8 @@
 // CHECK-SAME: %[[A:.*]]: !llvm.float)
 // CHECK: %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x vec<3 x float>>
 // CHECK: %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
 // CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][1] : !llvm.array<2 x vec<3 x float>>
@@ -35,8 +35,8 @@
 // CHECK-SAME: %[[A:.*]]: !llvm.float)
 // CHECK: %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x array<3 x vec<4 x float>>>
 // CHECK: %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<4 x float>
-// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : !llvm.i32] : !llvm.vec<4 x float>
+// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<4 x float>
 // CHECK: %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T3]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
 // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T0]][0, 0] : !llvm.array<2 x array<3 x vec<4 x float>>>
 // CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0, 1] : !llvm.array<2 x array<3 x vec<4 x float>>>
@@ -102,11 +102,11 @@
 }
 // CHECK-LABEL: llvm.func @broadcast_stretch(
 // CHECK-SAME: %[[A:.*]]: !llvm.vec<1 x float>)
-// CHECK: %[[T0:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T0:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : i64] : !llvm.vec<1 x float>
 // CHECK: %[[T2:.*]] = llvm.mlir.undef : !llvm.vec<4 x float>
-// CHECK: %[[T3:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%3 : !llvm.i32] : !llvm.vec<4 x float>
+// CHECK: %[[T3:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%3 : i32] : !llvm.vec<4 x float>
 // CHECK: %[[T5:.*]] = llvm.shufflevector %[[T4]], %[[T2]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
 // CHECK: llvm.return %[[T5]] : !llvm.vec<4 x float>
@@ -131,35 +131,35 @@
 // CHECK-SAME: %[[A:.*]]: !llvm.array<4 x vec<1 x float>>)
 // CHECK: %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3xf32>) : !llvm.array<4 x vec<3 x float>>
 // CHECK: %[[T1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<4 x vec<1 x float>>
-// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T3:.*]] = llvm.extractelement %[[T1]][%[[T2]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T3:.*]] = llvm.extractelement %[[T1]][%[[T2]] : i64] : !llvm.vec<1 x float>
 // CHECK: %[[T4:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T5:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T6:.*]] = llvm.insertelement %[[T3]], %[[T4]][%[[T5]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T5:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T6:.*]] = llvm.insertelement %[[T3]], %[[T4]][%[[T5]] : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T7:.*]] = llvm.shufflevector %[[T6]], %[[T4]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T0]][0] : !llvm.array<4 x vec<3 x float>>
 // CHECK: %[[T9:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<4 x vec<1 x float>>
-// CHECK: %[[T10:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T11:.*]] = llvm.extractelement %[[T9]][%[[T10]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T10:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T11:.*]] = llvm.extractelement %[[T9]][%[[T10]] : i64] : !llvm.vec<1 x float>
 // CHECK: %[[T12:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T15:.*]] = llvm.shufflevector %[[T14]], %[[T12]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T16:.*]] = llvm.insertvalue %[[T15]], %[[T8]][1] : !llvm.array<4 x vec<3 x float>>
 // CHECK: %[[T17:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vec<1 x float>>
-// CHECK: %[[T18:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T19:.*]] = llvm.extractelement %[[T17]][%[[T18]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T18:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T19:.*]] = llvm.extractelement %[[T17]][%[[T18]] : i64] : !llvm.vec<1 x float>
 // CHECK: %[[T20:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T21:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T22:.*]] = llvm.insertelement %[[T19]], %[[T20]][%[[T21]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T21:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T22:.*]] = llvm.insertelement %[[T19]], %[[T20]][%[[T21]] : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T23:.*]] = llvm.shufflevector %[[T22]], %[[T20]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T24:.*]] = llvm.insertvalue %[[T23]], %[[T16]][2] : !llvm.array<4 x vec<3 x float>>
 // CHECK: %[[T25:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vec<1 x float>>
-// CHECK: %[[T26:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T27:.*]] = llvm.extractelement %[[T25]][%[[T26]] : !llvm.i64] : !llvm.vec<1 x float>
+// CHECK: %[[T26:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T27:.*]] = llvm.extractelement %[[T25]][%[[T26]] : i64] : !llvm.vec<1 x float>
 // CHECK: %[[T28:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T29:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T30:.*]] = llvm.insertelement %[[T27]], %[[T28]][%[[T29]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T29:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T30:.*]] = llvm.insertelement %[[T27]], %[[T28]][%[[T29]] : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T31:.*]] = llvm.shufflevector %[[T30]], %[[T28]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T32:.*]] = llvm.insertvalue %[[T31]], %[[T24]][3] : !llvm.array<4 x vec<3 x float>>
 // CHECK: llvm.return %[[T32]] : !llvm.array<4 x vec<3 x float>>
@@ -202,19 +202,19 @@
 // CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x float>,
 // CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>)
 // CHECK: %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<2x3xf32>)
-// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : i64] : !llvm.vec<2 x float>
 // CHECK: %[[T3:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%4 : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%4 : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T7:.*]] = llvm.fmul %[[T6]], %[[B]] : !llvm.vec<3 x float>
 // CHECK: %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
-// CHECK: %[[T9:.*]] = llvm.mlir.constant(1 : i64) : !llvm.i64
-// CHECK: %[[T10:.*]] = llvm.extractelement %[[A]][%9 : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[T9:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[T10:.*]] = llvm.extractelement %[[A]][%9 : i64] : !llvm.vec<2 x float>
 // CHECK: %[[T11:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T12:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T13:.*]] = llvm.insertelement %[[T10]], %[[T11]][%12 : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T12:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T13:.*]] = llvm.insertelement %[[T10]], %[[T11]][%12 : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T14:.*]] = llvm.shufflevector %[[T13]], %[[T11]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T15:.*]] = llvm.fmul %[[T14]], %[[B]] : !llvm.vec<3 x float>
 // CHECK: %[[T16:.*]] = llvm.insertvalue %[[T15]], %[[T8]][1] : !llvm.array<2 x vec<3 x float>>
@@ -229,20 +229,20 @@
 // CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>,
 // CHECK-SAME: %[[C:.*]]: !llvm.array<2 x vec<3 x float>>)
 // CHECK: %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<2x3xf32>)
-// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : i64] : !llvm.vec<2 x float>
 // CHECK: %[[T3:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%[[T4]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%[[T4]] : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T7:.*]] = llvm.extractvalue %[[C]][0] : !llvm.array<2 x vec<3 x float>>
 // CHECK: %[[T8:.*]] = "llvm.intr.fmuladd"(%[[T6]], %[[B]], %[[T7]]) : (!llvm.vec<3 x float>, !llvm.vec<3 x float>, !llvm.vec<3 x float>)
 // CHECK: %[[T9:.*]] = llvm.insertvalue %[[T8]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
-// CHECK: %[[T10:.*]] = llvm.mlir.constant(1 : i64) : !llvm.i64
-// CHECK: %[[T11:.*]] = llvm.extractelement %[[A]][%[[T10]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[T10:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[T11:.*]] = llvm.extractelement %[[A]][%[[T10]] : i64] : !llvm.vec<2 x float>
 // CHECK: %[[T12:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
-// CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : !llvm.i32] : !llvm.vec<3 x float>
+// CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : i32] : !llvm.vec<3 x float>
 // CHECK: %[[T15:.*]] = llvm.shufflevector %[[T14]], %[[T12]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
 // CHECK: %[[T16:.*]] = llvm.extractvalue %[[C]][1] : !llvm.array<2 x vec<3 x float>>
 // CHECK: %[[T17:.*]] = "llvm.intr.fmuladd"(%[[T15]], %[[B]], %[[T16]]) : (!llvm.vec<3 x float>, !llvm.vec<3 x float>, !llvm.vec<3 x float>)
@@ -267,26 +267,26 @@
 // CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x float>,
 // CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>)
 // CHECK: %[[u0:.*]] = llvm.mlir.undef : !llvm.vec<5 x float>
-// CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[e1:.*]] = llvm.extractelement %[[B]][%[[c2]] : !llvm.i64] : !llvm.vec<3 x float>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[i1:.*]] = llvm.insertelement %[[e1]], %[[u0]][%[[c0]] : !llvm.i64] : !llvm.vec<5 x float>
-// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[e2:.*]] = llvm.extractelement %[[B]][%[[c1]] : !llvm.i64] : !llvm.vec<3 x float>
-// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[i2:.*]] = llvm.insertelement %[[e2]], %[[i1]][%[[c1]] : !llvm.i64] : !llvm.vec<5 x float>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[e3:.*]] = llvm.extractelement %[[B]][%[[c0]] : !llvm.i64] : !llvm.vec<3 x float>
-// CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[i3:.*]] = llvm.insertelement %[[e3]], %[[i2]][%[[c2]] : !llvm.i64] : !llvm.vec<5 x float>
-// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[e4:.*]] = llvm.extractelement %[[A]][%[[c1]] : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK: %[[c3:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[i4:.*]] = llvm.insertelement %[[e4]], %[[i3]][%[[c3]] : !llvm.i64] : !llvm.vec<5 x float>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[e5:.*]] = llvm.extractelement %[[A]][%[[c0]] : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK: %[[c4:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
-// CHECK: %[[i5:.*]] = llvm.insertelement %[[e5]], %[[i4]][%[[c4]] : !llvm.i64] : !llvm.vec<5 x float>
+// CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[e1:.*]] = llvm.extractelement %[[B]][%[[c2]] : i64] : !llvm.vec<3 x float>
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[i1:.*]] = llvm.insertelement %[[e1]], %[[u0]][%[[c0]] : i64] : !llvm.vec<5 x float>
+// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[e2:.*]] = llvm.extractelement %[[B]][%[[c1]] : i64] : !llvm.vec<3 x float>
+// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[i2:.*]] = llvm.insertelement %[[e2]], %[[i1]][%[[c1]] : i64] : !llvm.vec<5 x float>
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[e3:.*]] = llvm.extractelement %[[B]][%[[c0]] : i64] : !llvm.vec<3 x float>
+// CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[i3:.*]] = llvm.insertelement %[[e3]], %[[i2]][%[[c2]] : i64] : !llvm.vec<5 x float>
+// CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[e4:.*]] = llvm.extractelement %[[A]][%[[c1]] : i64] : !llvm.vec<2 x float>
+// CHECK: %[[c3:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[i4:.*]] = llvm.insertelement %[[e4]], %[[i3]][%[[c3]] : i64] : !llvm.vec<5 x float>
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[e5:.*]] = llvm.extractelement %[[A]][%[[c0]] : i64] : !llvm.vec<2 x float>
+// CHECK: %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
+// CHECK: %[[i5:.*]] = llvm.insertelement %[[e5]], %[[i4]][%[[c4]] : i64] : !llvm.vec<5 x float>
 // CHECK: llvm.return %[[i5]] : !llvm.vec<5 x float>

 func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> {
@@ -312,8 +312,8 @@
 }
 // CHECK-LABEL: llvm.func @extract_element(
 // CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x float>)
-// CHECK: %[[c:.*]] = llvm.mlir.constant(15 : i32) : !llvm.i32
-// CHECK: %[[x:.*]] = llvm.extractelement %[[A]][%[[c]] : !llvm.i32] : !llvm.vec<16 x float>
+// CHECK: %[[c:.*]] = llvm.mlir.constant(15 : i32) : i32
+// CHECK: %[[x:.*]] = llvm.extractelement %[[A]][%[[c]] : i32] : !llvm.vec<16 x float>
 // CHECK: llvm.return %[[x]] : !llvm.float

 func @extract_element_from_vec_1d(%arg0: vector<16xf32>) -> f32 {
@@ -321,8 +321,8 @@
   return %0 : f32
 }
 // CHECK-LABEL: llvm.func @extract_element_from_vec_1d
-// CHECK: llvm.mlir.constant(15 : i64) : !llvm.i64
-// CHECK: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<16 x float>
+// CHECK: llvm.mlir.constant(15 : i64) : i64
+// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
 // CHECK: llvm.return {{.*}} : !llvm.float

 func @extract_vec_2d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> {
@@ -347,8 +347,8 @@
 }
 // CHECK-LABEL: llvm.func @extract_element_from_vec_3d
 // CHECK: llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vec<16 x float>>>
-// CHECK: llvm.mlir.constant(0 : i64) : !llvm.i64
-// CHECK: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<16 x float>
+// CHECK: llvm.mlir.constant(0 : i64) : i64
+// CHECK: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
 // CHECK: llvm.return {{.*}} : !llvm.float

 func @insert_element(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
@@ -359,8 +359,8 @@
 // CHECK-LABEL: llvm.func @insert_element(
 // CHECK-SAME: %[[A:.*]]: !llvm.float,
 // CHECK-SAME: %[[B:.*]]: !llvm.vec<4 x float>)
-// CHECK: %[[c:.*]] = llvm.mlir.constant(3 : i32) : !llvm.i32
-// CHECK: %[[x:.*]] = llvm.insertelement %[[A]], %[[B]][%[[c]] : !llvm.i32] : !llvm.vec<4 x float>
+// CHECK: %[[c:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK: %[[x:.*]] = llvm.insertelement %[[A]], %[[B]][%[[c]] : i32] : !llvm.vec<4 x float>
 // CHECK: llvm.return %[[x]] : !llvm.vec<4 x float>

 func @insert_element_into_vec_1d(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
@@ -368,8 +368,8 @@
   return %0 : vector<4xf32>
 }
 // CHECK-LABEL: llvm.func @insert_element_into_vec_1d
-// CHECK: llvm.mlir.constant(3 : i64) : !llvm.i64
-// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK: llvm.mlir.constant(3 : i64) : i64
+// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
 // CHECK: llvm.return {{.*}} : !llvm.vec<4 x float>

 func @insert_vec_2d_into_vec_3d(%arg0: vector<8x16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
@@ -394,8 +394,8 @@
 }
 // CHECK-LABEL: llvm.func @insert_element_into_vec_3d
 // CHECK: llvm.extractvalue {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x float>>>
-// CHECK: llvm.mlir.constant(15 : i64) : !llvm.i64
-// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<16 x float>
+// CHECK: llvm.mlir.constant(15 : i64) : i64
+// CHECK: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
 // CHECK: llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x float>>>
 // CHECK: llvm.return {{.*}} : !llvm.array<4 x array<8 x vec<16 x float>>>
@@ -437,9 +437,9 @@
 // Type "boolean" always uses zero extension.
 //
 // CHECK-LABEL: llvm.func @vector_print_scalar_i1(
-// CHECK-SAME: %[[A:.*]]: !llvm.i1)
-// CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i1 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i1)
+// CHECK: %[[S:.*]] = llvm.zext %[[A]] : i1 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_i4(%arg0: i4) {
@@ -447,9 +447,9 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_i4(
-// CHECK-SAME: %[[A:.*]]: !llvm.i4)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i4)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i4 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_si4(%arg0: si4) {
@@ -457,9 +457,9 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_si4(
-// CHECK-SAME: %[[A:.*]]: !llvm.i4)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i4)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i4 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_ui4(%arg0: ui4) {
@@ -467,9 +467,9 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_ui4(
-// CHECK-SAME: %[[A:.*]]: !llvm.i4)
-// CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i4 to !llvm.i64
-// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i4)
+// CHECK: %[[S:.*]] = llvm.zext %[[A]] : i4 to i64
+// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_i32(%arg0: i32) {
@@ -477,9 +477,9 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_i32(
-// CHECK-SAME: %[[A:.*]]: !llvm.i32)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i32 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i32)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i32 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_ui32(%arg0: ui32) {
@@ -487,18 +487,18 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_ui32(
-// CHECK-SAME: %[[A:.*]]: !llvm.i32)
-// CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i32 to !llvm.i64
-// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i32)
+// CHECK: %[[S:.*]] = llvm.zext %[[A]] : i32 to i64
+// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()

 func @vector_print_scalar_i40(%arg0: i40) {
   vector.print %arg0 : i40
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_i40(
-// CHECK-SAME: %[[A:.*]]: !llvm.i40)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i40 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i40)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i40 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_si40(%arg0: si40) {
@@ -506,9 +506,9 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_si40(
-// CHECK-SAME: %[[A:.*]]: !llvm.i40)
-// CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i40 to !llvm.i64
-// CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i40)
+// CHECK: %[[S:.*]] = llvm.sext %[[A]] : i40 to i64
+// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_ui40(%arg0: ui40) {
@@ -516,9 +516,9 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_ui40(
-// CHECK-SAME: %[[A:.*]]: !llvm.i40)
-// CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i40 to !llvm.i64
-// CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i40)
+// CHECK: %[[S:.*]] = llvm.zext %[[A]] : i40 to i64
+// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_i64(%arg0: i64) {
@@ -526,8 +526,8 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_i64(
-// CHECK-SAME: %[[A:.*]]: !llvm.i64)
-// CHECK: llvm.call @printI64(%[[A]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i64)
+// CHECK: llvm.call @printI64(%[[A]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_ui64(%arg0: ui64) {
@@ -535,8 +535,8 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_ui64(
-// CHECK-SAME: %[[A:.*]]: !llvm.i64)
-// CHECK: llvm.call @printU64(%[[A]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i64)
+// CHECK: llvm.call @printU64(%[[A]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_index(%arg0: index) {
@@ -544,8 +544,8 @@
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_index(
-// CHECK-SAME: %[[A:.*]]: !llvm.i64)
-// CHECK: llvm.call @printU64(%[[A]]) : (!llvm.i64) -> ()
+// CHECK-SAME: %[[A:.*]]: i64)
+// CHECK: llvm.call @printU64(%[[A]]) : (i64) -> ()
 // CHECK: llvm.call @printNewline() : () -> ()

 func @vector_print_scalar_f32(%arg0: f32) {
@@ -575,23 +575,23 @@
 // CHECK: llvm.call @printOpen() : () -> ()
 // CHECK: %[[x0:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<2 x float>>
 // CHECK: llvm.call @printOpen() : () -> ()
-// CHECK: %[[x1:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[x2:.*]] = llvm.extractelement %[[x0]][%[[x1]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[x1:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[x2:.*]] = llvm.extractelement %[[x0]][%[[x1]] : i64] : !llvm.vec<2 x float>
 // CHECK: llvm.call @printF32(%[[x2]]) : (!llvm.float) -> ()
 // CHECK: llvm.call @printComma() : () -> ()
-// CHECK: %[[x3:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[x4:.*]] = llvm.extractelement %[[x0]][%[[x3]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[x3:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[x4:.*]] = llvm.extractelement %[[x0]][%[[x3]] : i64] : !llvm.vec<2 x float>
 // CHECK: llvm.call @printF32(%[[x4]]) : (!llvm.float) -> ()
 // CHECK: llvm.call @printClose() : () -> ()
 // CHECK: llvm.call @printComma() : () -> ()
 // CHECK: %[[x5:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<2 x float>>
 // CHECK: llvm.call @printOpen() : () -> ()
-// CHECK: %[[x6:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[x7:.*]] = llvm.extractelement %[[x5]][%[[x6]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[x6:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[x7:.*]] = llvm.extractelement %[[x5]][%[[x6]] : i64] : !llvm.vec<2 x float>
 // CHECK: llvm.call @printF32(%[[x7]]) : (!llvm.float) -> ()
 // CHECK: llvm.call @printComma() : () -> ()
-// CHECK: %[[x8:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[x9:.*]] = llvm.extractelement %[[x5]][%[[x8]] : !llvm.i64] : !llvm.vec<2 x float>
+// CHECK: %[[x8:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[x9:.*]] = llvm.extractelement %[[x5]][%[[x8]] : i64] : !llvm.vec<2 x float>
 // CHECK: llvm.call @printF32(%[[x9]]) : (!llvm.float) -> ()
 // CHECK: llvm.call @printClose() : () -> ()
 // CHECK: llvm.call @printClose() : () -> ()
@@ -652,30 +652,30 @@
 // CHECK: llvm.extractvalue {{.*}}[0] : !llvm.array<2 x vec<2 x float>>
 // CHECK-NEXT: llvm.extractvalue {{.*}}[2] : !llvm.array<4 x vec<4 x float>>
 // Element @0 -> element @2
-// CHECK-NEXT: llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK-NEXT: llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK-NEXT: llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+// CHECK-NEXT: llvm.mlir.constant(2 : index) : i64
+// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
 // Element @1 -> element @3
-// CHECK-NEXT: llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK-NEXT: llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK-NEXT: llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+// CHECK-NEXT: llvm.mlir.constant(3 : index) : i64
+// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
 // CHECK-NEXT: llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x vec<4 x float>>
 //
 // Subvector vector<2xf32> @1 into vector<4xf32> @3
 // CHECK: llvm.extractvalue {{.*}}[1] : !llvm.array<2 x vec<2 x float>>
 // CHECK-NEXT: llvm.extractvalue {{.*}}[3] : !llvm.array<4 x vec<4 x float>>
 // Element @0 -> element @2
-// CHECK-NEXT: llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK-NEXT: llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK-NEXT: llvm.mlir.constant(0 : index) : i64
+// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+// CHECK-NEXT: llvm.mlir.constant(2 : index) : i64
+// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
 // Element @1 -> element @3
-// CHECK-NEXT: llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<2 x float>
-// CHECK-NEXT: llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : !llvm.i64] : !llvm.vec<4 x float>
+// CHECK-NEXT: llvm.mlir.constant(1 : index) : i64
+// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+// CHECK-NEXT: llvm.mlir.constant(3 : index) : i64
+// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
 // CHECK-NEXT: llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x vec<4 x float>>
 func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -> vector<16x4x8xf32> {
@@ -689,41 +689,41 @@
 // CHECK: %[[s0:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<16 x array<4 x vec<8 x float>>>
 // CHECK: %[[s1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<4 x float>>
 // CHECK: %[[s2:.*]] = llvm.extractvalue %[[B]][0, 0] : !llvm.array<16 x array<4 x vec<8 x float>>>
-// CHECK: %[[s3:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[s4:.*]] = llvm.extractelement %[[s1]][%[[s3]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s5:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[s6:.*]] = llvm.insertelement %[[s4]], %[[s2]][%[[s5]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s7:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[s8:.*]] = llvm.extractelement %[[s1]][%[[s7]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s9:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[s10:.*]] = llvm.insertelement %[[s8]], %[[s6]][%[[s9]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s11:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[s12:.*]] = llvm.extractelement %[[s1]][%[[s11]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s13:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
-// CHECK: %[[s14:.*]] = llvm.insertelement %[[s12]], %[[s10]][%[[s13]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s15:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[s16:.*]] = llvm.extractelement %[[s1]][%[[s15]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s17:.*]] = llvm.mlir.constant(5 : index) : !llvm.i64
-// CHECK: %[[s18:.*]] = llvm.insertelement %[[s16]], %[[s14]][%[[s17]] : !llvm.i64] : !llvm.vec<8 x float>
+// CHECK: %[[s3:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[s4:.*]] = llvm.extractelement %[[s1]][%[[s3]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s5:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[s6:.*]] = llvm.insertelement %[[s4]], %[[s2]][%[[s5]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s7:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[s8:.*]] = llvm.extractelement %[[s1]][%[[s7]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s9:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[s10:.*]] = llvm.insertelement %[[s8]], %[[s6]][%[[s9]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s11:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[s12:.*]] = llvm.extractelement %[[s1]][%[[s11]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s13:.*]] = llvm.mlir.constant(4 : index) : i64
+// CHECK: %[[s14:.*]] = llvm.insertelement %[[s12]], %[[s10]][%[[s13]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s15:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[s16:.*]] = llvm.extractelement %[[s1]][%[[s15]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s17:.*]] = llvm.mlir.constant(5 : index) : i64
+// CHECK: %[[s18:.*]] = llvm.insertelement %[[s16]], %[[s14]][%[[s17]] : i64] : !llvm.vec<8 x float>
 // CHECK: %[[s19:.*]] = llvm.insertvalue %[[s18]], %[[s0]][0] : !llvm.array<4 x vec<8 x float>>
 // CHECK: %[[s20:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<4 x float>>
 // CHECK: %[[s21:.*]] = llvm.extractvalue %[[B]][0, 1] : !llvm.array<16 x array<4 x vec<8 x float>>>
-// CHECK: %[[s22:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
-// CHECK: %[[s23:.*]] = llvm.extractelement %[[s20]][%[[s22]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s24:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[s25:.*]] = llvm.insertelement %[[s23]], %[[s21]][%[[s24]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s26:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK: %[[s27:.*]] = llvm.extractelement %[[s20]][%[[s26]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s28:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[s29:.*]] = llvm.insertelement %[[s27]], %[[s25]][%[[s28]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s30:.*]] = llvm.mlir.constant(2 : index) : !llvm.i64
-// CHECK: %[[s31:.*]] = llvm.extractelement %[[s20]][%[[s30]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s32:.*]] = llvm.mlir.constant(4 : index) : !llvm.i64
-// CHECK: %[[s33:.*]] = llvm.insertelement %[[s31]], %[[s29]][%[[s32]] : !llvm.i64] : !llvm.vec<8 x float>
-// CHECK: %[[s34:.*]] = llvm.mlir.constant(3 : index) : !llvm.i64
-// CHECK: %[[s35:.*]] = llvm.extractelement %[[s20]][%[[s34]] : !llvm.i64] : !llvm.vec<4 x float>
-// CHECK: %[[s36:.*]] = llvm.mlir.constant(5 : index) : !llvm.i64
-// CHECK: %[[s37:.*]] = llvm.insertelement %[[s35]], %[[s33]][%[[s36]] : !llvm.i64] : !llvm.vec<8 x float>
+// CHECK: %[[s22:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[s23:.*]] = llvm.extractelement %[[s20]][%[[s22]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s24:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[s25:.*]] = llvm.insertelement %[[s23]], %[[s21]][%[[s24]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s26:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[s27:.*]] = llvm.extractelement %[[s20]][%[[s26]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s28:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[s29:.*]] = llvm.insertelement %[[s27]], %[[s25]][%[[s28]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s30:.*]] = llvm.mlir.constant(2 : index) : i64
+// CHECK: %[[s31:.*]] = llvm.extractelement %[[s20]][%[[s30]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s32:.*]] = llvm.mlir.constant(4 : index) : i64
+// CHECK: %[[s33:.*]] = llvm.insertelement %[[s31]], %[[s29]][%[[s32]] : i64] : !llvm.vec<8 x float>
+// CHECK: %[[s34:.*]] = llvm.mlir.constant(3 : index) : i64
+// CHECK: %[[s35:.*]] = llvm.extractelement %[[s20]][%[[s34]] : i64] : !llvm.vec<4 x float>
+// CHECK: %[[s36:.*]] = llvm.mlir.constant(5 : index) : i64
+// CHECK: %[[s37:.*]] = llvm.insertelement %[[s35]], %[[s33]][%[[s36]] : i64] : !llvm.vec<8 x float>
 // CHECK: %[[s38:.*]] = llvm.insertvalue %[[s37]], %[[s19]][1] : !llvm.array<4 x vec<8 x float>>
 // CHECK: %[[s39:.*]] = llvm.insertvalue %[[s38]], %[[B]][0] : !llvm.array<16 x array<4 x vec<8 x float>>>
 // CHECK: llvm.return %[[s39]] : !llvm.array<16 x array<4 x vec<8 x float>>>
@@ -807,7 +807,7 @@
 // CHECK-LABEL: llvm.func @reduce_i8(
 // CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x i8>)
 // CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
-// CHECK: llvm.return %[[V]] : !llvm.i8
+// CHECK: llvm.return %[[V]] : i8

 func @reduce_i32(%arg0: vector<16xi32>) -> i32 {
   %0 = vector.reduction "add", %arg0 : vector<16xi32> into i32
@@ -816,7 +816,7 @@
 // CHECK-LABEL: llvm.func @reduce_i32(
 // CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x i32>)
 // CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
-// CHECK: llvm.return %[[V]] : !llvm.i32
+// CHECK: llvm.return %[[V]] : i32

 func @reduce_i64(%arg0: vector<16xi64>) -> i64 {
   %0 = vector.reduction "add", %arg0 : vector<16xi64> into i64
@@ -825,7 +825,7 @@
 // CHECK-LABEL: llvm.func @reduce_i64(
 // CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x i64>)
 // CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
-// CHECK: llvm.return %[[V]] : !llvm.i64
+// CHECK: llvm.return %[[V]] : i64

 // 4x16 16x3 4x3
@@ -851,11 +851,11 @@
   return %f: vector<17xf32>
 }
 // CHECK-LABEL: func @transfer_read_1d
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<17 x float>
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
 //
 // 1. Bitcast to vector form.
 // CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr, !llvm.i64) -> !llvm.ptr
+// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr
 // CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
 // CHECK-SAME: !llvm.ptr to !llvm.ptr>
 // CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 0] :
@@ -867,11 +867,11 @@
 // CHECK-SAME: vector<17xi32>) : !llvm.vec<17 x i32>
 //
 // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
-// CHECK: %[[otrunc:.*]] = llvm.trunc %[[BASE]] : !llvm.i64 to !llvm.i32
+// CHECK: %[[otrunc:.*]] = llvm.trunc %[[BASE]] : i64 to i32
 // CHECK: %[[offsetVec:.*]] = llvm.mlir.undef : !llvm.vec<17 x i32>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
 // CHECK: %[[offsetVec2:.*]] = llvm.insertelement %[[otrunc]], %[[offsetVec]][%[[c0]] :
-// CHECK-SAME: !llvm.i32] : !llvm.vec<17 x i32>
+// CHECK-SAME: i32] : !llvm.vec<17 x i32>
 // CHECK: %[[offsetVec3:.*]] = llvm.shufflevector %[[offsetVec2]], %{{.*}} [
 // CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
 // CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
@@ -882,11 +882,11 @@
 //
 // 4. Let dim the memref dimension, compute the vector comparison mask:
 //    [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
-// CHECK: %[[dtrunc:.*]] = llvm.trunc %[[DIM]] : !llvm.i64 to !llvm.i32
+// CHECK: %[[dtrunc:.*]] = llvm.trunc %[[DIM]] : i64 to i32
 // CHECK: %[[dimVec:.*]] = llvm.mlir.undef : !llvm.vec<17 x i32>
-// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : i32
 // CHECK: %[[dimVec2:.*]] = llvm.insertelement %[[dtrunc]], %[[dimVec]][%[[c01]] :
-// CHECK-SAME: !llvm.i32] : !llvm.vec<17 x i32>
+// CHECK-SAME: i32] : !llvm.vec<17 x i32>
 // CHECK: %[[dimVec3:.*]] = llvm.shufflevector %[[dimVec2]], %{{.*}} [
 // CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
 // CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
@@ -905,7 +905,7 @@
 //
 // 1. Bitcast to vector form.
 // CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr, !llvm.i64) -> !llvm.ptr
+// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr
 // CHECK: %[[vecPtr_b:.*]] = llvm.bitcast %[[gep_b]] :
 // CHECK-SAME: !llvm.ptr to !llvm.ptr>
 //
@@ -942,16 +942,16 @@
   return %f: vector<17xf32>
 }
 // CHECK-LABEL: func @transfer_read_2d_to_1d
-// CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: !llvm.i64, %[[BASE_1:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<17 x float>
+// CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: i64, %[[BASE_1:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
 // CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 1] :
 // CHECK-SAME: !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
 //
 // Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
-// CHECK: %[[trunc:.*]] = llvm.trunc %[[BASE_1]] : !llvm.i64 to !llvm.i32
+// CHECK: %[[trunc:.*]] = llvm.trunc %[[BASE_1]] : i64 to i32
 // CHECK: %[[offsetVec:.*]] = llvm.mlir.undef : !llvm.vec<17 x i32>
-// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
 // CHECK: %[[offsetVec2:.*]] = llvm.insertelement %[[trunc]], %[[offsetVec]][%[[c0]] :
-// CHECK-SAME: !llvm.i32] : !llvm.vec<17 x i32>
+// CHECK-SAME: i32] : !llvm.vec<17 x i32>
 // CHECK: %[[offsetVec3:.*]] = llvm.shufflevector %[[offsetVec2]], %{{.*}} [
 // CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
 // CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
@@ -960,11 +960,11 @@
 //
 // Let dim the memref dimension, compute the vector comparison mask:
 //    [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
-// CHECK: %[[dimtrunc:.*]] = llvm.trunc %[[DIM]] : !llvm.i64 to !llvm.i32
+// CHECK: %[[dimtrunc:.*]] = llvm.trunc %[[DIM]] : i64 to i32
 // CHECK: %[[dimVec:.*]] = llvm.mlir.undef : !llvm.vec<17 x i32>
-// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+// CHECK: %[[c01:.*]] = llvm.mlir.constant(0 : i32) : i32
 // CHECK: %[[dimVec2:.*]] = llvm.insertelement %[[dimtrunc]], %[[dimVec]][%[[c01]] :
-// CHECK-SAME: !llvm.i32] : !llvm.vec<17 x i32>
+// CHECK-SAME: i32] : !llvm.vec<17 x i32>
 // CHECK: %[[dimVec3:.*]] = llvm.shufflevector %[[dimVec2]], %{{.*}} [
 // CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
 // CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
@@ -982,11 +982,11 @@
   return %f: vector<17xf32>
 }
 // CHECK-LABEL: func @transfer_read_1d_non_zero_addrspace
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<17 x float>
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
 //
 // 1. Check address space for GEP is correct.
 // CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr, !llvm.i64) -> !llvm.ptr
+// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr
 // CHECK: %[[vecPtr:.*]] = llvm.addrspacecast %[[gep]] :
 // CHECK-SAME: !llvm.ptr to !llvm.ptr>
 //
@@ -996,7 +996,7 @@
 //
 // 3. Check address apce for GEP is correct.
 // CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr, !llvm.i64) -> !llvm.ptr
+// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr
 // CHECK: %[[vecPtr_b:.*]] = llvm.addrspacecast %[[gep_b]] :
 // CHECK-SAME: !llvm.ptr to !llvm.ptr>

@@ -1007,11 +1007,11 @@
   return %f: vector<17xf32>
 }
 // CHECK-LABEL: func @transfer_read_1d_not_masked
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<17 x float>
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
 //
 // 1. Bitcast to vector form.
 // CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr, !llvm.i64) -> !llvm.ptr
+// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr
 // CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
 // CHECK-SAME: !llvm.ptr to !llvm.ptr>
 //
@@ -1025,11 +1025,11 @@
   return %v: vector<12xi8>
 }
 // CHECK-LABEL: func @transfer_read_1d_cast
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm.vec<12 x i8>
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<12 x i8>
 //
 // 1. Bitcast to vector form.
 // CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr, !llvm.i64) -> !llvm.ptr
+// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr
 // CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
 // CHECK-SAME: !llvm.ptr to !llvm.ptr>
 //
diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir
--- a/mlir/test/Dialect/GPU/outlining.mlir
+++ b/mlir/test/Dialect/GPU/outlining.mlir
@@ -215,7 +215,7 @@

 // -----

-llvm.mlir.global internal @global(42 : i64) : !llvm.i64
+llvm.mlir.global internal @global(42 : i64) : i64

 //CHECK-LABEL: @function_call
 func @function_call(%arg0 : memref) {
@@ -249,7 +249,7 @@
 // CHECK: llvm.mlir.addressof @global : !llvm.ptr
 // CHECK: gpu.return
 //
-// CHECK: llvm.mlir.global internal @global(42 : i64) : !llvm.i64
+// CHECK: llvm.mlir.global internal @global(42 : i64) : i64
 //
 // CHECK: func @device_function()
 // CHECK: func @recursive_device_function()
diff --git a/mlir/test/Dialect/LLVMIR/dialect-cast.mlir b/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
--- a/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
+++ b/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
@@ -7,9 +7,8 @@
                          %6: vector<42xf32>, %7: memref<42xf32>,
                          %8: memref, %9: memref, %10: memref<*xf32>) {
-  llvm.mlir.cast %0 : index to !llvm.i64
-  llvm.mlir.cast %0 : index to !llvm.i32
-  llvm.mlir.cast %1 : i32 to !llvm.i32
+  llvm.mlir.cast %0 : index to i64
+  llvm.mlir.cast %0 : index to i32
   llvm.mlir.cast %2 : bf16 to !llvm.bfloat
   llvm.mlir.cast %3 : f16 to !llvm.half
   llvm.mlir.cast %4 : f32 to !llvm.float
@@ -71,19 +70,12 @@
 // -----

 func @mlir_dialect_cast_integer_non_integer(%0 : i16) {
-  // expected-error@+1 {{invalid cast between integer and non-integer type}}
+  // expected-error@+1 {{unsupported cast}}
   llvm.mlir.cast %0 : i16 to !llvm.half
 }

 // -----

-func @mlir_dialect_cast_integer_bitwidth_mismatch(%0 : i16) {
-  // expected-error@+1 {{invalid cast between integers with mismatching bitwidth}}
-  llvm.mlir.cast %0 : i16 to !llvm.i32
-}
-
-// -----
-
 func @mlir_dialect_cast_nd_vector(%0 : vector<2x2xf32>) {
   // expected-error@+1 {{only 1-d vector is allowed}}
   llvm.mlir.cast %0 : vector<2x2xf32> to !llvm.vec<4xfloat>
diff --git a/mlir/test/Dialect/LLVMIR/func.mlir b/mlir/test/Dialect/LLVMIR/func.mlir
--- a/mlir/test/Dialect/LLVMIR/func.mlir
+++ b/mlir/test/Dialect/LLVMIR/func.mlir
@@ -14,24 +14,24 @@
   // GENERIC: sym_name = "bar"
   // GENERIC-SAME: type = !llvm.func
   // GENERIC-SAME: () -> ()
-  // CHECK: llvm.func @bar(!llvm.i64, !llvm.i64) -> !llvm.i64
+  // CHECK: llvm.func @bar(i64, i64) -> i64
   "llvm.func"() ({
   }) {sym_name = "bar", type = !llvm.func} : () -> ()

   // GENERIC: "llvm.func"
-  // CHECK: llvm.func @baz(%{{.*}}: !llvm.i64) -> !llvm.i64
+  // CHECK: llvm.func @baz(%{{.*}}: i64) -> i64
   "llvm.func"() ({
   // GENERIC: ^bb0
-  ^bb0(%arg0: !llvm.i64):
+  ^bb0(%arg0: i64):
     // GENERIC: llvm.return
-    llvm.return %arg0 : !llvm.i64
+    llvm.return %arg0 : i64

   // GENERIC: sym_name = "baz"
   // GENERIC-SAME: type = !llvm.func
   // GENERIC-SAME: () -> ()
   }) {sym_name = "baz", type = !llvm.func} : () -> ()

-  // CHECK: llvm.func @qux(!llvm.ptr {llvm.noalias = true}, !llvm.i64)
+  // CHECK: llvm.func @qux(!llvm.ptr {llvm.noalias = true}, i64)
   // CHECK: attributes {xxx = {yyy = 42 : i64}}
   "llvm.func"() ({
   }) {sym_name = "qux", type = !llvm.func, i64)>,
@@ -40,14 +40,14 @@
   // CHECK: llvm.func @roundtrip1()
   llvm.func @roundtrip1()

-  // CHECK: llvm.func @roundtrip2(!llvm.i64, !llvm.float) -> !llvm.double
-  llvm.func @roundtrip2(!llvm.i64, !llvm.float) -> !llvm.double
+  // CHECK: llvm.func @roundtrip2(i64, !llvm.float) -> !llvm.double
+  llvm.func @roundtrip2(i64, !llvm.float) -> !llvm.double

-  // CHECK: llvm.func @roundtrip3(!llvm.i32, !llvm.i1)
-  llvm.func @roundtrip3(%a: !llvm.i32, %b: !llvm.i1)
+  // CHECK: llvm.func @roundtrip3(i32, i1)
+  llvm.func @roundtrip3(%a: i32, %b: i1)

-  // CHECK: llvm.func @roundtrip4(%{{.*}}: !llvm.i32, %{{.*}}: !llvm.i1) {
-  llvm.func @roundtrip4(%a: !llvm.i32, %b: !llvm.i1) {
+  // CHECK: llvm.func @roundtrip4(%{{.*}}: i32, %{{.*}}: i1) {
+  llvm.func @roundtrip4(%a: i32, %b: i1) {
     llvm.return
   }
@@ -66,8 +66,8 @@
     llvm.return
   }

-  // CHECK: llvm.func @roundtrip8() -> !llvm.i32
-  llvm.func @roundtrip8() -> !llvm.i32 attributes {}
+  // CHECK: llvm.func @roundtrip8() -> i32
+  llvm.func @roundtrip8() -> i32 attributes {}

   // CHECK: llvm.func @roundtrip9(!llvm.ptr {llvm.noalias = true})
   llvm.func @roundtrip9(!llvm.ptr {llvm.noalias = true})
@@ -90,8 +90,8 @@
   // CHECK: llvm.func @variadic(...)
   llvm.func @variadic(...)

-  // CHECK: llvm.func @variadic_args(!llvm.i32, !llvm.i32, ...)
-  llvm.func @variadic_args(!llvm.i32, !llvm.i32, ...)
+  // CHECK: llvm.func @variadic_args(i32, i32, ...)
+  llvm.func @variadic_args(i32, i32, ...)

   //
   // Check that functions can have linkage attributes.
@@ -140,7 +140,7 @@

 module {
   // expected-error@+1 {{requires 'type' attribute of wrapped LLVM function type}}
-  "llvm.func"() ({}) {sym_name = "non_function_type", type = !llvm.i64} : () -> ()
+  "llvm.func"() ({}) {sym_name = "non_function_type", type = i64} : () -> ()
 }

 // -----

@@ -148,7 +148,7 @@
 module {
   // expected-error@+1 {{entry block must have 0 arguments}}
   "llvm.func"() ({
-  ^bb0(%arg0: !llvm.i64):
+  ^bb0(%arg0: i64):
     llvm.return
   }) {sym_name = "wrong_arg_number", type = !llvm.func} : () -> ()
 }

 // -----

@@ -158,7 +158,7 @@
 module {
   // expected-error@+1 {{entry block argument #0 is not of LLVM type}}
   "llvm.func"() ({
-  ^bb0(%arg0: i64):
+  ^bb0(%arg0: tensor<*xf32>):
     llvm.return
   }) {sym_name = "wrong_arg_number", type = !llvm.func} : () -> ()
 }

 // -----

@@ -168,7 +168,7 @@
 module {
   // expected-error@+1 {{entry block argument #0 does not match the function signature}}
   "llvm.func"() ({
-  ^bb0(%arg0: !llvm.i32):
+  ^bb0(%arg0: i32):
     llvm.return
   }) {sym_name = "wrong_arg_number", type = !llvm.func} : () -> ()
 }

 // -----

@@ -177,21 +177,21 @@
 module {
   // expected-error@+1 {{failed to construct function type: expected LLVM type for function arguments}}
-  llvm.func @foo(i64)
+  llvm.func @foo(tensor<*xf32>)
 }

 // -----

 module {
   // expected-error@+1 {{failed to construct function type: expected LLVM type for function results}}
-  llvm.func @foo() -> i64
+  llvm.func @foo() -> tensor<*xf32>
 }

 // -----

 module {
   // expected-error@+1 {{failed to construct function type: expected zero or one function result}}
-  llvm.func @foo() -> (!llvm.i64, !llvm.i64)
+  llvm.func @foo() -> (i64, i64)
 }

 // -----

@@ -207,7 +207,7 @@
 module {
   // expected-error@+1 {{variadic arguments must be in the end of the argument list}}
-  llvm.func @variadic_inside(%arg0: !llvm.i32, ..., %arg1: !llvm.i32)
+  llvm.func @variadic_inside(%arg0: i32, ..., %arg1: i32)
 }

 // -----

diff --git a/mlir/test/Dialect/LLVMIR/global.mlir b/mlir/test/Dialect/LLVMIR/global.mlir
--- a/mlir/test/Dialect/LLVMIR/global.mlir
+++ b/mlir/test/Dialect/LLVMIR/global.mlir
@@ -1,13 +1,13 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s

 // CHECK: llvm.mlir.global external @default_external
-llvm.mlir.global @default_external() : !llvm.i64
+llvm.mlir.global @default_external() : i64

 // CHECK: llvm.mlir.global external constant @default_external_constant
-llvm.mlir.global constant @default_external_constant(42) : !llvm.i64
+llvm.mlir.global constant @default_external_constant(42) : i64

-// CHECK: llvm.mlir.global internal @global(42 : i64) : !llvm.i64
-llvm.mlir.global internal @global(42 : i64) : !llvm.i64
+// CHECK: llvm.mlir.global internal @global(42 : i64) : i64
+llvm.mlir.global internal @global(42 : i64) : i64

 // CHECK: llvm.mlir.global internal constant @constant(3.700000e+01 : f64) : !llvm.float
 llvm.mlir.global internal constant @constant(37.0) : !llvm.float
@@ -19,38 +19,38 @@
 llvm.mlir.global internal @string_notype("1234567")

 // CHECK: llvm.mlir.global internal @global_undef()
-llvm.mlir.global internal @global_undef() : !llvm.i64
+llvm.mlir.global internal @global_undef() : i64

-// CHECK: llvm.mlir.global internal @global_mega_initializer() : !llvm.i64 {
-// CHECK-NEXT: %[[c:[0-9]+]] = llvm.mlir.constant(42 : i64) : !llvm.i64
-// CHECK-NEXT: llvm.return %[[c]] : !llvm.i64
+// CHECK: llvm.mlir.global internal @global_mega_initializer() : i64 {
+// CHECK-NEXT: %[[c:[0-9]+]] = llvm.mlir.constant(42 : i64) : i64
+// CHECK-NEXT: llvm.return %[[c]] : i64
 // CHECK-NEXT: }
-llvm.mlir.global internal @global_mega_initializer() : !llvm.i64 {
-  %c = llvm.mlir.constant(42 : i64) : !llvm.i64
-  llvm.return %c : !llvm.i64
+llvm.mlir.global internal @global_mega_initializer() : i64 {
+  %c = llvm.mlir.constant(42 : i64) : i64
+  llvm.return %c : i64
 }

 // Check different linkage types.
 // CHECK: llvm.mlir.global private
-llvm.mlir.global private @private() : !llvm.i64
+llvm.mlir.global private @private() : i64
 // CHECK: llvm.mlir.global internal
-llvm.mlir.global internal @internal() : !llvm.i64
+llvm.mlir.global internal @internal() : i64
 // CHECK: llvm.mlir.global available_externally
-llvm.mlir.global available_externally @available_externally() : !llvm.i64
+llvm.mlir.global available_externally @available_externally() : i64
 // CHECK: llvm.mlir.global linkonce
-llvm.mlir.global linkonce @linkonce() : !llvm.i64
+llvm.mlir.global linkonce @linkonce() : i64
 // CHECK: llvm.mlir.global weak
-llvm.mlir.global weak @weak() : !llvm.i64
+llvm.mlir.global weak @weak() : i64
 // CHECK: llvm.mlir.global common
-llvm.mlir.global common @common() : !llvm.i64
+llvm.mlir.global common @common() : i64
 // CHECK: llvm.mlir.global appending
-llvm.mlir.global appending @appending() : !llvm.i64
+llvm.mlir.global appending @appending() : i64
 // CHECK: llvm.mlir.global extern_weak
-llvm.mlir.global extern_weak @extern_weak() : !llvm.i64
+llvm.mlir.global extern_weak @extern_weak() : i64
 // CHECK: llvm.mlir.global linkonce_odr
-llvm.mlir.global linkonce_odr @linkonce_odr() : !llvm.i64
+llvm.mlir.global linkonce_odr @linkonce_odr() : i64
 // CHECK: llvm.mlir.global weak_odr
-llvm.mlir.global weak_odr @weak_odr() : !llvm.i64
+llvm.mlir.global weak_odr @weak_odr() : i64

 // CHECK-LABEL: references
 func @references() {
@@ -66,7 +66,7 @@
 // -----

 // expected-error @+1 {{requires string attribute 'sym_name'}}
-"llvm.mlir.global"() ({}) {type = !llvm.i64, constant, value = 42 : i64} : () -> ()
+"llvm.mlir.global"() ({}) {type = i64, constant, value = 42 : i64} : () -> ()

 // -----

@@ -81,18 +81,18 @@
 // -----

 // expected-error @+1 {{'addr_space' failed to satisfy constraint: 32-bit signless integer attribute whose value is non-negative}}
-"llvm.mlir.global"() ({}) {sym_name = "foo", type = !llvm.i64, value = 42 : i64, addr_space = -1 : i32, linkage = 0} : () -> ()
+"llvm.mlir.global"() ({}) {sym_name = "foo", type = i64, value = 42 : i64, addr_space = -1 : i32, linkage = 0} : () -> ()
() -> () // ----- // expected-error @+1 {{'addr_space' failed to satisfy constraint: 32-bit signless integer attribute whose value is non-negative}} -"llvm.mlir.global"() ({}) {sym_name = "foo", type = !llvm.i64, value = 42 : i64, addr_space = 1.0 : f32, linkage = 0} : () -> () +"llvm.mlir.global"() ({}) {sym_name = "foo", type = i64, value = 42 : i64, addr_space = 1.0 : f32, linkage = 0} : () -> () // ----- func @foo() { // expected-error @+1 {{must appear at the module level}} - llvm.mlir.global internal @bar(42) : !llvm.i32 + llvm.mlir.global internal @bar(42) : i32 } // ----- @@ -108,11 +108,11 @@ // ----- // expected-error @+1 {{expected zero or one type}} -llvm.mlir.global internal @more_than_one_type(0) : !llvm.i64, !llvm.i32 +llvm.mlir.global internal @more_than_one_type(0) : i64, i32 // ----- -llvm.mlir.global internal @foo(0: i32) : !llvm.i32 +llvm.mlir.global internal @foo(0: i32) : i32 func @bar() { // expected-error @+2{{expected ':'}} @@ -137,7 +137,7 @@ // ----- -llvm.mlir.global internal @foo(0: i32) : !llvm.i32 +llvm.mlir.global internal @foo(0: i32) : i32 func @bar() { // expected-error @+1 {{the type must be a pointer to the type of the referenced global}} @@ -157,29 +157,29 @@ // expected-error @+2 {{'llvm.mlir.global' op expects regions to end with 'llvm.return', found 'llvm.mlir.constant'}} // expected-note @+1 {{in custom textual format, the absence of terminator implies 'llvm.return'}} -llvm.mlir.global internal @g() : !llvm.i64 { - %c = llvm.mlir.constant(42 : i64) : !llvm.i64 +llvm.mlir.global internal @g() : i64 { + %c = llvm.mlir.constant(42 : i64) : i64 } // ----- -// expected-error @+1 {{'llvm.mlir.global' op initializer region type '!llvm.i64' does not match global type '!llvm.i32'}} -llvm.mlir.global internal @g() : !llvm.i32 { - %c = llvm.mlir.constant(42 : i64) : !llvm.i64 - llvm.return %c : !llvm.i64 +// expected-error @+1 {{'llvm.mlir.global' op initializer region type 'i64' does not match global type 'i32'}} +llvm.mlir.global internal @g() : i32 { + %c = llvm.mlir.constant(42 : i64) : i64 + llvm.return %c : i64 } // ----- // expected-error @+1 {{'llvm.mlir.global' op cannot have both initializer value and region}} -llvm.mlir.global internal @g(43 : i64) : !llvm.i64 { - %c = llvm.mlir.constant(42 : i64) : !llvm.i64 - llvm.return %c : !llvm.i64 +llvm.mlir.global internal @g(43 : i64) : i64 { + %c = llvm.mlir.constant(42 : i64) : i64 + llvm.return %c : i64 } // ----- -llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : !llvm.i64 +llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : i64 func @mismatch_addr_space_implicit_global() { // expected-error @+1 {{op the type must be a pointer to the type of the referenced global}} llvm.mlir.addressof @g : !llvm.ptr @@ -187,7 +187,7 @@ // ----- -llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : !llvm.i64 +llvm.mlir.global internal @g(32 : i64) {addr_space = 3: i32} : i64 func @mismatch_addr_space() { // expected-error @+1 {{op the type must be a pointer to the type of the referenced global}} llvm.mlir.addressof @g : !llvm.ptr diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir --- a/mlir/test/Dialect/LLVMIR/invalid.mlir +++ b/mlir/test/Dialect/LLVMIR/invalid.mlir @@ -1,14 +1,14 @@ // RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -verify-diagnostics // expected-error@+1{{llvm.noalias argument attribute of non boolean type}} -func @invalid_noalias(%arg0: !llvm.i32 {llvm.noalias = 3}) { +func @invalid_noalias(%arg0: i32 
{llvm.noalias = 3}) { "llvm.return"() : () -> () } // ----- // expected-error@+1{{llvm.align argument attribute of non integer type}} -func @invalid_align(%arg0: !llvm.i32 {llvm.align = "foo"}) { +func @invalid_align(%arg0: i32 {llvm.align = "foo"}) { "llvm.return"() : () -> () } @@ -18,79 +18,79 @@ // ----- -func @icmp_non_string(%arg0 : !llvm.i32, %arg1 : !llvm.i16) { +func @icmp_non_string(%arg0 : i32, %arg1 : i16) { // expected-error@+1 {{invalid kind of attribute specified}} - llvm.icmp 42 %arg0, %arg0 : !llvm.i32 + llvm.icmp 42 %arg0, %arg0 : i32 return } // ----- -func @icmp_wrong_string(%arg0 : !llvm.i32, %arg1 : !llvm.i16) { +func @icmp_wrong_string(%arg0 : i32, %arg1 : i16) { // expected-error@+1 {{'foo' is an incorrect value of the 'predicate' attribute}} - llvm.icmp "foo" %arg0, %arg0 : !llvm.i32 + llvm.icmp "foo" %arg0, %arg0 : i32 return } // ----- -func @alloca_missing_input_result_type(%size : !llvm.i64) { +func @alloca_missing_input_result_type(%size : i64) { // expected-error@+1 {{expected trailing function type with one argument and one result}} - llvm.alloca %size x !llvm.i32 : () -> () + llvm.alloca %size x i32 : () -> () } // ----- func @alloca_missing_input_type() { // expected-error@+1 {{expected trailing function type with one argument and one result}} - llvm.alloca %size x !llvm.i32 : () -> (!llvm.ptr) + llvm.alloca %size x i32 : () -> (!llvm.ptr) } // ----- func @alloca_missing_result_type() { // expected-error@+1 {{expected trailing function type with one argument and one result}} - llvm.alloca %size x !llvm.i32 : (!llvm.i64) -> () + llvm.alloca %size x i32 : (i64) -> () } // ----- func @alloca_non_function_type() { // expected-error@+1 {{expected trailing function type with one argument and one result}} - llvm.alloca %size x !llvm.i32 : !llvm.ptr + llvm.alloca %size x i32 : !llvm.ptr } // ----- func @alloca_non_integer_alignment() { // expected-error@+1 {{expected integer alignment}} - llvm.alloca %size x !llvm.i32 {alignment = 3.0} : !llvm.ptr + llvm.alloca %size x i32 {alignment = 3.0} : !llvm.ptr } // ----- -func @gep_missing_input_result_type(%pos : !llvm.i64, %base : !llvm.ptr) { +func @gep_missing_input_result_type(%pos : i64, %base : !llvm.ptr) { // expected-error@+1 {{2 operands present, but expected 0}} llvm.getelementptr %base[%pos] : () -> () } // ----- -func @gep_missing_input_type(%pos : !llvm.i64, %base : !llvm.ptr) { +func @gep_missing_input_type(%pos : i64, %base : !llvm.ptr) { // expected-error@+1 {{2 operands present, but expected 0}} llvm.getelementptr %base[%pos] : () -> (!llvm.ptr) } // ----- -func @gep_missing_result_type(%pos : !llvm.i64, %base : !llvm.ptr) { +func @gep_missing_result_type(%pos : i64, %base : !llvm.ptr) { // expected-error@+1 {{op requires one result}} - llvm.getelementptr %base[%pos] : (!llvm.ptr, !llvm.i64) -> () + llvm.getelementptr %base[%pos] : (!llvm.ptr, i64) -> () } // ----- -func @gep_non_function_type(%pos : !llvm.i64, %base : !llvm.ptr) { +func @gep_non_function_type(%pos : i64, %base : !llvm.ptr) { // expected-error@+1 {{invalid kind of type specified}} llvm.getelementptr %base[%pos] : !llvm.ptr } @@ -125,7 +125,7 @@ // ----- -func @call_non_function_type(%callee : !llvm.func, %arg : !llvm.i8) { +func @call_non_function_type(%callee : !llvm.func, %arg : i8) { // expected-error@+1 {{expected function type}} llvm.call %callee(%arg) : !llvm.func } @@ -139,7 +139,7 @@ // ----- -func @call_non_function_type(%callee : !llvm.func, %arg : !llvm.i8) { +func @call_non_function_type(%callee : !llvm.func, %arg : i8) { // 
expected-error@+1 {{expected function type}} llvm.call %callee(%arg) : !llvm.func } @@ -162,41 +162,41 @@ // ----- -func @call_non_llvm_indirect(%arg0 : i32) { - // expected-error@+1 {{'llvm.call' op operand #0 must be LLVM dialect-compatible type, but got 'i32'}} - "llvm.call"(%arg0) : (i32) -> () +func @call_non_llvm_indirect(%arg0 : tensor<*xi32>) { + // expected-error@+1 {{'llvm.call' op operand #0 must be LLVM dialect-compatible type}} + "llvm.call"(%arg0) : (tensor<*xi32>) -> () } // ----- -llvm.func @callee_func(!llvm.i8) -> () +llvm.func @callee_func(i8) -> () -func @callee_arg_mismatch(%arg0 : !llvm.i32) { - // expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: '!llvm.i32' != '!llvm.i8'}} - llvm.call @callee_func(%arg0) : (!llvm.i32) -> () +func @callee_arg_mismatch(%arg0 : i32) { + // expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: 'i32' != 'i8'}} + llvm.call @callee_func(%arg0) : (i32) -> () } // ----- -func @indirect_callee_arg_mismatch(%arg0 : !llvm.i32, %callee : !llvm.ptr>) { - // expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: '!llvm.i32' != '!llvm.i8'}} - "llvm.call"(%callee, %arg0) : (!llvm.ptr>, !llvm.i32) -> () +func @indirect_callee_arg_mismatch(%arg0 : i32, %callee : !llvm.ptr>) { + // expected-error@+1 {{'llvm.call' op operand type mismatch for operand 0: 'i32' != 'i8'}} + "llvm.call"(%callee, %arg0) : (!llvm.ptr>, i32) -> () } // ----- -llvm.func @callee_func() -> (!llvm.i8) +llvm.func @callee_func() -> (i8) func @callee_return_mismatch() { - // expected-error@+1 {{'llvm.call' op result type mismatch: '!llvm.i32' != '!llvm.i8'}} - %res = llvm.call @callee_func() : () -> (!llvm.i32) + // expected-error@+1 {{'llvm.call' op result type mismatch: 'i32' != 'i8'}} + %res = llvm.call @callee_func() : () -> (i32) } // ----- func @indirect_callee_return_mismatch(%callee : !llvm.ptr>) { - // expected-error@+1 {{'llvm.call' op result type mismatch: '!llvm.i32' != '!llvm.i8'}} - "llvm.call"(%callee) : (!llvm.ptr>) -> (!llvm.i32) + // expected-error@+1 {{'llvm.call' op result type mismatch: 'i32' != 'i8'}} + "llvm.call"(%callee) : (!llvm.ptr>) -> (i32) } // ----- @@ -208,16 +208,16 @@ // ----- -func @call_non_llvm_result(%callee : () -> (i32)) { +func @call_non_llvm_result(%callee : () -> (tensor<*xi32>)) { // expected-error@+1 {{expected result to have LLVM type}} - llvm.call %callee() : () -> (i32) + llvm.call %callee() : () -> (tensor<*xi32>) } // ----- -func @call_non_llvm_input(%callee : (i32) -> (), %arg : i32) { +func @call_non_llvm_input(%callee : (tensor<*xi32>) -> (), %arg : tensor<*xi32>) { // expected-error@+1 {{expected LLVM types as inputs}} - llvm.call %callee(%arg) : (i32) -> () + llvm.call %callee(%arg) : (tensor<*xi32>) -> () } // ----- @@ -231,7 +231,7 @@ func @insertvalue_non_llvm_type(%a : i32, %b : i32) { // expected-error@+1 {{expected LLVM IR Dialect type}} - llvm.insertvalue %a, %b[0] : i32 + llvm.insertvalue %a, %b[0] : tensor<*xi32> } // ----- @@ -273,9 +273,9 @@ // ----- -func @extractvalue_non_llvm_type(%a : i32, %b : i32) { +func @extractvalue_non_llvm_type(%a : i32, %b : tensor<*xi32>) { // expected-error@+1 {{expected LLVM IR Dialect type}} - llvm.extractvalue %b[0] : i32 + llvm.extractvalue %b[0] : tensor<*xi32> } // ----- @@ -317,21 +317,21 @@ // ----- -func @invalid_vector_type_1(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) { +func @invalid_vector_type_1(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) { // expected-error@+1 {{expected LLVM IR 
dialect vector type for operand #1}} - %0 = llvm.extractelement %arg2[%arg1 : !llvm.i32] : !llvm.float + %0 = llvm.extractelement %arg2[%arg1 : i32] : !llvm.float } // ----- -func @invalid_vector_type_2(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) { +func @invalid_vector_type_2(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) { // expected-error@+1 {{expected LLVM IR dialect vector type for operand #1}} - %0 = llvm.insertelement %arg2, %arg2[%arg1 : !llvm.i32] : !llvm.float + %0 = llvm.insertelement %arg2, %arg2[%arg1 : i32] : !llvm.float } // ----- -func @invalid_vector_type_3(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) { +func @invalid_vector_type_3(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) { // expected-error@+1 {{expected LLVM IR dialect vector type for operand #1}} %0 = llvm.shufflevector %arg2, %arg2 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.float, !llvm.float } @@ -339,27 +339,27 @@ // ----- func @null_non_llvm_type() { - // expected-error@+1 {{must be LLVM pointer type, but got '!llvm.i32'}} - llvm.mlir.null : !llvm.i32 + // expected-error@+1 {{must be LLVM pointer type, but got 'i32'}} + llvm.mlir.null : i32 } // ----- -func @nvvm_invalid_shfl_pred_1(%arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32, %arg3 : !llvm.i32) { +func @nvvm_invalid_shfl_pred_1(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) { // expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}} - %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.i32 + %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : i32 } // ----- -func @nvvm_invalid_shfl_pred_2(%arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32, %arg3 : !llvm.i32) { +func @nvvm_invalid_shfl_pred_2(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) { // expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}} %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(i32)> } // ----- -func @nvvm_invalid_shfl_pred_3(%arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32, %arg3 : !llvm.i32) { +func @nvvm_invalid_shfl_pred_3(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i32) { // expected-error@+1 {{expected return type to be a two-element struct with i1 as the second element}} %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(i32, i32)> } @@ -448,7 +448,7 @@ %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float, %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) { // expected-error@+1 {{op requires one result}} - %0:2 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> (!llvm.struct<(float, float, float, float, float, float, float, float)>, !llvm.i32) + %0:2 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> (!llvm.struct<(float, float, float, float, float, float, float, float)>, i32) 
llvm.return %0#0 : !llvm.struct<(float, float, float, float, float, float, float, float)> } @@ -462,9 +462,9 @@ // ----- -func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr, %i32 : !llvm.i32) { +func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr, %i32 : i32) { // expected-error@+1 {{expected LLVM IR element type for operand #0 to match type for operand #1}} - %0 = "llvm.atomicrmw"(%f32_ptr, %i32) {bin_op=11, ordering=1} : (!llvm.ptr, !llvm.i32) -> !llvm.float + %0 = "llvm.atomicrmw"(%f32_ptr, %i32) {bin_op=11, ordering=1} : (!llvm.ptr, i32) -> !llvm.float llvm.return } @@ -472,23 +472,23 @@ func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr, %f32 : !llvm.float) { // expected-error@+1 {{expected LLVM IR result type to match type for operand #1}} - %0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr, !llvm.float) -> !llvm.i32 + %0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr, !llvm.float) -> i32 llvm.return } // ----- -func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr, %i32 : !llvm.i32) { +func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr, %i32 : i32) { // expected-error@+1 {{expected LLVM IR floating point type}} - %0 = llvm.atomicrmw fadd %i32_ptr, %i32 unordered : !llvm.i32 + %0 = llvm.atomicrmw fadd %i32_ptr, %i32 unordered : i32 llvm.return } // ----- -func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr, %i1 : !llvm.i1) { +func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr, %i1 : i1) { // expected-error@+1 {{unexpected LLVM IR type for 'xchg' bin_op}} - %0 = llvm.atomicrmw xchg %i1_ptr, %i1 unordered : !llvm.i1 + %0 = llvm.atomicrmw xchg %i1_ptr, %i1 unordered : i1 llvm.return } @@ -510,129 +510,129 @@ // ----- -func @cmpxchg_mismatched_operands(%i64_ptr : !llvm.ptr, %i32 : !llvm.i32) { +func @cmpxchg_mismatched_operands(%i64_ptr : !llvm.ptr, %i32 : i32) { // expected-error@+1 {{expected LLVM IR element type for operand #0 to match type for all other operands}} - %0 = "llvm.cmpxchg"(%i64_ptr, %i32, %i32) {success_ordering=2,failure_ordering=2} : (!llvm.ptr, !llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)> + %0 = "llvm.cmpxchg"(%i64_ptr, %i32, %i32) {success_ordering=2,failure_ordering=2} : (!llvm.ptr, i32, i32) -> !llvm.struct<(i32, i1)> llvm.return } // ----- -func @cmpxchg_unexpected_type(%i1_ptr : !llvm.ptr, %i1 : !llvm.i1) { +func @cmpxchg_unexpected_type(%i1_ptr : !llvm.ptr, %i1 : i1) { // expected-error@+1 {{unexpected LLVM IR type}} - %0 = llvm.cmpxchg %i1_ptr, %i1, %i1 monotonic monotonic : !llvm.i1 + %0 = llvm.cmpxchg %i1_ptr, %i1, %i1 monotonic monotonic : i1 llvm.return } // ----- -func @cmpxchg_at_least_monotonic_success(%i32_ptr : !llvm.ptr, %i32 : !llvm.i32) { +func @cmpxchg_at_least_monotonic_success(%i32_ptr : !llvm.ptr, %i32 : i32) { // expected-error@+1 {{ordering must be at least 'monotonic'}} - %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 unordered monotonic : !llvm.i32 + %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 unordered monotonic : i32 llvm.return } // ----- -func @cmpxchg_at_least_monotonic_failure(%i32_ptr : !llvm.ptr, %i32 : !llvm.i32) { +func @cmpxchg_at_least_monotonic_failure(%i32_ptr : !llvm.ptr, %i32 : i32) { // expected-error@+1 {{ordering must be at least 'monotonic'}} - %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 monotonic unordered : !llvm.i32 + %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 monotonic unordered : i32 llvm.return } // ----- -func @cmpxchg_failure_release(%i32_ptr : !llvm.ptr, %i32 : !llvm.i32) { +func @cmpxchg_failure_release(%i32_ptr : !llvm.ptr, %i32 : i32) { // 
expected-error@+1 {{failure ordering cannot be 'release' or 'acq_rel'}} - %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel release : !llvm.i32 + %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel release : i32 llvm.return } // ----- -func @cmpxchg_failure_acq_rel(%i32_ptr : !llvm.ptr, %i32 : !llvm.i32) { +func @cmpxchg_failure_acq_rel(%i32_ptr : !llvm.ptr, %i32 : i32) { // expected-error@+1 {{failure ordering cannot be 'release' or 'acq_rel'}} - %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel acq_rel : !llvm.i32 + %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel acq_rel : i32 llvm.return } // ----- -llvm.func @foo(!llvm.i32) -> !llvm.i32 -llvm.func @__gxx_personality_v0(...) -> !llvm.i32 +llvm.func @foo(i32) -> i32 +llvm.func @__gxx_personality_v0(...) -> i32 llvm.func @bad_landingpad(%arg0: !llvm.ptr>) attributes { personality = @__gxx_personality_v0} { - %0 = llvm.mlir.constant(3 : i32) : !llvm.i32 - %1 = llvm.mlir.constant(2 : i32) : !llvm.i32 - %2 = llvm.invoke @foo(%1) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32 + %0 = llvm.mlir.constant(3 : i32) : i32 + %1 = llvm.mlir.constant(2 : i32) : i32 + %2 = llvm.invoke @foo(%1) to ^bb1 unwind ^bb2 : (i32) -> i32 ^bb1: // pred: ^bb0 - llvm.return %1 : !llvm.i32 + llvm.return %1 : i32 ^bb2: // pred: ^bb0 // expected-error@+1 {{clause #0 is not a known constant - null, addressof, bitcast}} - %3 = llvm.landingpad cleanup (catch %1 : !llvm.i32) (catch %arg0 : !llvm.ptr>) : !llvm.struct<(ptr, i32)> - llvm.return %0 : !llvm.i32 + %3 = llvm.landingpad cleanup (catch %1 : i32) (catch %arg0 : !llvm.ptr>) : !llvm.struct<(ptr, i32)> + llvm.return %0 : i32 } // ----- -llvm.func @foo(!llvm.i32) -> !llvm.i32 -llvm.func @__gxx_personality_v0(...) -> !llvm.i32 +llvm.func @foo(i32) -> i32 +llvm.func @__gxx_personality_v0(...) -> i32 -llvm.func @caller(%arg0: !llvm.i32) -> !llvm.i32 attributes { personality = @__gxx_personality_v0} { - %0 = llvm.mlir.constant(1 : i32) : !llvm.i32 - %1 = llvm.alloca %0 x !llvm.ptr : (!llvm.i32) -> !llvm.ptr> +llvm.func @caller(%arg0: i32) -> i32 attributes { personality = @__gxx_personality_v0} { + %0 = llvm.mlir.constant(1 : i32) : i32 + %1 = llvm.alloca %0 x !llvm.ptr : (i32) -> !llvm.ptr> // expected-note@+1 {{global addresses expected as operand to bitcast used in clauses for landingpad}} %2 = llvm.bitcast %1 : !llvm.ptr> to !llvm.ptr - %3 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32 + %3 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (i32) -> i32 ^bb1: // pred: ^bb0 - llvm.return %0 : !llvm.i32 + llvm.return %0 : i32 ^bb2: // pred: ^bb0 // expected-error@+1 {{constant clauses expected}} %5 = llvm.landingpad (catch %2 : !llvm.ptr) : !llvm.struct<(ptr, i32)> - llvm.return %0 : !llvm.i32 + llvm.return %0 : i32 } // ----- -llvm.func @foo(!llvm.i32) -> !llvm.i32 -llvm.func @__gxx_personality_v0(...) -> !llvm.i32 +llvm.func @foo(i32) -> i32 +llvm.func @__gxx_personality_v0(...) 
-> i32 -llvm.func @caller(%arg0: !llvm.i32) -> !llvm.i32 attributes { personality = @__gxx_personality_v0} { - %0 = llvm.mlir.constant(1 : i32) : !llvm.i32 - %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32 +llvm.func @caller(%arg0: i32) -> i32 attributes { personality = @__gxx_personality_v0} { + %0 = llvm.mlir.constant(1 : i32) : i32 + %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (i32) -> i32 ^bb1: // pred: ^bb0 - llvm.return %0 : !llvm.i32 + llvm.return %0 : i32 ^bb2: // pred: ^bb0 // expected-error@+1 {{landingpad instruction expects at least one clause or cleanup attribute}} %2 = llvm.landingpad : !llvm.struct<(ptr, i32)> - llvm.return %0 : !llvm.i32 + llvm.return %0 : i32 } // ----- -llvm.func @foo(!llvm.i32) -> !llvm.i32 -llvm.func @__gxx_personality_v0(...) -> !llvm.i32 +llvm.func @foo(i32) -> i32 +llvm.func @__gxx_personality_v0(...) -> i32 -llvm.func @caller(%arg0: !llvm.i32) -> !llvm.i32 attributes { personality = @__gxx_personality_v0 } { - %0 = llvm.mlir.constant(1 : i32) : !llvm.i32 - %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32 +llvm.func @caller(%arg0: i32) -> i32 attributes { personality = @__gxx_personality_v0 } { + %0 = llvm.mlir.constant(1 : i32) : i32 + %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (i32) -> i32 ^bb1: // pred: ^bb0 - llvm.return %0 : !llvm.i32 + llvm.return %0 : i32 ^bb2: // pred: ^bb0 %2 = llvm.landingpad cleanup : !llvm.struct<(ptr, i32)> // expected-error@+1 {{'llvm.resume' op expects landingpad value as operand}} - llvm.resume %0 : !llvm.i32 + llvm.resume %0 : i32 } // ----- -llvm.func @foo(!llvm.i32) -> !llvm.i32 +llvm.func @foo(i32) -> i32 -llvm.func @caller(%arg0: !llvm.i32) -> !llvm.i32 { - %0 = llvm.mlir.constant(1 : i32) : !llvm.i32 - %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (!llvm.i32) -> !llvm.i32 +llvm.func @caller(%arg0: i32) -> i32 { + %0 = llvm.mlir.constant(1 : i32) : i32 + %1 = llvm.invoke @foo(%0) to ^bb1 unwind ^bb2 : (i32) -> i32 ^bb1: // pred: ^bb0 - llvm.return %0 : !llvm.i32 + llvm.return %0 : i32 ^bb2: // pred: ^bb0 // expected-error@+1 {{llvm.landingpad needs to be in a function with a personality}} %2 = llvm.landingpad cleanup : !llvm.struct<(ptr, i32)> @@ -655,15 +655,15 @@ // ----- -func @switch_wrong_number_of_weights(%arg0 : !llvm.i32) { +func @switch_wrong_number_of_weights(%arg0 : i32) { // expected-error@+1 {{expects number of branch weights to match number of successors: 3 vs 2}} llvm.switch %arg0, ^bb1 [ - 42: ^bb2(%arg0, %arg0 : !llvm.i32, !llvm.i32) + 42: ^bb2(%arg0, %arg0 : i32, i32) ] {branch_weights = dense<[13, 17, 19]> : vector<3xi32>} ^bb1: // pred: ^bb0 llvm.return -^bb2(%1: !llvm.i32, %2: !llvm.i32): // pred: ^bb0 +^bb2(%1: i32, %2: i32): // pred: ^bb0 llvm.return } diff --git a/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir b/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir --- a/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir +++ b/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir @@ -4,22 +4,22 @@ // by introducing a new block that forwards its arguments to the original // successor through an unconditional branch. 
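// (Illustrative sketch, not part of the checked-in test; the block name
// ^fwd and value name %v below are hypothetical.) The legalization rewrites
// a conditional branch whose two successors are the same block with
// different arguments, e.g.
//   llvm.cond_br %cond, ^bb1(%a : i32), ^bb1(%b : i32)
// into a branch with distinct successors, where the new block only forwards
// its argument:
//   llvm.cond_br %cond, ^bb1(%a : i32), ^fwd(%b : i32)
// ^fwd(%v: i32):
//   llvm.br ^bb1(%v : i32)
// The CHECK lines below verify exactly this forwarding structure.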
// CHECK-LABEL: @repeated_successor_different_args -llvm.func @repeated_successor_different_args(%arg0: !llvm.i1, %arg1: !llvm.i32, %arg2: !llvm.i32) { +llvm.func @repeated_successor_different_args(%arg0: i1, %arg1: i32, %arg2: i32) { // CHECK: llvm.cond_br %{{.*}}, ^[[BB1:.*]]({{.*}}), ^[[BB2:.*]]({{.*}}) - llvm.cond_br %arg0, ^bb1(%arg1: !llvm.i32), ^bb1(%arg2: !llvm.i32) + llvm.cond_br %arg0, ^bb1(%arg1: i32), ^bb1(%arg2: i32) // CHECK: ^[[BB1]]({{.*}}): -^bb1(%arg3: !llvm.i32): +^bb1(%arg3: i32): llvm.return -// CHECK: ^[[BB2]](%[[ARG:.*]]: !llvm.i32): -// CHECK: llvm.br ^[[BB1]](%[[ARG]] : !llvm.i32) +// CHECK: ^[[BB2]](%[[ARG:.*]]: i32): +// CHECK: llvm.br ^[[BB1]](%[[ARG]] : i32) } // Verifies that duplicate successors without arguments do not lead to the // introduction of new blocks during legalization. // CHECK-LABEL: @repeated_successor_no_args -llvm.func @repeated_successor_no_args(%arg0: !llvm.i1) { +llvm.func @repeated_successor_no_args(%arg0: i1) { // CHECK: llvm.cond_br llvm.cond_br %arg0, ^bb1, ^bb1 diff --git a/mlir/test/Dialect/LLVMIR/nvvm.mlir b/mlir/test/Dialect/LLVMIR/nvvm.mlir --- a/mlir/test/Dialect/LLVMIR/nvvm.mlir +++ b/mlir/test/Dialect/LLVMIR/nvvm.mlir @@ -1,31 +1,31 @@ // RUN: mlir-opt %s | FileCheck %s -func @nvvm_special_regs() -> !llvm.i32 { - // CHECK: nvvm.read.ptx.sreg.tid.x : !llvm.i32 - %0 = nvvm.read.ptx.sreg.tid.x : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.tid.y : !llvm.i32 - %1 = nvvm.read.ptx.sreg.tid.y : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.tid.z : !llvm.i32 - %2 = nvvm.read.ptx.sreg.tid.z : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.ntid.x : !llvm.i32 - %3 = nvvm.read.ptx.sreg.ntid.x : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.ntid.y : !llvm.i32 - %4 = nvvm.read.ptx.sreg.ntid.y : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.ntid.z : !llvm.i32 - %5 = nvvm.read.ptx.sreg.ntid.z : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.ctaid.x : !llvm.i32 - %6 = nvvm.read.ptx.sreg.ctaid.x : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.ctaid.y : !llvm.i32 - %7 = nvvm.read.ptx.sreg.ctaid.y : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.ctaid.z : !llvm.i32 - %8 = nvvm.read.ptx.sreg.ctaid.z : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.nctaid.x : !llvm.i32 - %9 = nvvm.read.ptx.sreg.nctaid.x : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.nctaid.y : !llvm.i32 - %10 = nvvm.read.ptx.sreg.nctaid.y : !llvm.i32 - // CHECK: nvvm.read.ptx.sreg.nctaid.z : !llvm.i32 - %11 = nvvm.read.ptx.sreg.nctaid.z : !llvm.i32 - llvm.return %0 : !llvm.i32 +func @nvvm_special_regs() -> i32 { + // CHECK: nvvm.read.ptx.sreg.tid.x : i32 + %0 = nvvm.read.ptx.sreg.tid.x : i32 + // CHECK: nvvm.read.ptx.sreg.tid.y : i32 + %1 = nvvm.read.ptx.sreg.tid.y : i32 + // CHECK: nvvm.read.ptx.sreg.tid.z : i32 + %2 = nvvm.read.ptx.sreg.tid.z : i32 + // CHECK: nvvm.read.ptx.sreg.ntid.x : i32 + %3 = nvvm.read.ptx.sreg.ntid.x : i32 + // CHECK: nvvm.read.ptx.sreg.ntid.y : i32 + %4 = nvvm.read.ptx.sreg.ntid.y : i32 + // CHECK: nvvm.read.ptx.sreg.ntid.z : i32 + %5 = nvvm.read.ptx.sreg.ntid.z : i32 + // CHECK: nvvm.read.ptx.sreg.ctaid.x : i32 + %6 = nvvm.read.ptx.sreg.ctaid.x : i32 + // CHECK: nvvm.read.ptx.sreg.ctaid.y : i32 + %7 = nvvm.read.ptx.sreg.ctaid.y : i32 + // CHECK: nvvm.read.ptx.sreg.ctaid.z : i32 + %8 = nvvm.read.ptx.sreg.ctaid.z : i32 + // CHECK: nvvm.read.ptx.sreg.nctaid.x : i32 + %9 = nvvm.read.ptx.sreg.nctaid.x : i32 + // CHECK: nvvm.read.ptx.sreg.nctaid.y : i32 + %10 = nvvm.read.ptx.sreg.nctaid.y : i32 + // CHECK: nvvm.read.ptx.sreg.nctaid.z : i32 + %11 = nvvm.read.ptx.sreg.nctaid.z : i32 + llvm.return %0 : i32 } func 
@llvm.nvvm.barrier0() { @@ -35,18 +35,18 @@ } func @nvvm_shfl( - %arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32, - %arg3 : !llvm.i32, %arg4 : !llvm.float) -> !llvm.i32 { - // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i32 - %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 : !llvm.i32 + %arg0 : i32, %arg1 : i32, %arg2 : i32, + %arg3 : i32, %arg4 : !llvm.float) -> i32 { + // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : i32 + %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 : i32 // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.float %1 = nvvm.shfl.sync.bfly %arg0, %arg4, %arg1, %arg2 : !llvm.float - llvm.return %0 : !llvm.i32 + llvm.return %0 : i32 } func @nvvm_shfl_pred( - %arg0 : !llvm.i32, %arg1 : !llvm.i32, %arg2 : !llvm.i32, - %arg3 : !llvm.i32, %arg4 : !llvm.float) -> !llvm.struct<(i32, i1)> { + %arg0 : i32, %arg1 : i32, %arg2 : i32, + %arg3 : i32, %arg4 : !llvm.float) -> !llvm.struct<(i32, i1)> { // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.struct<(i32, i1)> %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(i32, i1)> // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.struct<(float, i1)> @@ -54,10 +54,10 @@ llvm.return %0 : !llvm.struct<(i32, i1)> } -func @nvvm_vote(%arg0 : !llvm.i32, %arg1 : !llvm.i1) -> !llvm.i32 { - // CHECK: nvvm.vote.ballot.sync %{{.*}}, %{{.*}} : !llvm.i32 - %0 = nvvm.vote.ballot.sync %arg0, %arg1 : !llvm.i32 - llvm.return %0 : !llvm.i32 +func @nvvm_vote(%arg0 : i32, %arg1 : i1) -> i32 { + // CHECK: nvvm.vote.ballot.sync %{{.*}}, %{{.*}} : i32 + %0 = nvvm.vote.ballot.sync %arg0, %arg1 : i32 + llvm.return %0 : i32 } func @nvvm_mma(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>, diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir --- a/mlir/test/Dialect/LLVMIR/rocdl.mlir +++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir @@ -1,32 +1,32 @@ // RUN: mlir-opt %s | FileCheck %s -func @rocdl_special_regs() -> !llvm.i32 { +func @rocdl_special_regs() -> i32 { // CHECK-LABEL: rocdl_special_regs - // CHECK: rocdl.workitem.id.x : !llvm.i32 - %0 = rocdl.workitem.id.x : !llvm.i32 - // CHECK: rocdl.workitem.id.y : !llvm.i32 - %1 = rocdl.workitem.id.y : !llvm.i32 - // CHECK: rocdl.workitem.id.z : !llvm.i32 - %2 = rocdl.workitem.id.z : !llvm.i32 - // CHECK: rocdl.workgroup.id.x : !llvm.i32 - %3 = rocdl.workgroup.id.x : !llvm.i32 - // CHECK: rocdl.workgroup.id.y : !llvm.i32 - %4 = rocdl.workgroup.id.y : !llvm.i32 - // CHECK: rocdl.workgroup.id.z : !llvm.i32 - %5 = rocdl.workgroup.id.z : !llvm.i32 - // CHECK: rocdl.workgroup.dim.x : !llvm.i32 - %6 = rocdl.workgroup.dim.x : !llvm.i32 - // CHECK: rocdl.workgroup.dim.y : !llvm.i32 - %7 = rocdl.workgroup.dim.y : !llvm.i32 - // CHECK: rocdl.workgroup.dim.z : !llvm.i32 - %8 = rocdl.workgroup.dim.z : !llvm.i32 - // CHECK: rocdl.grid.dim.x : !llvm.i32 - %9 = rocdl.grid.dim.x : !llvm.i32 - // CHECK: rocdl.grid.dim.y : !llvm.i32 - %10 = rocdl.grid.dim.y : !llvm.i32 - // CHECK: rocdl.grid.dim.z : !llvm.i32 - %11 = rocdl.grid.dim.z : !llvm.i32 - llvm.return %0 : !llvm.i32 + // CHECK: rocdl.workitem.id.x : i32 + %0 = rocdl.workitem.id.x : i32 + // CHECK: rocdl.workitem.id.y : i32 + %1 = rocdl.workitem.id.y : i32 + // CHECK: rocdl.workitem.id.z : i32 + %2 = rocdl.workitem.id.z : i32 + // CHECK: rocdl.workgroup.id.x : i32 + %3 = rocdl.workgroup.id.x : i32 + // CHECK: rocdl.workgroup.id.y : i32 + %4 = rocdl.workgroup.id.y : i32 + // CHECK: 
rocdl.workgroup.id.z : i32 + %5 = rocdl.workgroup.id.z : i32 + // CHECK: rocdl.workgroup.dim.x : i32 + %6 = rocdl.workgroup.dim.x : i32 + // CHECK: rocdl.workgroup.dim.y : i32 + %7 = rocdl.workgroup.dim.y : i32 + // CHECK: rocdl.workgroup.dim.z : i32 + %8 = rocdl.workgroup.dim.z : i32 + // CHECK: rocdl.grid.dim.x : i32 + %9 = rocdl.grid.dim.x : i32 + // CHECK: rocdl.grid.dim.y : i32 + %10 = rocdl.grid.dim.y : i32 + // CHECK: rocdl.grid.dim.z : i32 + %11 = rocdl.grid.dim.z : i32 + llvm.return %0 : i32 } func @rocdl.barrier() { @@ -36,118 +36,118 @@ } func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float, - %arg2 : !llvm.vec<32 x float>, %arg3 : !llvm.i32, + %arg2 : !llvm.vec<32 x float>, %arg3 : i32, %arg4 : !llvm.vec<16 x float>, %arg5 : !llvm.vec<4 x float>, %arg6 : !llvm.vec<4 x half>, %arg7 : !llvm.vec<32 x i32>, %arg8 : !llvm.vec<16 x i32>, %arg9 : !llvm.vec<4 x i32>, %arg10 : !llvm.vec<2 x i16>) -> !llvm.vec<32 x float> { // CHECK-LABEL: rocdl.xdlops - // CHECK: rocdl.mfma.f32.32x32x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<32 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float> + // CHECK: rocdl.mfma.f32.32x32x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float> %r0 = rocdl.mfma.f32.32x32x1f32 %arg0, %arg1, %arg2, %arg3, %arg3, %arg3 : (!llvm.float, !llvm.float, !llvm.vec<32 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float> + i32, i32, i32) -> !llvm.vec<32 x float> - // CHECK: rocdl.mfma.f32.16x16x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + // CHECK: rocdl.mfma.f32.16x16x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float> %r1 = rocdl.mfma.f32.16x16x1f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + i32, i32, i32) -> !llvm.vec<16 x float> - // CHECK: rocdl.mfma.f32.16x16x4f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + // CHECK: rocdl.mfma.f32.16x16x4f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float> %r2 = rocdl.mfma.f32.16x16x4f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + i32, i32, i32) -> !llvm.vec<4 x float> - // CHECK: rocdl.mfma.f32.4x4x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + // CHECK: rocdl.mfma.f32.4x4x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float> %r3 = rocdl.mfma.f32.4x4x1f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + i32, i32, i32) -> !llvm.vec<4 x float> - // CHECK: rocdl.mfma.f32.32x32x2f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + // CHECK: rocdl.mfma.f32.32x32x2f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float> %r4= rocdl.mfma.f32.32x32x2f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + i32, i32, i32) -> !llvm.vec<16 x float> - // 
CHECK: rocdl.mfma.f32.32x32x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float> + // CHECK: rocdl.mfma.f32.32x32x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float> %r5 = rocdl.mfma.f32.32x32x4f16 %arg6, %arg6, %arg2, %arg3, %arg3, %arg3 : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float> + i32, i32, i32) -> !llvm.vec<32 x float> - // CHECK: rocdl.mfma.f32.16x16x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + // CHECK: rocdl.mfma.f32.16x16x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float> %r6 = rocdl.mfma.f32.16x16x4f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + i32, i32, i32) -> !llvm.vec<16 x float> - // CHECK: rocdl.mfma.f32.4x4x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + // CHECK: rocdl.mfma.f32.4x4x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float> %r7 = rocdl.mfma.f32.4x4x4f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + i32, i32, i32) -> !llvm.vec<4 x float> - // CHECK: rocdl.mfma.f32.32x32x8f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + // CHECK: rocdl.mfma.f32.32x32x8f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float> %r8 = rocdl.mfma.f32.32x32x8f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + i32, i32, i32) -> !llvm.vec<16 x float> - // CHECK: rocdl.mfma.f32.16x16x16f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + // CHECK: rocdl.mfma.f32.16x16x16f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float> %r9 = rocdl.mfma.f32.16x16x16f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + i32, i32, i32) -> !llvm.vec<4 x float> - // CHECK: rocdl.mfma.i32.32x32x4i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<32 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x i32> + // CHECK: rocdl.mfma.i32.32x32x4i8 {{.*}} : (i32, i32, !llvm.vec<32 x i32>, i32, i32, i32) -> !llvm.vec<32 x i32> %r10 = rocdl.mfma.i32.32x32x4i8 %arg3, %arg3, %arg7, %arg3, %arg3, %arg3 : - (!llvm.i32, !llvm.i32, !llvm.vec<32 x i32>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x i32> + (i32, i32, !llvm.vec<32 x i32>, + i32, i32, i32) -> !llvm.vec<32 x i32> - // CHECK: rocdl.mfma.i32.16x16x4i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32> + // CHECK: rocdl.mfma.i32.16x16x4i8 {{.*}} : (i32, i32, !llvm.vec<16 x i32>, 
i32, i32, i32) -> !llvm.vec<16 x i32> %r11 = rocdl.mfma.i32.16x16x4i8 %arg3, %arg3, %arg8, %arg3, %arg3, %arg3 : - (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32> + (i32, i32, !llvm.vec<16 x i32>, + i32, i32, i32) -> !llvm.vec<16 x i32> - // CHECK: rocdl.mfma.i32.4x4x4i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32> + // CHECK: rocdl.mfma.i32.4x4x4i8 {{.*}} : (i32, i32, !llvm.vec<4 x i32>, i32, i32, i32) -> !llvm.vec<4 x i32> %r12 = rocdl.mfma.i32.4x4x4i8 %arg3, %arg3, %arg9, %arg3, %arg3, %arg3 : - (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32> + (i32, i32, !llvm.vec<4 x i32>, + i32, i32, i32) -> !llvm.vec<4 x i32> - // CHECK: rocdl.mfma.i32.32x32x8i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32> + // CHECK: rocdl.mfma.i32.32x32x8i8 {{.*}} : (i32, i32, !llvm.vec<16 x i32>, i32, i32, i32) -> !llvm.vec<16 x i32> %r13 = rocdl.mfma.i32.32x32x8i8 %arg3, %arg3, %arg8, %arg3, %arg3, %arg3 : - (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32> + (i32, i32, !llvm.vec<16 x i32>, + i32, i32, i32) -> !llvm.vec<16 x i32> - // CHECK: rocdl.mfma.i32.16x16x16i8 {{.*}} : (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32> + // CHECK: rocdl.mfma.i32.16x16x16i8 {{.*}} : (i32, i32, !llvm.vec<4 x i32>, i32, i32, i32) -> !llvm.vec<4 x i32> %r14 = rocdl.mfma.i32.16x16x16i8 %arg3, %arg3, %arg9, %arg3, %arg3, %arg3 : - (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32> + (i32, i32, !llvm.vec<4 x i32>, + i32, i32, i32) -> !llvm.vec<4 x i32> - // CHECK: rocdl.mfma.f32.32x32x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float> + // CHECK: rocdl.mfma.f32.32x32x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float> %r15 = rocdl.mfma.f32.32x32x2bf16 %arg10, %arg10, %arg2, %arg3, %arg3, %arg3 : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float> + i32, i32, i32) -> !llvm.vec<32 x float> - // CHECK: rocdl.mfma.f32.16x16x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + // CHECK: rocdl.mfma.f32.16x16x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float> %r16 = rocdl.mfma.f32.16x16x2bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + i32, i32, i32) -> !llvm.vec<16 x float> - // CHECK: rocdl.mfma.f32.4x4x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + // CHECK: rocdl.mfma.f32.4x4x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float> %r17 = rocdl.mfma.f32.4x4x2bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + i32, i32, i32) -> !llvm.vec<4 x float> - // CHECK: rocdl.mfma.f32.32x32x4bf16 {{.*}} : 
(!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + // CHECK: rocdl.mfma.f32.32x32x4bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float> %r18 = rocdl.mfma.f32.32x32x4bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float> + i32, i32, i32) -> !llvm.vec<16 x float> - // CHECK: rocdl.mfma.f32.16x16x8bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + // CHECK: rocdl.mfma.f32.16x16x8bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float> %r19 = rocdl.mfma.f32.16x16x8bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float> + i32, i32, i32) -> !llvm.vec<4 x float> llvm.return %r0 : !llvm.vec<32 x float> } -llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : !llvm.i32, - %offset : !llvm.i32, %glc : !llvm.i1, - %slc : !llvm.i1, %vdata1 : !llvm.vec<1 x float>, +llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : i32, + %offset : i32, %glc : i1, + %slc : i1, %vdata1 : !llvm.vec<1 x float>, %vdata2 : !llvm.vec<2 x float>, %vdata4 : !llvm.vec<4 x float>) { // CHECK-LABEL: rocdl.mubuf // CHECK: %{{.*}} = rocdl.buffer.load %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<1 x float> diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir --- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir +++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir @@ -1,28 +1,28 @@ // RUN: mlir-opt %s | mlir-opt | FileCheck %s // CHECK-LABEL: func @ops -// CHECK-SAME: (%[[I32:.*]]: !llvm.i32, %[[FLOAT:.*]]: !llvm.float, %[[I8PTR1:.*]]: !llvm.ptr, %[[I8PTR2:.*]]: !llvm.ptr, %[[BOOL:.*]]: !llvm.i1) -func @ops(%arg0: !llvm.i32, %arg1: !llvm.float, +// CHECK-SAME: (%[[I32:.*]]: i32, %[[FLOAT:.*]]: !llvm.float, %[[I8PTR1:.*]]: !llvm.ptr, %[[I8PTR2:.*]]: !llvm.ptr, %[[BOOL:.*]]: i1) +func @ops(%arg0: i32, %arg1: !llvm.float, %arg2: !llvm.ptr, %arg3: !llvm.ptr, - %arg4: !llvm.i1) { + %arg4: i1) { // Integer arithmetic binary operations. 
// -// CHECK: {{.*}} = llvm.add %[[I32]], %[[I32]] : !llvm.i32 -// CHECK: {{.*}} = llvm.sub %[[I32]], %[[I32]] : !llvm.i32 -// CHECK: {{.*}} = llvm.mul %[[I32]], %[[I32]] : !llvm.i32 -// CHECK: {{.*}} = llvm.udiv %[[I32]], %[[I32]] : !llvm.i32 -// CHECK: {{.*}} = llvm.sdiv %[[I32]], %[[I32]] : !llvm.i32 -// CHECK: {{.*}} = llvm.urem %[[I32]], %[[I32]] : !llvm.i32 -// CHECK: {{.*}} = llvm.srem %[[I32]], %[[I32]] : !llvm.i32 -// CHECK: {{.*}} = llvm.icmp "ne" %[[I32]], %[[I32]] : !llvm.i32 - %0 = llvm.add %arg0, %arg0 : !llvm.i32 - %1 = llvm.sub %arg0, %arg0 : !llvm.i32 - %2 = llvm.mul %arg0, %arg0 : !llvm.i32 - %3 = llvm.udiv %arg0, %arg0 : !llvm.i32 - %4 = llvm.sdiv %arg0, %arg0 : !llvm.i32 - %5 = llvm.urem %arg0, %arg0 : !llvm.i32 - %6 = llvm.srem %arg0, %arg0 : !llvm.i32 - %7 = llvm.icmp "ne" %arg0, %arg0 : !llvm.i32 +// CHECK: {{.*}} = llvm.add %[[I32]], %[[I32]] : i32 +// CHECK: {{.*}} = llvm.sub %[[I32]], %[[I32]] : i32 +// CHECK: {{.*}} = llvm.mul %[[I32]], %[[I32]] : i32 +// CHECK: {{.*}} = llvm.udiv %[[I32]], %[[I32]] : i32 +// CHECK: {{.*}} = llvm.sdiv %[[I32]], %[[I32]] : i32 +// CHECK: {{.*}} = llvm.urem %[[I32]], %[[I32]] : i32 +// CHECK: {{.*}} = llvm.srem %[[I32]], %[[I32]] : i32 +// CHECK: {{.*}} = llvm.icmp "ne" %[[I32]], %[[I32]] : i32 + %0 = llvm.add %arg0, %arg0 : i32 + %1 = llvm.sub %arg0, %arg0 : i32 + %2 = llvm.mul %arg0, %arg0 : i32 + %3 = llvm.udiv %arg0, %arg0 : i32 + %4 = llvm.sdiv %arg0, %arg0 : i32 + %5 = llvm.urem %arg0, %arg0 : i32 + %6 = llvm.srem %arg0, %arg0 : i32 + %7 = llvm.icmp "ne" %arg0, %arg0 : i32 // Floating point binary operations. // @@ -39,29 +39,29 @@ // Memory-related operations. // -// CHECK-NEXT: %[[ALLOCA:.*]] = llvm.alloca %[[I32]] x !llvm.double : (!llvm.i32) -> !llvm.ptr -// CHECK-NEXT: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][%[[I32]], %[[I32]]] : (!llvm.ptr, !llvm.i32, !llvm.i32) -> !llvm.ptr +// CHECK-NEXT: %[[ALLOCA:.*]] = llvm.alloca %[[I32]] x !llvm.double : (i32) -> !llvm.ptr +// CHECK-NEXT: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][%[[I32]], %[[I32]]] : (!llvm.ptr, i32, i32) -> !llvm.ptr // CHECK-NEXT: %[[VALUE:.*]] = llvm.load %[[GEP]] : !llvm.ptr // CHECK-NEXT: llvm.store %[[VALUE]], %[[ALLOCA]] : !llvm.ptr // CHECK-NEXT: %{{.*}} = llvm.bitcast %[[ALLOCA]] : !llvm.ptr to !llvm.ptr - %13 = llvm.alloca %arg0 x !llvm.double : (!llvm.i32) -> !llvm.ptr - %14 = llvm.getelementptr %13[%arg0, %arg0] : (!llvm.ptr, !llvm.i32, !llvm.i32) -> !llvm.ptr + %13 = llvm.alloca %arg0 x !llvm.double : (i32) -> !llvm.ptr + %14 = llvm.getelementptr %13[%arg0, %arg0] : (!llvm.ptr, i32, i32) -> !llvm.ptr %15 = llvm.load %14 : !llvm.ptr llvm.store %15, %13 : !llvm.ptr %16 = llvm.bitcast %13 : !llvm.ptr to !llvm.ptr // Function call-related operations. 
// -// CHECK: %[[STRUCT:.*]] = llvm.call @foo(%[[I32]]) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)> +// CHECK: %[[STRUCT:.*]] = llvm.call @foo(%[[I32]]) : (i32) -> !llvm.struct<(i32, double, i32)> // CHECK: %[[VALUE:.*]] = llvm.extractvalue %[[STRUCT]][0] : !llvm.struct<(i32, double, i32)> // CHECK: %[[NEW_STRUCT:.*]] = llvm.insertvalue %[[VALUE]], %[[STRUCT]][2] : !llvm.struct<(i32, double, i32)> // CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @foo : !llvm.ptr (i32)>> -// CHECK: %{{.*}} = llvm.call %[[FUNC]](%[[I32]]) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)> - %17 = llvm.call @foo(%arg0) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)> +// CHECK: %{{.*}} = llvm.call %[[FUNC]](%[[I32]]) : (i32) -> !llvm.struct<(i32, double, i32)> + %17 = llvm.call @foo(%arg0) : (i32) -> !llvm.struct<(i32, double, i32)> %18 = llvm.extractvalue %17[0] : !llvm.struct<(i32, double, i32)> %19 = llvm.insertvalue %18, %17[2] : !llvm.struct<(i32, double, i32)> %20 = llvm.mlir.addressof @foo : !llvm.ptr (i32)>> - %21 = llvm.call %20(%arg0) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)> + %21 = llvm.call %20(%arg0) : (i32) -> !llvm.struct<(i32, double, i32)> // Terminator operations and their successors. @@ -77,9 +77,9 @@ // CHECK: ^[[BB2]] ^bb2: // CHECK: %{{.*}} = llvm.mlir.undef : !llvm.struct<(i32, double, i32)> -// CHECK: %{{.*}} = llvm.mlir.constant(42 : i64) : !llvm.i47 +// CHECK: %{{.*}} = llvm.mlir.constant(42 : i64) : i47 %22 = llvm.mlir.undef : !llvm.struct<(i32, double, i32)> - %23 = llvm.mlir.constant(42) : !llvm.i47 + %23 = llvm.mlir.constant(42) : i47 // CHECK: llvm.switch %0, ^[[BB3]] [ // CHECK-NEXT: 1: ^[[BB4:.*]], // CHECK-NEXT: 2: ^[[BB5:.*]], @@ -116,15 +116,15 @@ // CHECK: ^[[BB7]] ^bb7: // Misc operations. -// CHECK: %{{.*}} = llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.i32 - %24 = llvm.select %7, %0, %1 : !llvm.i1, !llvm.i32 +// CHECK: %{{.*}} = llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, i32 + %24 = llvm.select %7, %0, %1 : i1, i32 // Integer to pointer and pointer to integer conversions. 
// -// CHECK: %[[PTR:.*]] = llvm.inttoptr %[[I32]] : !llvm.i32 to !llvm.ptr -// CHECK: %{{.*}} = llvm.ptrtoint %[[PTR]] : !llvm.ptr to !llvm.i32 - %25 = llvm.inttoptr %arg0 : !llvm.i32 to !llvm.ptr - %26 = llvm.ptrtoint %25 : !llvm.ptr to !llvm.i32 +// CHECK: %[[PTR:.*]] = llvm.inttoptr %[[I32]] : i32 to !llvm.ptr +// CHECK: %{{.*}} = llvm.ptrtoint %[[PTR]] : !llvm.ptr to i32 + %25 = llvm.inttoptr %arg0 : i32 to !llvm.ptr + %26 = llvm.ptrtoint %25 : !llvm.ptr to i32 // Extended and Quad floating point // @@ -142,53 +142,53 @@ // CHECK: "llvm.intr.pow"(%[[FLOAT]], %[[FLOAT]]) : (!llvm.float, !llvm.float) -> !llvm.float %31 = "llvm.intr.pow"(%arg1, %arg1) : (!llvm.float, !llvm.float) -> !llvm.float -// CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (!llvm.i32) -> !llvm.i32 - %32 = "llvm.intr.bitreverse"(%arg0) : (!llvm.i32) -> !llvm.i32 +// CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (i32) -> i32 + %32 = "llvm.intr.bitreverse"(%arg0) : (i32) -> i32 -// CHECK: "llvm.intr.ctpop"(%{{.*}}) : (!llvm.i32) -> !llvm.i32 - %33 = "llvm.intr.ctpop"(%arg0) : (!llvm.i32) -> !llvm.i32 +// CHECK: "llvm.intr.ctpop"(%{{.*}}) : (i32) -> i32 + %33 = "llvm.intr.ctpop"(%arg0) : (i32) -> i32 -// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, !llvm.i32, !llvm.i1) -> () - "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr, !llvm.ptr, !llvm.i32, !llvm.i1) -> () +// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, i32, i1) -> () + "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr, !llvm.ptr, i32, i1) -> () -// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, !llvm.i32, !llvm.i1) -> () - "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr, !llvm.ptr, !llvm.i32, !llvm.i1) -> () +// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, i32, i1) -> () + "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr, !llvm.ptr, i32, i1) -> () // CHECK: %[[SZ:.*]] = llvm.mlir.constant - %sz = llvm.mlir.constant(10: i64) : !llvm.i64 -// CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, !llvm.i64, !llvm.i1) -> () - "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg4) : (!llvm.ptr, !llvm.ptr, !llvm.i64, !llvm.i1) -> () + %sz = llvm.mlir.constant(10: i64) : i64 +// CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, i64, i1) -> () + "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg4) : (!llvm.ptr, !llvm.ptr, i64, i1) -> () // CHECK: llvm.return llvm.return } // A larger self-contained function. 
-// CHECK-LABEL: llvm.func @foo(%{{.*}}: !llvm.i32) -> !llvm.struct<(i32, double, i32)> { -llvm.func @foo(%arg0: !llvm.i32) -> !llvm.struct<(i32, double, i32)> { -// CHECK: %[[V0:.*]] = llvm.mlir.constant(3 : i64) : !llvm.i32 -// CHECK: %[[V1:.*]] = llvm.mlir.constant(3 : i64) : !llvm.i32 +// CHECK-LABEL: llvm.func @foo(%{{.*}}: i32) -> !llvm.struct<(i32, double, i32)> { +llvm.func @foo(%arg0: i32) -> !llvm.struct<(i32, double, i32)> { +// CHECK: %[[V0:.*]] = llvm.mlir.constant(3 : i64) : i32 +// CHECK: %[[V1:.*]] = llvm.mlir.constant(3 : i64) : i32 // CHECK: %[[V2:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : !llvm.double // CHECK: %[[V3:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : !llvm.double -// CHECK: %[[V4:.*]] = llvm.add %[[V0]], %[[V1]] : !llvm.i32 -// CHECK: %[[V5:.*]] = llvm.mul %[[V4]], %[[V1]] : !llvm.i32 +// CHECK: %[[V4:.*]] = llvm.add %[[V0]], %[[V1]] : i32 +// CHECK: %[[V5:.*]] = llvm.mul %[[V4]], %[[V1]] : i32 // CHECK: %[[V6:.*]] = llvm.fadd %[[V2]], %[[V3]] : !llvm.double // CHECK: %[[V7:.*]] = llvm.fsub %[[V3]], %[[V6]] : !llvm.double -// CHECK: %[[V8:.*]] = llvm.mlir.constant(1 : i64) : !llvm.i1 -// CHECK: llvm.cond_br %[[V8]], ^[[BB1:.*]](%[[V4]] : !llvm.i32), ^[[BB2:.*]](%[[V4]] : !llvm.i32) - %0 = llvm.mlir.constant(3) : !llvm.i32 - %1 = llvm.mlir.constant(3) : !llvm.i32 +// CHECK: %[[V8:.*]] = llvm.mlir.constant(1 : i64) : i1 +// CHECK: llvm.cond_br %[[V8]], ^[[BB1:.*]](%[[V4]] : i32), ^[[BB2:.*]](%[[V4]] : i32) + %0 = llvm.mlir.constant(3) : i32 + %1 = llvm.mlir.constant(3) : i32 %2 = llvm.mlir.constant(4.200000e+01) : !llvm.double %3 = llvm.mlir.constant(4.200000e+01) : !llvm.double - %4 = llvm.add %0, %1 : !llvm.i32 - %5 = llvm.mul %4, %1 : !llvm.i32 + %4 = llvm.add %0, %1 : i32 + %5 = llvm.mul %4, %1 : i32 %6 = llvm.fadd %2, %3 : !llvm.double %7 = llvm.fsub %3, %6 : !llvm.double - %8 = llvm.mlir.constant(1) : !llvm.i1 - llvm.cond_br %8, ^bb1(%4 : !llvm.i32), ^bb2(%4 : !llvm.i32) + %8 = llvm.mlir.constant(1) : i1 + llvm.cond_br %8, ^bb1(%4 : i32), ^bb2(%4 : i32) -// CHECK:^[[BB1]](%[[V9:.*]]: !llvm.i32): -// CHECK: %[[V10:.*]] = llvm.call @foo(%[[V9]]) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)> +// CHECK:^[[BB1]](%[[V9:.*]]: i32): +// CHECK: %[[V10:.*]] = llvm.call @foo(%[[V9]]) : (i32) -> !llvm.struct<(i32, double, i32)> // CHECK: %[[V11:.*]] = llvm.extractvalue %[[V10]][0] : !llvm.struct<(i32, double, i32)> // CHECK: %[[V12:.*]] = llvm.extractvalue %[[V10]][1] : !llvm.struct<(i32, double, i32)> // CHECK: %[[V13:.*]] = llvm.extractvalue %[[V10]][2] : !llvm.struct<(i32, double, i32)> @@ -197,8 +197,8 @@ // CHECK: %[[V16:.*]] = llvm.insertvalue %[[V7]], %[[V15]][1] : !llvm.struct<(i32, double, i32)> // CHECK: %[[V17:.*]] = llvm.insertvalue %[[V11]], %[[V16]][2] : !llvm.struct<(i32, double, i32)> // CHECK: llvm.return %[[V17]] : !llvm.struct<(i32, double, i32)> -^bb1(%9: !llvm.i32): - %10 = llvm.call @foo(%9) : (!llvm.i32) -> !llvm.struct<(i32, double, i32)> +^bb1(%9: i32): + %10 = llvm.call @foo(%9) : (i32) -> !llvm.struct<(i32, double, i32)> %11 = llvm.extractvalue %10[0] : !llvm.struct<(i32, double, i32)> %12 = llvm.extractvalue %10[1] : !llvm.struct<(i32, double, i32)> %13 = llvm.extractvalue %10[2] : !llvm.struct<(i32, double, i32)> @@ -208,13 +208,13 @@ %17 = llvm.insertvalue %11, %16[2] : !llvm.struct<(i32, double, i32)> llvm.return %17 : !llvm.struct<(i32, double, i32)> -// CHECK:^[[BB2]](%[[V18:.*]]: !llvm.i32): +// CHECK:^[[BB2]](%[[V18:.*]]: i32): // CHECK: %[[V19:.*]] = llvm.mlir.undef : !llvm.struct<(i32, double, i32)> // CHECK: 
%[[V20:.*]] = llvm.insertvalue %[[V18]], %[[V19]][0] : !llvm.struct<(i32, double, i32)> // CHECK: %[[V21:.*]] = llvm.insertvalue %[[V7]], %[[V20]][1] : !llvm.struct<(i32, double, i32)> // CHECK: %[[V22:.*]] = llvm.insertvalue %[[V5]], %[[V21]][2] : !llvm.struct<(i32, double, i32)> // CHECK: llvm.return %[[V22]] : !llvm.struct<(i32, double, i32)> -^bb2(%18: !llvm.i32): +^bb2(%18: i32): %19 = llvm.mlir.undef : !llvm.struct<(i32, double, i32)> %20 = llvm.insertvalue %18, %19[0] : !llvm.struct<(i32, double, i32)> %21 = llvm.insertvalue %7, %20[1] : !llvm.struct<(i32, double, i32)> @@ -223,40 +223,40 @@ } // CHECK-LABEL: @casts -// CHECK-SAME: (%[[I32:.*]]: !llvm.i32, %[[I64:.*]]: !llvm.i64, %[[V4I32:.*]]: !llvm.vec<4 x i32>, %[[V4I64:.*]]: !llvm.vec<4 x i64>, %[[I32PTR:.*]]: !llvm.ptr) -func @casts(%arg0: !llvm.i32, %arg1: !llvm.i64, %arg2: !llvm.vec<4 x i32>, +// CHECK-SAME: (%[[I32:.*]]: i32, %[[I64:.*]]: i64, %[[V4I32:.*]]: !llvm.vec<4 x i32>, %[[V4I64:.*]]: !llvm.vec<4 x i64>, %[[I32PTR:.*]]: !llvm.ptr) +func @casts(%arg0: i32, %arg1: i64, %arg2: !llvm.vec<4 x i32>, %arg3: !llvm.vec<4 x i64>, %arg4: !llvm.ptr) { -// CHECK: = llvm.sext %[[I32]] : !llvm.i32 to !llvm.i56 - %0 = llvm.sext %arg0 : !llvm.i32 to !llvm.i56 -// CHECK: = llvm.zext %[[I32]] : !llvm.i32 to !llvm.i64 - %1 = llvm.zext %arg0 : !llvm.i32 to !llvm.i64 -// CHECK: = llvm.trunc %[[I64]] : !llvm.i64 to !llvm.i56 - %2 = llvm.trunc %arg1 : !llvm.i64 to !llvm.i56 +// CHECK: = llvm.sext %[[I32]] : i32 to i56 + %0 = llvm.sext %arg0 : i32 to i56 +// CHECK: = llvm.zext %[[I32]] : i32 to i64 + %1 = llvm.zext %arg0 : i32 to i64 +// CHECK: = llvm.trunc %[[I64]] : i64 to i56 + %2 = llvm.trunc %arg1 : i64 to i56 // CHECK: = llvm.sext %[[V4I32]] : !llvm.vec<4 x i32> to !llvm.vec<4 x i56> %3 = llvm.sext %arg2 : !llvm.vec<4 x i32> to !llvm.vec<4 x i56> // CHECK: = llvm.zext %[[V4I32]] : !llvm.vec<4 x i32> to !llvm.vec<4 x i64> %4 = llvm.zext %arg2 : !llvm.vec<4 x i32> to !llvm.vec<4 x i64> // CHECK: = llvm.trunc %[[V4I64]] : !llvm.vec<4 x i64> to !llvm.vec<4 x i56> %5 = llvm.trunc %arg3 : !llvm.vec<4 x i64> to !llvm.vec<4 x i56> -// CHECK: = llvm.sitofp %[[I32]] : !llvm.i32 to !llvm.float - %6 = llvm.sitofp %arg0 : !llvm.i32 to !llvm.float -// CHECK: %[[FLOAT:.*]] = llvm.uitofp %[[I32]] : !llvm.i32 to !llvm.float - %7 = llvm.uitofp %arg0 : !llvm.i32 to !llvm.float -// CHECK: = llvm.fptosi %[[FLOAT]] : !llvm.float to !llvm.i32 - %8 = llvm.fptosi %7 : !llvm.float to !llvm.i32 -// CHECK: = llvm.fptoui %[[FLOAT]] : !llvm.float to !llvm.i32 - %9 = llvm.fptoui %7 : !llvm.float to !llvm.i32 +// CHECK: = llvm.sitofp %[[I32]] : i32 to !llvm.float + %6 = llvm.sitofp %arg0 : i32 to !llvm.float +// CHECK: %[[FLOAT:.*]] = llvm.uitofp %[[I32]] : i32 to !llvm.float + %7 = llvm.uitofp %arg0 : i32 to !llvm.float +// CHECK: = llvm.fptosi %[[FLOAT]] : !llvm.float to i32 + %8 = llvm.fptosi %7 : !llvm.float to i32 +// CHECK: = llvm.fptoui %[[FLOAT]] : !llvm.float to i32 + %9 = llvm.fptoui %7 : !llvm.float to i32 // CHECK: = llvm.addrspacecast %[[I32PTR]] : !llvm.ptr to !llvm.ptr %10 = llvm.addrspacecast %arg4 : !llvm.ptr to !llvm.ptr llvm.return } // CHECK-LABEL: @vect -func @vect(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) { +func @vect(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) { // CHECK: = llvm.extractelement {{.*}} : !llvm.vec<4 x float> - %0 = llvm.extractelement %arg0[%arg1 : !llvm.i32] : !llvm.vec<4 x float> + %0 = llvm.extractelement %arg0[%arg1 : i32] : !llvm.vec<4 x float> // CHECK: = llvm.insertelement {{.*}} 
-  %1 = llvm.insertelement %arg2, %arg0[%arg1 : !llvm.i32] : !llvm.vec<4 x float>
+  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : !llvm.vec<4 x float>
 // CHECK: = llvm.shufflevector {{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
   %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
 // CHECK: = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x float>
@@ -265,11 +265,11 @@
 }
 
 // CHECK-LABEL: @alloca
-func @alloca(%size : !llvm.i64) {
-  // CHECK: llvm.alloca %{{.*}} x !llvm.i32 : (!llvm.i64) -> !llvm.ptr<i32>
-  llvm.alloca %size x !llvm.i32 {alignment = 0} : (!llvm.i64) -> (!llvm.ptr<i32>)
-  // CHECK: llvm.alloca %{{.*}} x !llvm.i32 {alignment = 8 : i64} : (!llvm.i64) -> !llvm.ptr<i32>
-  llvm.alloca %size x !llvm.i32 {alignment = 8} : (!llvm.i64) -> (!llvm.ptr<i32>)
+func @alloca(%size : i64) {
+  // CHECK: llvm.alloca %{{.*}} x i32 : (i64) -> !llvm.ptr<i32>
+  llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr<i32>)
+  // CHECK: llvm.alloca %{{.*}} x i32 {alignment = 8 : i64} : (i64) -> !llvm.ptr<i32>
+  llvm.alloca %size x i32 {alignment = 8} : (i64) -> (!llvm.ptr<i32>)
   llvm.return
 }
 
@@ -290,38 +290,38 @@
 }
 
 // CHECK-LABEL: @cmpxchg
-func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : !llvm.i32, %new : !llvm.i32) {
-  // CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : !llvm.i32
-  %0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : !llvm.i32
+func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %new : i32) {
+  // CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : i32
+  %0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : i32
   llvm.return
 }
 
 llvm.mlir.global external constant @_ZTIi() : !llvm.ptr<i8>
 llvm.func @bar(!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.ptr<i8>)
-llvm.func @__gxx_personality_v0(...) -> !llvm.i32
+llvm.func @__gxx_personality_v0(...) -> i32
 
 // CHECK-LABEL: @invokeLandingpad
-llvm.func @invokeLandingpad() -> !llvm.i32 attributes { personality = @__gxx_personality_v0 } {
-// CHECK: %[[a0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-// CHECK: %{{.*}} = llvm.mlir.constant(3 : i32) : !llvm.i32
+llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personality_v0 } {
+// CHECK: %[[a0:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %{{.*}} = llvm.mlir.constant(3 : i32) : i32
 // CHECK: %[[a2:.*]] = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
 // CHECK: %[[a3:.*]] = llvm.mlir.null : !llvm.ptr<ptr<i8>>
 // CHECK: %[[a4:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 // CHECK: %[[a5:.*]] = llvm.mlir.addressof @_ZTIi : !llvm.ptr<ptr<i8>>
 // CHECK: %[[a6:.*]] = llvm.bitcast %[[a5]] : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
-// CHECK: %[[a7:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-// CHECK: %[[a8:.*]] = llvm.alloca %[[a7]] x !llvm.i8 : (!llvm.i32) -> !llvm.ptr<i8>
-// CHECK: %{{.*}} = llvm.invoke @foo(%[[a7]]) to ^[[BB2:.*]] unwind ^[[BB1:.*]] : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+// CHECK: %[[a7:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK: %[[a8:.*]] = llvm.alloca %[[a7]] x i8 : (i32) -> !llvm.ptr<i8>
+// CHECK: %{{.*}} = llvm.invoke @foo(%[[a7]]) to ^[[BB2:.*]] unwind ^[[BB1:.*]] : (i32) -> !llvm.struct<(i32, double, i32)>
-  %0 = llvm.mlir.constant(0 : i32) : !llvm.i32
-  %1 = llvm.mlir.constant(3 : i32) : !llvm.i32
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = llvm.mlir.constant(3 : i32) : i32
   %2 = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
   %3 = llvm.mlir.null : !llvm.ptr<ptr<i8>>
   %4 = llvm.mlir.null : !llvm.ptr<i8>
   %5 = llvm.mlir.addressof @_ZTIi : !llvm.ptr<ptr<i8>>
   %6 = llvm.bitcast %5 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
-  %7 = llvm.mlir.constant(1 : i32) : !llvm.i32
-  %8 = llvm.alloca %7 x !llvm.i8 : (!llvm.i32) -> !llvm.ptr<i8>
-  %9 = llvm.invoke @foo(%7) to ^bb2 unwind ^bb1 : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+  %7 = llvm.mlir.constant(1 : i32) : i32
+  %8 = llvm.alloca %7 x i8 : (i32) -> !llvm.ptr<i8>
+  %9 = llvm.invoke @foo(%7) to ^bb2 unwind ^bb1 : (i32) -> !llvm.struct<(i32, double, i32)>
 
 // CHECK: ^[[BB1]]:
 // CHECK: %[[lp:.*]] = llvm.landingpad cleanup (catch %[[a3]] : !llvm.ptr<ptr<i8>>) (catch %[[a6]] : !llvm.ptr<i8>) (filter %[[a2]] : !llvm.array<1 x i8>) : !llvm.struct<(ptr<i8>, i32)>
@@ -331,9 +331,9 @@
   llvm.resume %10 : !llvm.struct<(ptr<i8>, i32)>
 
 // CHECK: ^[[BB2]]:
-// CHECK: llvm.return %[[a7]] : !llvm.i32
+// CHECK: llvm.return %[[a7]] : i32
 ^bb2:
-  llvm.return %7 : !llvm.i32
+  llvm.return %7 : i32
 
 // CHECK: ^[[BB3:.*]]:
 // CHECK: llvm.invoke @bar(%[[a8]], %[[a6]], %[[a4]]) to ^[[BB2]] unwind ^[[BB1]] : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.ptr<i8>) -> ()
@@ -341,19 +341,19 @@
   llvm.invoke @bar(%8, %6, %4) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>, !llvm.ptr<i8>, !llvm.ptr<i8>) -> ()
 
 // CHECK: ^[[BB4:.*]]:
-// CHECK: llvm.return %[[a0]] : !llvm.i32
+// CHECK: llvm.return %[[a0]] : i32
 ^bb4:
-  llvm.return %0 : !llvm.i32
+  llvm.return %0 : i32
 }
 
 // CHECK-LABEL: @useFreezeOp
-func @useFreezeOp(%arg0: !llvm.i32) {
-  // CHECK: = llvm.freeze %[[ARG0:.*]] : !llvm.i32
-  %0 = llvm.freeze %arg0 : !llvm.i32
-  // CHECK: %[[x:.*]] = llvm.mlir.undef : !llvm.i8
-  %1 = llvm.mlir.undef : !llvm.i8
-  // CHECK: = llvm.freeze %[[x]] : !llvm.i8
-  %2 = llvm.freeze %1 : !llvm.i8
+func @useFreezeOp(%arg0: i32) {
+  // CHECK: = llvm.freeze %[[ARG0:.*]] : i32
+  %0 = llvm.freeze %arg0 : i32
+  // CHECK: %[[x:.*]] = llvm.mlir.undef : i8
+  %1 = llvm.mlir.undef : i8
+  // CHECK: = llvm.freeze %[[x]] : i8
+  %2 = llvm.freeze %1 : i8
   return
 }
 
@@ -369,27 +369,27 @@
 }
 
 // CHECK-LABEL: @useInlineAsm
-llvm.func @useInlineAsm(%arg0: !llvm.i32) {
-  // CHECK: llvm.inline_asm {{.*}} (!llvm.i32) -> !llvm.i8
-  %0 = llvm.inline_asm "bswap $0", "=r,r" %arg0 : (!llvm.i32) -> !llvm.i8
+llvm.func @useInlineAsm(%arg0: i32) {
+  // CHECK: llvm.inline_asm {{.*}} (i32) -> i8
+  %0 = llvm.inline_asm "bswap $0", "=r,r" %arg0 : (i32) -> i8
 
-  // CHECK-NEXT: llvm.inline_asm {{.*}} (!llvm.i32, !llvm.i32) -> !llvm.i8
-  %1 = llvm.inline_asm "foo", "bar" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+  // CHECK-NEXT: llvm.inline_asm {{.*}} (i32, i32) -> i8
+  %1 = llvm.inline_asm "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8
 
-  // CHECK-NEXT: llvm.inline_asm has_side_effects {{.*}} (!llvm.i32, !llvm.i32) -> !llvm.i8
-  %2 = llvm.inline_asm has_side_effects "foo", "bar" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+  // CHECK-NEXT: llvm.inline_asm has_side_effects {{.*}} (i32, i32) -> i8
+  %2 = llvm.inline_asm has_side_effects "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8
 
-  // CHECK-NEXT: llvm.inline_asm is_align_stack {{.*}} (!llvm.i32, !llvm.i32) -> !llvm.i8
-  %3 = llvm.inline_asm is_align_stack "foo", "bar" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8
+  // CHECK-NEXT: llvm.inline_asm is_align_stack {{.*}} (i32, i32) -> i8
+  %3 = llvm.inline_asm is_align_stack "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8
 
-  // CHECK-NEXT: llvm.inline_asm "foo", "=r,=r,r" {{.*}} : (!llvm.i32) -> !llvm.struct<(i8, i8)>
-  %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (!llvm.i32) -> !llvm.struct<(i8, i8)>
+  // CHECK-NEXT: llvm.inline_asm "foo", "=r,=r,r" {{.*}} : (i32) -> !llvm.struct<(i8, i8)>
+  %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (i32) -> !llvm.struct<(i8, i8)>
 
   llvm.return
 }
 
 // CHECK-LABEL: @fastmathFlags
-func @fastmathFlags(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32) {
+func @fastmathFlags(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: i32) {
 // CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
 // CHECK: {{.*}} = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
 // CHECK: {{.*}} = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
@@ -407,8 +407,8 @@
 // CHECK: {{.*}} = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
   %6 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-// CHECK: {{.*}} = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
-  %7 = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (!llvm.i32) -> !llvm.struct<(i32, double, i32)>
+// CHECK: {{.*}} = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, double, i32)>
+  %7 = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, double, i32)>
 // CHECK: {{.*}} = llvm.fadd %arg0, %arg1 : !llvm.float
   %8 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<>} : !llvm.float
diff --git a/mlir/test/Dialect/LLVMIR/terminator.mlir b/mlir/test/Dialect/LLVMIR/terminator.mlir
--- a/mlir/test/Dialect/LLVMIR/terminator.mlir
+++ b/mlir/test/Dialect/LLVMIR/terminator.mlir
@@ -11,7 +11,7 @@
 // CHECK: llvm.br
 // CHECK: llvm.cond_br
 // CHECK: llvm.return
-func @control_flow(%cond : !llvm.i1) {
+func @control_flow(%cond : i1) {
   llvm.br ^bb1
 ^bb1:
   llvm.cond_br %cond, ^bb2, ^bb1
diff --git a/mlir/test/Dialect/LLVMIR/types-invalid.mlir b/mlir/test/Dialect/LLVMIR/types-invalid.mlir
--- a/mlir/test/Dialect/LLVMIR/types-invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/types-invalid.mlir
@@ -76,7 +76,7 @@
 // -----
 
 func @unexpected_type() {
-  // expected-error @+1 {{unexpected type, expected i* or keyword}}
+  // expected-error @+1 {{unexpected type, expected keyword}}
   "some.op"() : () -> !llvm.f32
 }
 
@@ -150,3 +150,13 @@
   // expected-error @+1 {{invalid vector element type}}
   "some.op"() : () -> !llvm.vec
 }
+
+// -----
+
+// expected-warning @+1 {{deprecated syntax, drop '!llvm.' for integers}}
+func private @deprecated_int() -> !llvm.i32
+
+// -----
+
+// expected-error @+1 {{unexpected type, expected keyword}}
+func private @unexpected_type() -> !llvm.tensor<*xf32>
diff --git a/mlir/test/Dialect/LLVMIR/types.mlir b/mlir/test/Dialect/LLVMIR/types.mlir
--- a/mlir/test/Dialect/LLVMIR/types.mlir
+++ b/mlir/test/Dialect/LLVMIR/types.mlir
@@ -50,20 +50,20 @@
 // CHECK-LABEL: @integer
 func @integer() {
-  // CHECK: !llvm.i1
-  "some.op"() : () -> !llvm.i1
-  // CHECK: !llvm.i8
-  "some.op"() : () -> !llvm.i8
-  // CHECK: !llvm.i16
-  "some.op"() : () -> !llvm.i16
-  // CHECK: !llvm.i32
-  "some.op"() : () -> !llvm.i32
-  // CHECK: !llvm.i64
-  "some.op"() : () -> !llvm.i64
-  // CHECK: !llvm.i57
-  "some.op"() : () -> !llvm.i57
-  // CHECK: !llvm.i129
-  "some.op"() : () -> !llvm.i129
+  // CHECK: i1
+  "some.op"() : () -> i1
+  // CHECK: i8
+  "some.op"() : () -> i8
+  // CHECK: i16
+  "some.op"() : () -> i16
+  // CHECK: i32
+  "some.op"() : () -> i32
+  // CHECK: i64
+  "some.op"() : () -> i64
+  // CHECK: i57
+  "some.op"() : () -> i57
+  // CHECK: i129
+  "some.op"() : () -> i129
   return
 }
 
@@ -184,7 +184,7 @@
 func @verbose() {
   // CHECK: !llvm.struct<(i64, struct<(float)>)>
-  "some.op"() : () -> !llvm.struct<(!llvm.i64, !llvm.struct<(!llvm.float)>)>
+  "some.op"() : () -> !llvm.struct<(i64, !llvm.struct<(!llvm.float)>)>
   return
 }
 
@@ -195,7 +195,7 @@
 // DialectAsmPrinter to have a mechanism for querying the presence and
 // usability of an alias outside of its `printType` method.
-!baz = type !llvm.i64 +!baz = type i64 !qux = type !llvm.struct<(!baz)> !rec = type !llvm.struct<"a", (ptr>)> diff --git a/mlir/test/Dialect/Linalg/llvm.mlir b/mlir/test/Dialect/Linalg/llvm.mlir --- a/mlir/test/Dialect/Linalg/llvm.mlir +++ b/mlir/test/Dialect/Linalg/llvm.mlir @@ -6,9 +6,9 @@ %R = linalg.range %c0:%arg0:%c1 : !linalg.range return } -// CHECK-LABEL: func @range(%{{.*}}: !llvm.i64) { -// CHECK: llvm.mlir.constant(0 : index) : !llvm.i64 -// CHECK-NEXT: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK-LABEL: func @range(%{{.*}}: i64) { +// CHECK: llvm.mlir.constant(0 : index) : i64 +// CHECK-NEXT: llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(i64, i64, i64)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(i64, i64, i64)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(i64, i64, i64)> @@ -23,8 +23,8 @@ // CHECK: llvm.extractvalue %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK-NEXT: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK-NEXT: llvm.extractvalue %{{.*}}[0] : !llvm.struct<(i64, i64, i64)> -// CHECK-NEXT: llvm.mul %{{.*}}, %{{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.add %{{.*}}, %{{.*}} : !llvm.i64 +// CHECK-NEXT: llvm.mul %{{.*}}, %{{.*}} : i64 +// CHECK-NEXT: llvm.add %{{.*}}, %{{.*}} : i64 // insert offset // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> @@ -34,15 +34,15 @@ // CHECK-NEXT: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(i64, i64, i64)> // get size[0] from parent view // CHECK-NEXT: llvm.extractvalue %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> -// CHECK-NEXT: llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.i64 +// CHECK-NEXT: llvm.icmp "slt" %{{.*}}, %{{.*}} : i64 +// CHECK-NEXT: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, i64 // compute size[0] bounded by parent view's size[0] -// CHECK-NEXT: llvm.sub %{{.*}}, %{{.*}} : !llvm.i64 +// CHECK-NEXT: llvm.sub %{{.*}}, %{{.*}} : i64 // bound below by 0 -// CHECK-NEXT: llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm.i64 -// CHECK-NEXT: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : !llvm.i1, !llvm.i64 +// CHECK-NEXT: llvm.icmp "slt" %{{.*}}, %{{.*}} : i64 +// CHECK-NEXT: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, i64 // compute stride[0] using bounded size -// CHECK-NEXT: llvm.mul %{{.*}}, %{{.*}} : !llvm.i64 +// CHECK-NEXT: llvm.mul %{{.*}}, %{{.*}} : i64 // insert size and stride // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> @@ -85,25 +85,25 @@ // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> // CHECK: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: 
llvm.mlir.constant(3 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(3 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(4 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 2] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 3] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(5 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 4] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(60 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(60 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(20 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(20 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(5 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 2] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(5 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 3] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> func @reshape_static_collapse(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32> { @@ -121,17 +121,17 @@ // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> // CHECK: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<5 x i64>, array<5 x i64>)> // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.mlir.constant(3 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(3 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(4 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(5 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 2] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.mlir.constant(20 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(20 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.mlir.constant(5 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(5 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue 
%{{.*}}, %{{.*}}[4, 2] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> func @reshape_fold_zero_dim(%arg0 : memref<1x1xf32>) -> memref { @@ -159,11 +159,11 @@ // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64)> // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> -// CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 +// CHECK: llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir --- a/mlir/test/Dialect/OpenMP/ops.mlir +++ b/mlir/test/Dialect/OpenMP/ops.mlir @@ -29,19 +29,19 @@ } // CHECK-LABEL: func @omp_flush -// CHECK-SAME: ([[ARG0:%.*]]: !llvm.i32) { -func @omp_flush(%arg0 : !llvm.i32) -> () { +// CHECK-SAME: ([[ARG0:%.*]]: i32) { +func @omp_flush(%arg0 : i32) -> () { // Test without data var // CHECK: omp.flush omp.flush // Test with one data var - // CHECK: omp.flush([[ARG0]] : !llvm.i32) - omp.flush(%arg0 : !llvm.i32) + // CHECK: omp.flush([[ARG0]] : i32) + omp.flush(%arg0 : i32) // Test with two data var - // CHECK: omp.flush([[ARG0]], [[ARG0]] : !llvm.i32, !llvm.i32) - omp.flush(%arg0, %arg0: !llvm.i32, !llvm.i32) + // CHECK: omp.flush([[ARG0]], [[ARG0]] : i32, i32) + omp.flush(%arg0, %arg0: i32, i32) return } diff --git a/mlir/test/Dialect/SPIRV/IR/types.mlir b/mlir/test/Dialect/SPIRV/IR/types.mlir --- a/mlir/test/Dialect/SPIRV/IR/types.mlir +++ b/mlir/test/Dialect/SPIRV/IR/types.mlir @@ -72,8 +72,8 @@ // ----- -// expected-error @+1 {{cannot use '!llvm.i32' to compose SPIR-V types}} -func private @llvm_type(!spv.array<4x!llvm.i32>) -> () +// expected-error @+1 {{cannot use '!llvm.struct<()>' to compose SPIR-V types}} +func private @llvm_type(!spv.array<4x!llvm.struct<()>>) -> () // ----- diff --git a/mlir/test/Target/arm-sve.mlir b/mlir/test/Target/arm-sve.mlir --- a/mlir/test/Target/arm-sve.mlir +++ b/mlir/test/Target/arm-sve.mlir @@ -49,8 +49,8 @@ } // CHECK-LABEL: define i64 @get_vector_scale() -llvm.func @get_vector_scale() -> !llvm.i64 { +llvm.func @get_vector_scale() -> i64 { // CHECK: call i64 @llvm.vscale.i64() - %0 = "llvm_arm_sve.vscale"() : () -> !llvm.i64 - llvm.return %0 : !llvm.i64 + %0 = "llvm_arm_sve.vscale"() : () -> i64 + llvm.return %0 : i64 } diff --git a/mlir/test/Target/avx512.mlir b/mlir/test/Target/avx512.mlir --- a/mlir/test/Target/avx512.mlir +++ b/mlir/test/Target/avx512.mlir @@ -2,30 +2,30 @@ // CHECK-LABEL: define <16 x float> @LLVM_x86_avx512_mask_ps_512 llvm.func @LLVM_x86_avx512_mask_ps_512(%a: !llvm.vec<16 x float>, - %b: !llvm.i32, - %c: !llvm.i16) + %b: i32, + %c: i16) -> (!llvm.vec<16 x float>) { // CHECK: call <16 x float> 
@llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> %0 = "llvm_avx512.mask.rndscale.ps.512"(%a, %b, %a, %c, %b) : - (!llvm.vec<16 x float>, !llvm.i32, !llvm.vec<16 x float>, !llvm.i16, !llvm.i32) -> !llvm.vec<16 x float> + (!llvm.vec<16 x float>, i32, !llvm.vec<16 x float>, i16, i32) -> !llvm.vec<16 x float> // CHECK: call <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float> %1 = "llvm_avx512.mask.scalef.ps.512"(%a, %a, %a, %c, %b) : - (!llvm.vec<16 x float>, !llvm.vec<16 x float>, !llvm.vec<16 x float>, !llvm.i16, !llvm.i32) -> !llvm.vec<16 x float> + (!llvm.vec<16 x float>, !llvm.vec<16 x float>, !llvm.vec<16 x float>, i16, i32) -> !llvm.vec<16 x float> llvm.return %1: !llvm.vec<16 x float> } // CHECK-LABEL: define <8 x double> @LLVM_x86_avx512_mask_pd_512 llvm.func @LLVM_x86_avx512_mask_pd_512(%a: !llvm.vec<8 x double>, - %b: !llvm.i32, - %c: !llvm.i8) + %b: i32, + %c: i8) -> (!llvm.vec<8 x double>) { // CHECK: call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> %0 = "llvm_avx512.mask.rndscale.pd.512"(%a, %b, %a, %c, %b) : - (!llvm.vec<8 x double>, !llvm.i32, !llvm.vec<8 x double>, !llvm.i8, !llvm.i32) -> !llvm.vec<8 x double> + (!llvm.vec<8 x double>, i32, !llvm.vec<8 x double>, i8, i32) -> !llvm.vec<8 x double> // CHECK: call <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double> %1 = "llvm_avx512.mask.scalef.pd.512"(%a, %a, %a, %c, %b) : - (!llvm.vec<8 x double>, !llvm.vec<8 x double>, !llvm.vec<8 x double>, !llvm.i8, !llvm.i32) -> !llvm.vec<8 x double> + (!llvm.vec<8 x double>, !llvm.vec<8 x double>, !llvm.vec<8 x double>, i8, i32) -> !llvm.vec<8 x double> llvm.return %1: !llvm.vec<8 x double> } diff --git a/mlir/test/Target/import.ll b/mlir/test/Target/import.ll --- a/mlir/test/Target/import.ll +++ b/mlir/test/Target/import.ll @@ -16,8 +16,8 @@ @g4 = external global i32, align 8 ; CHECK: llvm.mlir.global internal constant @int_gep() : !llvm.ptr { ; CHECK-DAG: %[[addr:[0-9]+]] = llvm.mlir.addressof @g4 : !llvm.ptr -; CHECK-DAG: %[[c2:[0-9]+]] = llvm.mlir.constant(2 : i32) : !llvm.i32 -; CHECK-NEXT: %[[gepinit:[0-9]+]] = llvm.getelementptr %[[addr]][%[[c2]]] : (!llvm.ptr, !llvm.i32) -> !llvm.ptr +; CHECK-DAG: %[[c2:[0-9]+]] = llvm.mlir.constant(2 : i32) : i32 +; CHECK-NEXT: %[[gepinit:[0-9]+]] = llvm.getelementptr %[[addr]][%[[c2]]] : (!llvm.ptr, i32) -> !llvm.ptr ; CHECK-NEXT: llvm.return %[[gepinit]] : !llvm.ptr ; CHECK-NEXT: } @int_gep = internal constant i32* getelementptr (i32, i32* @g4, i32 2) @@ -26,27 +26,27 @@ ; Linkage attribute. 
; -; CHECK: llvm.mlir.global private @private(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global private @private(42 : i32) : i32 @private = private global i32 42 -; CHECK: llvm.mlir.global internal @internal(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global internal @internal(42 : i32) : i32 @internal = internal global i32 42 -; CHECK: llvm.mlir.global available_externally @available_externally(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global available_externally @available_externally(42 : i32) : i32 @available_externally = available_externally global i32 42 -; CHECK: llvm.mlir.global linkonce @linkonce(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global linkonce @linkonce(42 : i32) : i32 @linkonce = linkonce global i32 42 -; CHECK: llvm.mlir.global weak @weak(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global weak @weak(42 : i32) : i32 @weak = weak global i32 42 -; CHECK: llvm.mlir.global common @common(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global common @common(42 : i32) : i32 @common = common global i32 42 -; CHECK: llvm.mlir.global appending @appending(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global appending @appending(42 : i32) : i32 @appending = appending global i32 42 -; CHECK: llvm.mlir.global extern_weak @extern_weak() : !llvm.i32 +; CHECK: llvm.mlir.global extern_weak @extern_weak() : i32 @extern_weak = extern_weak global i32 -; CHECK: llvm.mlir.global linkonce_odr @linkonce_odr(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global linkonce_odr @linkonce_odr(42 : i32) : i32 @linkonce_odr = linkonce_odr global i32 42 -; CHECK: llvm.mlir.global weak_odr @weak_odr(42 : i32) : !llvm.i32 +; CHECK: llvm.mlir.global weak_odr @weak_odr(42 : i32) : i32 @weak_odr = weak_odr global i32 42 -; CHECK: llvm.mlir.global external @external() : !llvm.i32 +; CHECK: llvm.mlir.global external @external() : i32 @external = external global i32 ; @@ -73,54 +73,54 @@ ret void } -; CHECK: llvm.func @fe(!llvm.i32) -> !llvm.float +; CHECK: llvm.func @fe(i32) -> !llvm.float declare float @fe(i32) ; FIXME: function attributes. 
-; CHECK-LABEL: llvm.func internal @f1(%arg0: !llvm.i64) -> !llvm.i32 { -; CHECK-DAG: %[[c2:[0-9]+]] = llvm.mlir.constant(2 : i32) : !llvm.i32 -; CHECK-DAG: %[[c42:[0-9]+]] = llvm.mlir.constant(42 : i32) : !llvm.i32 -; CHECK-DAG: %[[c1:[0-9]+]] = llvm.mlir.constant(true) : !llvm.i1 -; CHECK-DAG: %[[c43:[0-9]+]] = llvm.mlir.constant(43 : i32) : !llvm.i32 +; CHECK-LABEL: llvm.func internal @f1(%arg0: i64) -> i32 { +; CHECK-DAG: %[[c2:[0-9]+]] = llvm.mlir.constant(2 : i32) : i32 +; CHECK-DAG: %[[c42:[0-9]+]] = llvm.mlir.constant(42 : i32) : i32 +; CHECK-DAG: %[[c1:[0-9]+]] = llvm.mlir.constant(true) : i1 +; CHECK-DAG: %[[c43:[0-9]+]] = llvm.mlir.constant(43 : i32) : i32 define internal dso_local i32 @f1(i64 %a) norecurse { entry: -; CHECK: %{{[0-9]+}} = llvm.inttoptr %arg0 : !llvm.i64 to !llvm.ptr +; CHECK: %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr %aa = inttoptr i64 %a to i64* ; %[[addrof:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr ; %[[addrof2:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr -; %{{[0-9]+}} = llvm.inttoptr %arg0 : !llvm.i64 to !llvm.ptr -; %{{[0-9]+}} = llvm.ptrtoint %[[addrof2]] : !llvm.ptr to !llvm.i64 -; %{{[0-9]+}} = llvm.getelementptr %[[addrof]][%3] : (!llvm.ptr, !llvm.i32) -> !llvm.ptr +; %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr +; %{{[0-9]+}} = llvm.ptrtoint %[[addrof2]] : !llvm.ptr to i64 +; %{{[0-9]+}} = llvm.getelementptr %[[addrof]][%3] : (!llvm.ptr, i32) -> !llvm.ptr %bb = ptrtoint double* @g2 to i64 %cc = getelementptr double, double* @g2, i32 2 -; CHECK: %[[b:[0-9]+]] = llvm.trunc %arg0 : !llvm.i64 to !llvm.i32 +; CHECK: %[[b:[0-9]+]] = llvm.trunc %arg0 : i64 to i32 %b = trunc i64 %a to i32 -; CHECK: %[[c:[0-9]+]] = llvm.call @fe(%[[b]]) : (!llvm.i32) -> !llvm.float +; CHECK: %[[c:[0-9]+]] = llvm.call @fe(%[[b]]) : (i32) -> !llvm.float %c = call float @fe(i32 %b) -; CHECK: %[[d:[0-9]+]] = llvm.fptosi %[[c]] : !llvm.float to !llvm.i32 +; CHECK: %[[d:[0-9]+]] = llvm.fptosi %[[c]] : !llvm.float to i32 %d = fptosi float %c to i32 ; FIXME: icmp should return i1. -; CHECK: %[[e:[0-9]+]] = llvm.icmp "ne" %[[d]], %[[c2]] : !llvm.i32 +; CHECK: %[[e:[0-9]+]] = llvm.icmp "ne" %[[d]], %[[c2]] : i32 %e = icmp ne i32 %d, 2 ; CHECK: llvm.cond_br %[[e]], ^bb1, ^bb2 br i1 %e, label %if.then, label %if.end ; CHECK: ^bb1: if.then: -; CHECK: llvm.return %[[c42]] : !llvm.i32 +; CHECK: llvm.return %[[c42]] : i32 ret i32 42 ; CHECK: ^bb2: if.end: -; CHECK: %[[orcond:[0-9]+]] = llvm.or %[[e]], %[[c1]] : !llvm.i1 +; CHECK: %[[orcond:[0-9]+]] = llvm.or %[[e]], %[[c1]] : i1 %or.cond = or i1 %e, 1 ; CHECK: llvm.return %[[c43]] ret i32 43 } ; Test that instructions that dominate can be out of sequential order. -; CHECK-LABEL: llvm.func @f2(%arg0: !llvm.i64) -> !llvm.i64 { -; CHECK-DAG: %[[c3:[0-9]+]] = llvm.mlir.constant(3 : i64) : !llvm.i64 +; CHECK-LABEL: llvm.func @f2(%arg0: i64) -> i64 { +; CHECK-DAG: %[[c3:[0-9]+]] = llvm.mlir.constant(3 : i64) : i64 define i64 @f2(i64 %a) noduplicate { entry: ; CHECK: llvm.br ^bb2 @@ -133,21 +133,21 @@ ; CHECK: ^bb2: next: -; CHECK: %1 = llvm.add %arg0, %[[c3]] : !llvm.i64 +; CHECK: %1 = llvm.add %arg0, %[[c3]] : i64 %b = add i64 %a, 3 ; CHECK: llvm.br ^bb1 br label %end } ; Test arguments/phis. 
-; CHECK-LABEL: llvm.func @f2_phis(%arg0: !llvm.i64) -> !llvm.i64 { -; CHECK-DAG: %[[c3:[0-9]+]] = llvm.mlir.constant(3 : i64) : !llvm.i64 +; CHECK-LABEL: llvm.func @f2_phis(%arg0: i64) -> i64 { +; CHECK-DAG: %[[c3:[0-9]+]] = llvm.mlir.constant(3 : i64) : i64 define i64 @f2_phis(i64 %a) noduplicate { entry: ; CHECK: llvm.br ^bb2 br label %next -; CHECK: ^bb1(%1: !llvm.i64): +; CHECK: ^bb1(%1: i64): end: %c = phi i64 [ %b, %next ] ; CHECK: llvm.return %1 @@ -155,7 +155,7 @@ ; CHECK: ^bb2: next: -; CHECK: %2 = llvm.add %arg0, %[[c3]] : !llvm.i64 +; CHECK: %2 = llvm.add %arg0, %[[c3]] : i64 %b = add i64 %a, 3 ; CHECK: llvm.br ^bb1 br label %end @@ -200,7 +200,7 @@ ; CHECK-LABEL: llvm.func @f6(%arg0: !llvm.ptr>) define void @f6(void (i16) *%fn) { -; CHECK: %[[c:[0-9]+]] = llvm.mlir.constant(0 : i16) : !llvm.i16 +; CHECK: %[[c:[0-9]+]] = llvm.mlir.constant(0 : i16) : i16 ; CHECK: llvm.call %arg0(%[[c]]) call void %fn(i16 0) ret void @@ -280,7 +280,7 @@ ; CHECK-LABEL: @invokeLandingpad define i32 @invokeLandingpad() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { ; CHECK: %[[a1:[0-9]+]] = llvm.bitcast %{{[0-9]+}} : !llvm.ptr>> to !llvm.ptr - ; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x !llvm.i8 : (!llvm.i32) -> !llvm.ptr + ; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x i8 : (i32) -> !llvm.ptr %1 = alloca i8 ; CHECK: llvm.invoke @foo(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> () invoke void @foo(i8* %1) to label %4 unwind label %2 @@ -294,7 +294,7 @@ resume { i8*, i32 } %3 ; CHECK: ^bb2: - ; CHECK: llvm.return %{{[0-9]+}} : !llvm.i32 + ; CHECK: llvm.return %{{[0-9]+}} : i32 ret i32 1 ; CHECK: ^bb3: @@ -302,16 +302,16 @@ %6 = invoke i8* @bar(i8* %1) to label %4 unwind label %2 ; CHECK: ^bb4: - ; CHECK: llvm.return %{{[0-9]+}} : !llvm.i32 + ; CHECK: llvm.return %{{[0-9]+}} : i32 ret i32 0 } ;CHECK-LABEL: @useFreezeOp define i32 @useFreezeOp(i32 %x) { - ;CHECK: %{{[0-9]+}} = llvm.freeze %{{[0-9a-z]+}} : !llvm.i32 + ;CHECK: %{{[0-9]+}} = llvm.freeze %{{[0-9a-z]+}} : i32 %1 = freeze i32 %x %2 = add i8 10, 10 - ;CHECK: %{{[0-9]+}} = llvm.freeze %{{[0-9]+}} : !llvm.i8 + ;CHECK: %{{[0-9]+}} = llvm.freeze %{{[0-9]+}} : i8 %3 = freeze i8 %2 %poison = add nsw i1 0, undef ret i32 0 diff --git a/mlir/test/Target/llvmir-intrinsics.mlir b/mlir/test/Target/llvmir-intrinsics.mlir --- a/mlir/test/Target/llvmir-intrinsics.mlir +++ b/mlir/test/Target/llvmir-intrinsics.mlir @@ -2,9 +2,9 @@ // CHECK-LABEL: @intrinsics llvm.func @intrinsics(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8 x float>, %arg3: !llvm.ptr) { - %c3 = llvm.mlir.constant(3 : i32) : !llvm.i32 - %c1 = llvm.mlir.constant(1 : i32) : !llvm.i32 - %c0 = llvm.mlir.constant(0 : i32) : !llvm.i32 + %c3 = llvm.mlir.constant(3 : i32) : i32 + %c1 = llvm.mlir.constant(1 : i32) : i32 + %c0 = llvm.mlir.constant(0 : i32) : i32 // CHECK: call float @llvm.fmuladd.f32 "llvm.intr.fmuladd"(%arg0, %arg1, %arg0) : (!llvm.float, !llvm.float, !llvm.float) -> !llvm.float // CHECK: call <8 x float> @llvm.fmuladd.v8f32 @@ -14,7 +14,7 @@ // CHECK: call <8 x float> @llvm.fma.v8f32 "llvm.intr.fma"(%arg2, %arg2, %arg2) : (!llvm.vec<8 x float>, !llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float> // CHECK: call void @llvm.prefetch.p0i8(i8* %3, i32 0, i32 3, i32 1) - "llvm.intr.prefetch"(%arg3, %c0, %c3, %c1) : (!llvm.ptr, !llvm.i32, !llvm.i32, !llvm.i32) -> () + "llvm.intr.prefetch"(%arg3, %c0, %c3, %c1) : (!llvm.ptr, i32, i32, i32) -> () llvm.return } @@ -127,18 +127,18 @@ } // CHECK-LABEL: @bitreverse_test -llvm.func 
@bitreverse_test(%arg0: !llvm.i32, %arg1: !llvm.vec<8 x i32>) { +llvm.func @bitreverse_test(%arg0: i32, %arg1: !llvm.vec<8 x i32>) { // CHECK: call i32 @llvm.bitreverse.i32 - "llvm.intr.bitreverse"(%arg0) : (!llvm.i32) -> !llvm.i32 + "llvm.intr.bitreverse"(%arg0) : (i32) -> i32 // CHECK: call <8 x i32> @llvm.bitreverse.v8i32 "llvm.intr.bitreverse"(%arg1) : (!llvm.vec<8 x i32>) -> !llvm.vec<8 x i32> llvm.return } // CHECK-LABEL: @ctpop_test -llvm.func @ctpop_test(%arg0: !llvm.i32, %arg1: !llvm.vec<8 x i32>) { +llvm.func @ctpop_test(%arg0: i32, %arg1: !llvm.vec<8 x i32>) { // CHECK: call i32 @llvm.ctpop.i32 - "llvm.intr.ctpop"(%arg0) : (!llvm.i32) -> !llvm.i32 + "llvm.intr.ctpop"(%arg0) : (i32) -> i32 // CHECK: call <8 x i32> @llvm.ctpop.v8i32 "llvm.intr.ctpop"(%arg1) : (!llvm.vec<8 x i32>) -> !llvm.vec<8 x i32> llvm.return @@ -163,18 +163,18 @@ } // CHECK-LABEL: @smax_test -llvm.func @smax_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { +llvm.func @smax_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { // CHECK: call i32 @llvm.smax.i32 - "llvm.intr.smax"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.i32 + "llvm.intr.smax"(%arg0, %arg1) : (i32, i32) -> i32 // CHECK: call <8 x i32> @llvm.smax.v8i32 "llvm.intr.smax"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.vec<8 x i32> llvm.return } // CHECK-LABEL: @smin_test -llvm.func @smin_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { +llvm.func @smin_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { // CHECK: call i32 @llvm.smin.i32 - "llvm.intr.smin"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.i32 + "llvm.intr.smin"(%arg0, %arg1) : (i32, i32) -> i32 // CHECK: call <8 x i32> @llvm.smin.v8i32 "llvm.intr.smin"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.vec<8 x i32> llvm.return @@ -183,25 +183,25 @@ // CHECK-LABEL: @vector_reductions llvm.func @vector_reductions(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>, %arg2: !llvm.vec<8 x i32>) { // CHECK: call i32 @llvm.vector.reduce.add.v8i32 - "llvm.intr.vector.reduce.add"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.add"(%arg2) : (!llvm.vec<8 x i32>) -> i32 // CHECK: call i32 @llvm.vector.reduce.and.v8i32 - "llvm.intr.vector.reduce.and"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.and"(%arg2) : (!llvm.vec<8 x i32>) -> i32 // CHECK: call float @llvm.vector.reduce.fmax.v8f32 "llvm.intr.vector.reduce.fmax"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.float // CHECK: call float @llvm.vector.reduce.fmin.v8f32 "llvm.intr.vector.reduce.fmin"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.float // CHECK: call i32 @llvm.vector.reduce.mul.v8i32 - "llvm.intr.vector.reduce.mul"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.mul"(%arg2) : (!llvm.vec<8 x i32>) -> i32 // CHECK: call i32 @llvm.vector.reduce.or.v8i32 - "llvm.intr.vector.reduce.or"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.or"(%arg2) : (!llvm.vec<8 x i32>) -> i32 // CHECK: call i32 @llvm.vector.reduce.smax.v8i32 - "llvm.intr.vector.reduce.smax"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.smax"(%arg2) : (!llvm.vec<8 x i32>) -> i32 // CHECK: call i32 @llvm.vector.reduce.smin.v8i32 - "llvm.intr.vector.reduce.smin"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.smin"(%arg2) : (!llvm.vec<8 x i32>) -> i32 // 
CHECK: call i32 @llvm.vector.reduce.umax.v8i32 - "llvm.intr.vector.reduce.umax"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.umax"(%arg2) : (!llvm.vec<8 x i32>) -> i32 // CHECK: call i32 @llvm.vector.reduce.umin.v8i32 - "llvm.intr.vector.reduce.umin"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.umin"(%arg2) : (!llvm.vec<8 x i32>) -> i32 // CHECK: call float @llvm.vector.reduce.fadd.v8f32 "llvm.intr.vector.reduce.fadd"(%arg0, %arg1) : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float // CHECK: call float @llvm.vector.reduce.fmul.v8f32 @@ -211,14 +211,14 @@ // CHECK: call reassoc float @llvm.vector.reduce.fmul.v8f32 "llvm.intr.vector.reduce.fmul"(%arg0, %arg1) {reassoc = true} : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float // CHECK: call i32 @llvm.vector.reduce.xor.v8i32 - "llvm.intr.vector.reduce.xor"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 + "llvm.intr.vector.reduce.xor"(%arg2) : (!llvm.vec<8 x i32>) -> i32 llvm.return } // CHECK-LABEL: @matrix_intrinsics // 4x16 16x3 llvm.func @matrix_intrinsics(%A: !llvm.vec<64 x float>, %B: !llvm.vec<48 x float>, - %ptr: !llvm.ptr, %stride: !llvm.i64) { + %ptr: !llvm.ptr, %stride: i64) { // CHECK: call <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float> %0, <48 x float> %1, i32 4, i32 16, i32 3) %C = llvm.intr.matrix.multiply %A, %B { lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32} : @@ -229,18 +229,18 @@ // CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32(float* align 4 %2, i64 %3, i1 false, i32 3, i32 16) %E = llvm.intr.matrix.column.major.load %ptr, { isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} : - !llvm.vec<48 x float> from !llvm.ptr stride !llvm.i64 + !llvm.vec<48 x float> from !llvm.ptr stride i64 // CHECK: call void @llvm.matrix.column.major.store.v48f32(<48 x float> %7, float* align 4 %2, i64 %3, i1 false, i32 3, i32 16) llvm.intr.matrix.column.major.store %E, %ptr, { isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} : - !llvm.vec<48 x float> to !llvm.ptr stride !llvm.i64 + !llvm.vec<48 x float> to !llvm.ptr stride i64 llvm.return } // CHECK-LABEL: @get_active_lane_mask -llvm.func @get_active_lane_mask(%base: !llvm.i64, %n: !llvm.i64) -> (!llvm.vec<7 x i1>) { +llvm.func @get_active_lane_mask(%base: i64, %n: i64) -> (!llvm.vec<7 x i1>) { // CHECK: call <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64 %0, i64 %1) - %0 = llvm.intr.get.active.lane.mask %base, %n : !llvm.i64, !llvm.i64 to !llvm.vec<7 x i1> + %0 = llvm.intr.get.active.lane.mask %base, %n : i64, i64 to !llvm.vec<7 x i1> llvm.return %0 : !llvm.vec<7 x i1> } @@ -284,64 +284,64 @@ } // CHECK-LABEL: @memcpy_test -llvm.func @memcpy_test(%arg0: !llvm.i32, %arg1: !llvm.i1, %arg2: !llvm.ptr, %arg3: !llvm.ptr) { +llvm.func @memcpy_test(%arg0: i32, %arg1: i1, %arg2: !llvm.ptr, %arg3: !llvm.ptr) { // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %{{.*}}, i8* %{{.*}}, i32 %{{.*}}, i1 %{{.*}}) - "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg1) : (!llvm.ptr, !llvm.ptr, !llvm.i32, !llvm.i1) -> () - %sz = llvm.mlir.constant(10: i64) : !llvm.i64 + "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg1) : (!llvm.ptr, !llvm.ptr, i32, i1) -> () + %sz = llvm.mlir.constant(10: i64) : i64 // CHECK: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %{{.*}}, i8* %{{.*}}, i64 10, i1 %{{.*}}) - "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg1) : (!llvm.ptr, !llvm.ptr, !llvm.i64, !llvm.i1) -> () + "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg1) : (!llvm.ptr, !llvm.ptr, i64, i1) -> () llvm.return } 
// CHECK-LABEL: @sadd_with_overflow_test -llvm.func @sadd_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { +llvm.func @sadd_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { // CHECK: call { i32, i1 } @llvm.sadd.with.overflow.i32 - "llvm.intr.sadd.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)> + "llvm.intr.sadd.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)> // CHECK: call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32 "llvm.intr.sadd.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)> llvm.return } // CHECK-LABEL: @uadd_with_overflow_test -llvm.func @uadd_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { +llvm.func @uadd_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { // CHECK: call { i32, i1 } @llvm.uadd.with.overflow.i32 - "llvm.intr.uadd.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)> + "llvm.intr.uadd.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)> // CHECK: call { <8 x i32>, <8 x i1> } @llvm.uadd.with.overflow.v8i32 "llvm.intr.uadd.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)> llvm.return } // CHECK-LABEL: @ssub_with_overflow_test -llvm.func @ssub_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { +llvm.func @ssub_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { // CHECK: call { i32, i1 } @llvm.ssub.with.overflow.i32 - "llvm.intr.ssub.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)> + "llvm.intr.ssub.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)> // CHECK: call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32 "llvm.intr.ssub.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)> llvm.return } // CHECK-LABEL: @usub_with_overflow_test -llvm.func @usub_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { +llvm.func @usub_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { // CHECK: call { i32, i1 } @llvm.usub.with.overflow.i32 - "llvm.intr.usub.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)> + "llvm.intr.usub.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)> // CHECK: call { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32 "llvm.intr.usub.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)> llvm.return } // CHECK-LABEL: @smul_with_overflow_test -llvm.func @smul_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { +llvm.func @smul_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { // CHECK: call { i32, i1 } @llvm.smul.with.overflow.i32 - "llvm.intr.smul.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)> + "llvm.intr.smul.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)> // CHECK: call { <8 x i32>, <8 x i1> } 
@llvm.smul.with.overflow.v8i32 "llvm.intr.smul.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)> llvm.return } // CHECK-LABEL: @umul_with_overflow_test -llvm.func @umul_with_overflow_test(%arg0: !llvm.i32, %arg1: !llvm.i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { +llvm.func @umul_with_overflow_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !llvm.vec<8 x i32>) { // CHECK: call { i32, i1 } @llvm.umul.with.overflow.i32 - "llvm.intr.umul.with.overflow"(%arg0, %arg1) : (!llvm.i32, !llvm.i32) -> !llvm.struct<(i32, i1)> + "llvm.intr.umul.with.overflow"(%arg0, %arg1) : (i32, i32) -> !llvm.struct<(i32, i1)> // CHECK: call { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32 "llvm.intr.umul.with.overflow"(%arg2, %arg3) : (!llvm.vec<8 x i32>, !llvm.vec<8 x i32>) -> !llvm.struct<(vec<8 x i32>, vec<8 x i1>)> llvm.return diff --git a/mlir/test/Target/llvmir-types.mlir b/mlir/test/Target/llvmir-types.mlir --- a/mlir/test/Target/llvmir-types.mlir +++ b/mlir/test/Target/llvmir-types.mlir @@ -28,17 +28,17 @@ // // CHECK: declare void @f_void_i32(i32) -llvm.func @f_void_i32(!llvm.i32) -> !llvm.void +llvm.func @f_void_i32(i32) -> !llvm.void // CHECK: declare i32 @f_i32_empty() -llvm.func @f_i32_empty() -> !llvm.i32 +llvm.func @f_i32_empty() -> i32 // CHECK: declare i32 @f_i32_half_bfloat_float_double(half, bfloat, float, double) -llvm.func @f_i32_half_bfloat_float_double(!llvm.half, !llvm.bfloat, !llvm.float, !llvm.double) -> !llvm.i32 +llvm.func @f_i32_half_bfloat_float_double(!llvm.half, !llvm.bfloat, !llvm.float, !llvm.double) -> i32 // CHECK: declare i32 @f_i32_i32_i32(i32, i32) -llvm.func @f_i32_i32_i32(!llvm.i32, !llvm.i32) -> !llvm.i32 +llvm.func @f_i32_i32_i32(i32, i32) -> i32 // CHECK: declare void @f_void_variadic(...) llvm.func @f_void_variadic(...) // CHECK: declare void @f_void_i32_i32_variadic(i32, i32, ...) -llvm.func @f_void_i32_i32_variadic(!llvm.i32, !llvm.i32, ...) +llvm.func @f_void_i32_i32_variadic(i32, i32, ...) // CHECK: declare i32 (i32)* @f_f_i32_i32() llvm.func @f_f_i32_i32() -> !llvm.ptr> @@ -47,19 +47,19 @@ // // CHECK: declare i1 @return_i1() -llvm.func @return_i1() -> !llvm.i1 +llvm.func @return_i1() -> i1 // CHECK: declare i8 @return_i8() -llvm.func @return_i8() -> !llvm.i8 +llvm.func @return_i8() -> i8 // CHECK: declare i16 @return_i16() -llvm.func @return_i16() -> !llvm.i16 +llvm.func @return_i16() -> i16 // CHECK: declare i32 @return_i32() -llvm.func @return_i32() -> !llvm.i32 +llvm.func @return_i32() -> i32 // CHECK: declare i64 @return_i64() -llvm.func @return_i64() -> !llvm.i64 +llvm.func @return_i64() -> i64 // CHECK: declare i57 @return_i57() -llvm.func @return_i57() -> !llvm.i57 +llvm.func @return_i57() -> i57 // CHECK: declare i129 @return_i129() -llvm.func @return_i129() -> !llvm.i129 +llvm.func @return_i129() -> i129 // // Pointers. 
diff --git a/mlir/test/Target/llvmir.mlir b/mlir/test/Target/llvmir.mlir --- a/mlir/test/Target/llvmir.mlir +++ b/mlir/test/Target/llvmir.mlir @@ -1,16 +1,16 @@ // RUN: mlir-translate -mlir-to-llvmir -split-input-file %s | FileCheck %s // CHECK: @i32_global = internal global i32 42 -llvm.mlir.global internal @i32_global(42: i32) : !llvm.i32 +llvm.mlir.global internal @i32_global(42: i32) : i32 // CHECK: @i32_const = internal constant i53 52 -llvm.mlir.global internal constant @i32_const(52: i53) : !llvm.i53 +llvm.mlir.global internal constant @i32_const(52: i53) : i53 // CHECK: @int_global_array = internal global [3 x i32] [i32 62, i32 62, i32 62] llvm.mlir.global internal @int_global_array(dense<62> : vector<3xi32>) : !llvm.array<3 x i32> // CHECK: @i32_global_addr_space = internal addrspace(7) global i32 62 -llvm.mlir.global internal @i32_global_addr_space(62: i32) {addr_space = 7 : i32} : !llvm.i32 +llvm.mlir.global internal @i32_global_addr_space(62: i32) {addr_space = 7 : i32} : i32 // CHECK: @float_global = internal global float 0.000000e+00 llvm.mlir.global internal @float_global(0.0: f32) : !llvm.float @@ -22,13 +22,13 @@ llvm.mlir.global internal constant @string_const("foobar") : !llvm.array<6 x i8> // CHECK: @int_global_undef = internal global i64 undef -llvm.mlir.global internal @int_global_undef() : !llvm.i64 +llvm.mlir.global internal @int_global_undef() : i64 // CHECK: @int_gep = internal constant i32* getelementptr (i32, i32* @i32_global, i32 2) llvm.mlir.global internal constant @int_gep() : !llvm.ptr { %addr = llvm.mlir.addressof @i32_global : !llvm.ptr - %_c0 = llvm.mlir.constant(2: i32) :!llvm.i32 - %gepinit = llvm.getelementptr %addr[%_c0] : (!llvm.ptr, !llvm.i32) -> !llvm.ptr + %_c0 = llvm.mlir.constant(2: i32) :i32 + %gepinit = llvm.getelementptr %addr[%_c0] : (!llvm.ptr, i32) -> !llvm.ptr llvm.return %gepinit : !llvm.ptr } @@ -37,27 +37,27 @@ // // CHECK: @private = private global i32 42 -llvm.mlir.global private @private(42 : i32) : !llvm.i32 +llvm.mlir.global private @private(42 : i32) : i32 // CHECK: @internal = internal global i32 42 -llvm.mlir.global internal @internal(42 : i32) : !llvm.i32 +llvm.mlir.global internal @internal(42 : i32) : i32 // CHECK: @available_externally = available_externally global i32 42 -llvm.mlir.global available_externally @available_externally(42 : i32) : !llvm.i32 +llvm.mlir.global available_externally @available_externally(42 : i32) : i32 // CHECK: @linkonce = linkonce global i32 42 -llvm.mlir.global linkonce @linkonce(42 : i32) : !llvm.i32 +llvm.mlir.global linkonce @linkonce(42 : i32) : i32 // CHECK: @weak = weak global i32 42 -llvm.mlir.global weak @weak(42 : i32) : !llvm.i32 +llvm.mlir.global weak @weak(42 : i32) : i32 // CHECK: @common = common global i32 42 -llvm.mlir.global common @common(42 : i32) : !llvm.i32 +llvm.mlir.global common @common(42 : i32) : i32 // CHECK: @appending = appending global i32 42 -llvm.mlir.global appending @appending(42 : i32) : !llvm.i32 +llvm.mlir.global appending @appending(42 : i32) : i32 // CHECK: @extern_weak = extern_weak global i32 -llvm.mlir.global extern_weak @extern_weak() : !llvm.i32 +llvm.mlir.global extern_weak @extern_weak() : i32 // CHECK: @linkonce_odr = linkonce_odr global i32 42 -llvm.mlir.global linkonce_odr @linkonce_odr(42 : i32) : !llvm.i32 +llvm.mlir.global linkonce_odr @linkonce_odr(42 : i32) : i32 // CHECK: @weak_odr = weak_odr global i32 42 -llvm.mlir.global weak_odr @weak_odr(42 : i32) : !llvm.i32 +llvm.mlir.global weak_odr @weak_odr(42 : i32) : i32 // CHECK: @external = 
external global i32 -llvm.mlir.global external @external() : !llvm.i32 +llvm.mlir.global external @external() : i32 // @@ -66,7 +66,7 @@ // // CHECK: declare i8* @malloc(i64) -llvm.func @malloc(!llvm.i64) -> !llvm.ptr +llvm.func @malloc(i64) -> !llvm.ptr // CHECK: declare void @free(i8*) @@ -92,15 +92,15 @@ // Check the contracted form of load from array constants. // CHECK: load i8, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @string_const, i64 0, i64 0) %2 = llvm.mlir.addressof @string_const : !llvm.ptr> - %c0 = llvm.mlir.constant(0 : index) : !llvm.i64 - %3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr>, !llvm.i64, !llvm.i64) -> !llvm.ptr + %c0 = llvm.mlir.constant(0 : index) : i64 + %3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr>, i64, i64) -> !llvm.ptr %4 = llvm.load %3 : !llvm.ptr llvm.return } // CHECK-LABEL: declare void @body(i64) -llvm.func @body(!llvm.i64) +llvm.func @body(i64) // CHECK-LABEL: define void @simple_loop() @@ -112,16 +112,16 @@ // CHECK: [[SIMPLE_bb1]]: // CHECK-NEXT: br label %[[SIMPLE_bb2:[0-9]+]] ^bb1: // pred: ^bb0 - %0 = llvm.mlir.constant(1 : index) : !llvm.i64 - %1 = llvm.mlir.constant(42 : index) : !llvm.i64 - llvm.br ^bb2(%0 : !llvm.i64) + %0 = llvm.mlir.constant(1 : index) : i64 + %1 = llvm.mlir.constant(42 : index) : i64 + llvm.br ^bb2(%0 : i64) // CHECK: [[SIMPLE_bb2]]: // CHECK-NEXT: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %[[SIMPLE_bb3:[0-9]+]] ], [ 1, %[[SIMPLE_bb1]] ] // CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 42 // CHECK-NEXT: br i1 %{{[0-9]+}}, label %[[SIMPLE_bb3]], label %[[SIMPLE_bb4:[0-9]+]] -^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb3 - %3 = llvm.icmp "slt" %2, %1 : !llvm.i64 +^bb2(%2: i64): // 2 preds: ^bb1, ^bb3 + %3 = llvm.icmp "slt" %2, %1 : i64 llvm.cond_br %3, ^bb3, ^bb4 // CHECK: [[SIMPLE_bb3]]: @@ -129,10 +129,10 @@ // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1 // CHECK-NEXT: br label %[[SIMPLE_bb2]] ^bb3: // pred: ^bb2 - llvm.call @body(%2) : (!llvm.i64) -> () - %4 = llvm.mlir.constant(1 : index) : !llvm.i64 - %5 = llvm.add %2, %4 : !llvm.i64 - llvm.br ^bb2(%5 : !llvm.i64) + llvm.call @body(%2) : (i64) -> () + %4 = llvm.mlir.constant(1 : index) : i64 + %5 = llvm.add %2, %4 : i64 + llvm.br ^bb2(%5 : i64) // CHECK: [[SIMPLE_bb4]]: // CHECK-NEXT: ret void @@ -168,29 +168,29 @@ } // CHECK-LABEL: declare i64 @body_args(i64) -llvm.func @body_args(!llvm.i64) -> !llvm.i64 +llvm.func @body_args(i64) -> i64 // CHECK-LABEL: declare i32 @other(i64, i32) -llvm.func @other(!llvm.i64, !llvm.i32) -> !llvm.i32 +llvm.func @other(i64, i32) -> i32 // CHECK-LABEL: define i32 @func_args(i32 {{%.*}}, i32 {{%.*}}) // CHECK-NEXT: br label %[[ARGS_bb1:[0-9]+]] -llvm.func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 { - %0 = llvm.mlir.constant(0 : i32) : !llvm.i32 +llvm.func @func_args(%arg0: i32, %arg1: i32) -> i32 { + %0 = llvm.mlir.constant(0 : i32) : i32 llvm.br ^bb1 // CHECK: [[ARGS_bb1]]: // CHECK-NEXT: br label %[[ARGS_bb2:[0-9]+]] ^bb1: // pred: ^bb0 - %1 = llvm.mlir.constant(0 : index) : !llvm.i64 - %2 = llvm.mlir.constant(42 : index) : !llvm.i64 - llvm.br ^bb2(%1 : !llvm.i64) + %1 = llvm.mlir.constant(0 : index) : i64 + %2 = llvm.mlir.constant(42 : index) : i64 + llvm.br ^bb2(%1 : i64) // CHECK: [[ARGS_bb2]]: // CHECK-NEXT: %5 = phi i64 [ %12, %[[ARGS_bb3:[0-9]+]] ], [ 0, %[[ARGS_bb1]] ] // CHECK-NEXT: %6 = icmp slt i64 %5, 42 // CHECK-NEXT: br i1 %6, label %[[ARGS_bb3]], label %[[ARGS_bb4:[0-9]+]] -^bb2(%3: !llvm.i64): // 2 preds: ^bb1, ^bb3 - %4 = llvm.icmp "slt" %3, %2 : !llvm.i64 +^bb2(%3: i64): // 2 preds: 
^bb1, ^bb3 + %4 = llvm.icmp "slt" %3, %2 : i64 llvm.cond_br %4, ^bb3, ^bb4 // CHECK: [[ARGS_bb3]]: @@ -201,31 +201,31 @@ // CHECK-NEXT: %12 = add i64 %5, 1 // CHECK-NEXT: br label %[[ARGS_bb2]] ^bb3: // pred: ^bb2 - %5 = llvm.call @body_args(%3) : (!llvm.i64) -> !llvm.i64 - %6 = llvm.call @other(%5, %arg0) : (!llvm.i64, !llvm.i32) -> !llvm.i32 - %7 = llvm.call @other(%5, %6) : (!llvm.i64, !llvm.i32) -> !llvm.i32 - %8 = llvm.call @other(%5, %arg1) : (!llvm.i64, !llvm.i32) -> !llvm.i32 - %9 = llvm.mlir.constant(1 : index) : !llvm.i64 - %10 = llvm.add %3, %9 : !llvm.i64 - llvm.br ^bb2(%10 : !llvm.i64) + %5 = llvm.call @body_args(%3) : (i64) -> i64 + %6 = llvm.call @other(%5, %arg0) : (i64, i32) -> i32 + %7 = llvm.call @other(%5, %6) : (i64, i32) -> i32 + %8 = llvm.call @other(%5, %arg1) : (i64, i32) -> i32 + %9 = llvm.mlir.constant(1 : index) : i64 + %10 = llvm.add %3, %9 : i64 + llvm.br ^bb2(%10 : i64) // CHECK: [[ARGS_bb4]]: // CHECK-NEXT: %14 = call i32 @other(i64 0, i32 0) // CHECK-NEXT: ret i32 %14 ^bb4: // pred: ^bb2 - %11 = llvm.mlir.constant(0 : index) : !llvm.i64 - %12 = llvm.call @other(%11, %0) : (!llvm.i64, !llvm.i32) -> !llvm.i32 - llvm.return %12 : !llvm.i32 + %11 = llvm.mlir.constant(0 : index) : i64 + %12 = llvm.call @other(%11, %0) : (i64, i32) -> i32 + llvm.return %12 : i32 } // CHECK: declare void @pre(i64) -llvm.func @pre(!llvm.i64) +llvm.func @pre(i64) // CHECK: declare void @body2(i64, i64) -llvm.func @body2(!llvm.i64, !llvm.i64) +llvm.func @body2(i64, i64) // CHECK: declare void @post(i64) -llvm.func @post(!llvm.i64) +llvm.func @post(i64) // CHECK-LABEL: define void @imperfectly_nested_loops() // CHECK-NEXT: br label %[[IMPER_bb1:[0-9]+]] @@ -235,38 +235,38 @@ // CHECK: [[IMPER_bb1]]: // CHECK-NEXT: br label %[[IMPER_bb2:[0-9]+]] ^bb1: // pred: ^bb0 - %0 = llvm.mlir.constant(0 : index) : !llvm.i64 - %1 = llvm.mlir.constant(42 : index) : !llvm.i64 - llvm.br ^bb2(%0 : !llvm.i64) + %0 = llvm.mlir.constant(0 : index) : i64 + %1 = llvm.mlir.constant(42 : index) : i64 + llvm.br ^bb2(%0 : i64) // CHECK: [[IMPER_bb2]]: // CHECK-NEXT: %3 = phi i64 [ %13, %[[IMPER_bb7:[0-9]+]] ], [ 0, %[[IMPER_bb1]] ] // CHECK-NEXT: %4 = icmp slt i64 %3, 42 // CHECK-NEXT: br i1 %4, label %[[IMPER_bb3:[0-9]+]], label %[[IMPER_bb8:[0-9]+]] -^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb7 - %3 = llvm.icmp "slt" %2, %1 : !llvm.i64 +^bb2(%2: i64): // 2 preds: ^bb1, ^bb7 + %3 = llvm.icmp "slt" %2, %1 : i64 llvm.cond_br %3, ^bb3, ^bb8 // CHECK: [[IMPER_bb3]]: // CHECK-NEXT: call void @pre(i64 %3) // CHECK-NEXT: br label %[[IMPER_bb4:[0-9]+]] ^bb3: // pred: ^bb2 - llvm.call @pre(%2) : (!llvm.i64) -> () + llvm.call @pre(%2) : (i64) -> () llvm.br ^bb4 // CHECK: [[IMPER_bb4]]: // CHECK-NEXT: br label %[[IMPER_bb5:[0-9]+]] ^bb4: // pred: ^bb3 - %4 = llvm.mlir.constant(7 : index) : !llvm.i64 - %5 = llvm.mlir.constant(56 : index) : !llvm.i64 - llvm.br ^bb5(%4 : !llvm.i64) + %4 = llvm.mlir.constant(7 : index) : i64 + %5 = llvm.mlir.constant(56 : index) : i64 + llvm.br ^bb5(%4 : i64) // CHECK: [[IMPER_bb5]]: // CHECK-NEXT: %8 = phi i64 [ %11, %[[IMPER_bb6:[0-9]+]] ], [ 7, %[[IMPER_bb4]] ] // CHECK-NEXT: %9 = icmp slt i64 %8, 56 // CHECK-NEXT: br i1 %9, label %[[IMPER_bb6]], label %[[IMPER_bb7]] -^bb5(%6: !llvm.i64): // 2 preds: ^bb4, ^bb6 - %7 = llvm.icmp "slt" %6, %5 : !llvm.i64 +^bb5(%6: i64): // 2 preds: ^bb4, ^bb6 + %7 = llvm.icmp "slt" %6, %5 : i64 llvm.cond_br %7, ^bb6, ^bb7 // CHECK: [[IMPER_bb6]]: @@ -274,20 +274,20 @@ // CHECK-NEXT: %11 = add i64 %8, 2 // CHECK-NEXT: br label %[[IMPER_bb5]] ^bb6: // pred: 
^bb5 - llvm.call @body2(%2, %6) : (!llvm.i64, !llvm.i64) -> () - %8 = llvm.mlir.constant(2 : index) : !llvm.i64 - %9 = llvm.add %6, %8 : !llvm.i64 - llvm.br ^bb5(%9 : !llvm.i64) + llvm.call @body2(%2, %6) : (i64, i64) -> () + %8 = llvm.mlir.constant(2 : index) : i64 + %9 = llvm.add %6, %8 : i64 + llvm.br ^bb5(%9 : i64) // CHECK: [[IMPER_bb7]]: // CHECK-NEXT: call void @post(i64 %3) // CHECK-NEXT: %13 = add i64 %3, 1 // CHECK-NEXT: br label %[[IMPER_bb2]] ^bb7: // pred: ^bb5 - llvm.call @post(%2) : (!llvm.i64) -> () - %10 = llvm.mlir.constant(1 : index) : !llvm.i64 - %11 = llvm.add %2, %10 : !llvm.i64 - llvm.br ^bb2(%11 : !llvm.i64) + llvm.call @post(%2) : (i64) -> () + %10 = llvm.mlir.constant(1 : index) : i64 + %11 = llvm.add %2, %10 : i64 + llvm.br ^bb2(%11 : i64) // CHECK: [[IMPER_bb8]]: // CHECK-NEXT: ret void @@ -296,10 +296,10 @@ } // CHECK: declare void @mid(i64) -llvm.func @mid(!llvm.i64) +llvm.func @mid(i64) // CHECK: declare void @body3(i64, i64) -llvm.func @body3(!llvm.i64, !llvm.i64) +llvm.func @body3(i64, i64) // A complete function transformation check. // CHECK-LABEL: define void @more_imperfectly_nested_loops() @@ -346,47 +346,47 @@ llvm.func @more_imperfectly_nested_loops() { llvm.br ^bb1 ^bb1: // pred: ^bb0 - %0 = llvm.mlir.constant(0 : index) : !llvm.i64 - %1 = llvm.mlir.constant(42 : index) : !llvm.i64 - llvm.br ^bb2(%0 : !llvm.i64) -^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb11 - %3 = llvm.icmp "slt" %2, %1 : !llvm.i64 + %0 = llvm.mlir.constant(0 : index) : i64 + %1 = llvm.mlir.constant(42 : index) : i64 + llvm.br ^bb2(%0 : i64) +^bb2(%2: i64): // 2 preds: ^bb1, ^bb11 + %3 = llvm.icmp "slt" %2, %1 : i64 llvm.cond_br %3, ^bb3, ^bb12 ^bb3: // pred: ^bb2 - llvm.call @pre(%2) : (!llvm.i64) -> () + llvm.call @pre(%2) : (i64) -> () llvm.br ^bb4 ^bb4: // pred: ^bb3 - %4 = llvm.mlir.constant(7 : index) : !llvm.i64 - %5 = llvm.mlir.constant(56 : index) : !llvm.i64 - llvm.br ^bb5(%4 : !llvm.i64) -^bb5(%6: !llvm.i64): // 2 preds: ^bb4, ^bb6 - %7 = llvm.icmp "slt" %6, %5 : !llvm.i64 + %4 = llvm.mlir.constant(7 : index) : i64 + %5 = llvm.mlir.constant(56 : index) : i64 + llvm.br ^bb5(%4 : i64) +^bb5(%6: i64): // 2 preds: ^bb4, ^bb6 + %7 = llvm.icmp "slt" %6, %5 : i64 llvm.cond_br %7, ^bb6, ^bb7 ^bb6: // pred: ^bb5 - llvm.call @body2(%2, %6) : (!llvm.i64, !llvm.i64) -> () - %8 = llvm.mlir.constant(2 : index) : !llvm.i64 - %9 = llvm.add %6, %8 : !llvm.i64 - llvm.br ^bb5(%9 : !llvm.i64) + llvm.call @body2(%2, %6) : (i64, i64) -> () + %8 = llvm.mlir.constant(2 : index) : i64 + %9 = llvm.add %6, %8 : i64 + llvm.br ^bb5(%9 : i64) ^bb7: // pred: ^bb5 - llvm.call @mid(%2) : (!llvm.i64) -> () + llvm.call @mid(%2) : (i64) -> () llvm.br ^bb8 ^bb8: // pred: ^bb7 - %10 = llvm.mlir.constant(18 : index) : !llvm.i64 - %11 = llvm.mlir.constant(37 : index) : !llvm.i64 - llvm.br ^bb9(%10 : !llvm.i64) -^bb9(%12: !llvm.i64): // 2 preds: ^bb8, ^bb10 - %13 = llvm.icmp "slt" %12, %11 : !llvm.i64 + %10 = llvm.mlir.constant(18 : index) : i64 + %11 = llvm.mlir.constant(37 : index) : i64 + llvm.br ^bb9(%10 : i64) +^bb9(%12: i64): // 2 preds: ^bb8, ^bb10 + %13 = llvm.icmp "slt" %12, %11 : i64 llvm.cond_br %13, ^bb10, ^bb11 ^bb10: // pred: ^bb9 - llvm.call @body3(%2, %12) : (!llvm.i64, !llvm.i64) -> () - %14 = llvm.mlir.constant(3 : index) : !llvm.i64 - %15 = llvm.add %12, %14 : !llvm.i64 - llvm.br ^bb9(%15 : !llvm.i64) + llvm.call @body3(%2, %12) : (i64, i64) -> () + %14 = llvm.mlir.constant(3 : index) : i64 + %15 = llvm.add %12, %14 : i64 + llvm.br ^bb9(%15 : i64) ^bb11: // pred: ^bb9 - llvm.call @post(%2) : 
(!llvm.i64) -> () - %16 = llvm.mlir.constant(1 : index) : !llvm.i64 - %17 = llvm.add %2, %16 : !llvm.i64 - llvm.br ^bb2(%17 : !llvm.i64) + llvm.call @post(%2) : (i64) -> () + %16 = llvm.mlir.constant(1 : index) : i64 + %17 = llvm.add %2, %16 : i64 + llvm.br ^bb2(%17 : i64) ^bb12: // pred: ^bb2 llvm.return } @@ -411,13 +411,13 @@ // CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 400) // CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float* // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0 - %0 = llvm.mlir.constant(10 : index) : !llvm.i64 - %1 = llvm.mlir.constant(10 : index) : !llvm.i64 - %2 = llvm.mul %0, %1 : !llvm.i64 + %0 = llvm.mlir.constant(10 : index) : i64 + %1 = llvm.mlir.constant(10 : index) : i64 + %2 = llvm.mul %0, %1 : i64 %3 = llvm.mlir.undef : !llvm.struct<(ptr)> - %4 = llvm.mlir.constant(4 : index) : !llvm.i64 - %5 = llvm.mul %2, %4 : !llvm.i64 - %6 = llvm.call @malloc(%5) : (!llvm.i64) -> !llvm.ptr + %4 = llvm.mlir.constant(4 : index) : i64 + %5 = llvm.mul %2, %4 : i64 + %6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr %7 = llvm.bitcast %6 : !llvm.ptr to !llvm.ptr %8 = llvm.insertvalue %7, %3[0] : !llvm.struct<(ptr)> // CHECK-NEXT: ret void @@ -425,7 +425,7 @@ } // CHECK-LABEL: declare i64 @get_index() -llvm.func @get_index() -> !llvm.i64 +llvm.func @get_index() -> i64 // CHECK-LABEL: define void @store_load_static() llvm.func @store_load_static() { @@ -433,79 +433,79 @@ // CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 40) // CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float* // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0 - %0 = llvm.mlir.constant(10 : index) : !llvm.i64 + %0 = llvm.mlir.constant(10 : index) : i64 %1 = llvm.mlir.undef : !llvm.struct<(ptr)> - %2 = llvm.mlir.constant(4 : index) : !llvm.i64 - %3 = llvm.mul %0, %2 : !llvm.i64 - %4 = llvm.call @malloc(%3) : (!llvm.i64) -> !llvm.ptr + %2 = llvm.mlir.constant(4 : index) : i64 + %3 = llvm.mul %0, %2 : i64 + %4 = llvm.call @malloc(%3) : (i64) -> !llvm.ptr %5 = llvm.bitcast %4 : !llvm.ptr to !llvm.ptr %6 = llvm.insertvalue %5, %1[0] : !llvm.struct<(ptr)> %7 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float llvm.br ^bb1 ^bb1: // pred: ^bb0 - %8 = llvm.mlir.constant(0 : index) : !llvm.i64 - %9 = llvm.mlir.constant(10 : index) : !llvm.i64 - llvm.br ^bb2(%8 : !llvm.i64) + %8 = llvm.mlir.constant(0 : index) : i64 + %9 = llvm.mlir.constant(10 : index) : i64 + llvm.br ^bb2(%8 : i64) // CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ] -^bb2(%10: !llvm.i64): // 2 preds: ^bb1, ^bb3 +^bb2(%10: i64): // 2 preds: ^bb1, ^bb3 // CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 10 - %11 = llvm.icmp "slt" %10, %9 : !llvm.i64 + %11 = llvm.icmp "slt" %10, %9 : i64 // CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}} llvm.cond_br %11, ^bb3, ^bb4 ^bb3: // pred: ^bb2 // CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}} // CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}} - %12 = llvm.mlir.constant(10 : index) : !llvm.i64 + %12 = llvm.mlir.constant(10 : index) : i64 %13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr)> - %14 = llvm.getelementptr %13[%10] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %14 = llvm.getelementptr %13[%10] : (!llvm.ptr, i64) -> !llvm.ptr llvm.store %7, %14 : !llvm.ptr - %15 = llvm.mlir.constant(1 : index) : !llvm.i64 + %15 = llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: %{{[0-9]+}} 
= add i64 %{{[0-9]+}}, 1 - %16 = llvm.add %10, %15 : !llvm.i64 + %16 = llvm.add %10, %15 : i64 // CHECK-NEXT: br label %{{[0-9]+}} - llvm.br ^bb2(%16 : !llvm.i64) + llvm.br ^bb2(%16 : i64) ^bb4: // pred: ^bb2 llvm.br ^bb5 ^bb5: // pred: ^bb4 - %17 = llvm.mlir.constant(0 : index) : !llvm.i64 - %18 = llvm.mlir.constant(10 : index) : !llvm.i64 - llvm.br ^bb6(%17 : !llvm.i64) + %17 = llvm.mlir.constant(0 : index) : i64 + %18 = llvm.mlir.constant(10 : index) : i64 + llvm.br ^bb6(%17 : i64) // CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ] -^bb6(%19: !llvm.i64): // 2 preds: ^bb5, ^bb7 +^bb6(%19: i64): // 2 preds: ^bb5, ^bb7 // CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 10 - %20 = llvm.icmp "slt" %19, %18 : !llvm.i64 + %20 = llvm.icmp "slt" %19, %18 : i64 // CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}} llvm.cond_br %20, ^bb7, ^bb8 ^bb7: // pred: ^bb6 // CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}} // CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}} - %21 = llvm.mlir.constant(10 : index) : !llvm.i64 + %21 = llvm.mlir.constant(10 : index) : i64 %22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr)> - %23 = llvm.getelementptr %22[%19] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %23 = llvm.getelementptr %22[%19] : (!llvm.ptr, i64) -> !llvm.ptr %24 = llvm.load %23 : !llvm.ptr - %25 = llvm.mlir.constant(1 : index) : !llvm.i64 + %25 = llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1 - %26 = llvm.add %19, %25 : !llvm.i64 + %26 = llvm.add %19, %25 : i64 // CHECK-NEXT: br label %{{[0-9]+}} - llvm.br ^bb6(%26 : !llvm.i64) + llvm.br ^bb6(%26 : i64) ^bb8: // pred: ^bb6 // CHECK: ret void llvm.return } // CHECK-LABEL: define void @store_load_dynamic(i64 {{%.*}}) -llvm.func @store_load_dynamic(%arg0: !llvm.i64) { +llvm.func @store_load_dynamic(%arg0: i64) { // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4 // CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}}) // CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float* // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1 %0 = llvm.mlir.undef : !llvm.struct<(ptr, i64)> - %1 = llvm.mlir.constant(4 : index) : !llvm.i64 - %2 = llvm.mul %arg0, %1 : !llvm.i64 - %3 = llvm.call @malloc(%2) : (!llvm.i64) -> !llvm.ptr + %1 = llvm.mlir.constant(4 : index) : i64 + %2 = llvm.mul %arg0, %1 : i64 + %3 = llvm.call @malloc(%2) : (i64) -> !llvm.ptr %4 = llvm.bitcast %3 : !llvm.ptr to !llvm.ptr %5 = llvm.insertvalue %4, %0[0] : !llvm.struct<(ptr, i64)> %6 = llvm.insertvalue %arg0, %5[1] : !llvm.struct<(ptr, i64)> @@ -513,12 +513,12 @@ // CHECK-NEXT: br label %{{[0-9]+}} llvm.br ^bb1 ^bb1: // pred: ^bb0 - %8 = llvm.mlir.constant(0 : index) : !llvm.i64 - llvm.br ^bb2(%8 : !llvm.i64) + %8 = llvm.mlir.constant(0 : index) : i64 + llvm.br ^bb2(%8 : i64) // CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ] -^bb2(%9: !llvm.i64): // 2 preds: ^bb1, ^bb3 +^bb2(%9: i64): // 2 preds: ^bb1, ^bb3 // CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, %{{[0-9]+}} - %10 = llvm.icmp "slt" %9, %arg0 : !llvm.i64 + %10 = llvm.icmp "slt" %9, %arg0 : i64 // CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}} llvm.cond_br %10, ^bb3, ^bb4 ^bb3: // pred: ^bb2 @@ -528,22 +528,22 @@ // CHECK-NEXT: store float 1.000000e+00, float* 
%{{[0-9]+}}
  %11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr, i64)>
  %12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr, i64)>
- %13 = llvm.getelementptr %12[%9] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr
+ %13 = llvm.getelementptr %12[%9] : (!llvm.ptr, i64) -> !llvm.ptr
  llvm.store %7, %13 : !llvm.ptr
- %14 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %14 = llvm.mlir.constant(1 : index) : i64
  // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
- %15 = llvm.add %9, %14 : !llvm.i64
+ %15 = llvm.add %9, %14 : i64
  // CHECK-NEXT: br label %{{[0-9]+}}
- llvm.br ^bb2(%15 : !llvm.i64)
+ llvm.br ^bb2(%15 : i64)
^bb4: // pred: ^bb2
  llvm.br ^bb5
^bb5: // pred: ^bb4
- %16 = llvm.mlir.constant(0 : index) : !llvm.i64
- llvm.br ^bb6(%16 : !llvm.i64)
+ %16 = llvm.mlir.constant(0 : index) : i64
+ llvm.br ^bb6(%16 : i64)
  // CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
-^bb6(%17: !llvm.i64): // 2 preds: ^bb5, ^bb7
+^bb6(%17: i64): // 2 preds: ^bb5, ^bb7
  // CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, %{{[0-9]+}}
- %18 = llvm.icmp "slt" %17, %arg0 : !llvm.i64
+ %18 = llvm.icmp "slt" %17, %arg0 : i64
  // CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
  llvm.cond_br %18, ^bb7, ^bb8
^bb7: // pred: ^bb6
@@ -553,21 +553,21 @@
  // CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
  %19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr, i64)>
  %20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr, i64)>
- %21 = llvm.getelementptr %20[%17] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr
+ %21 = llvm.getelementptr %20[%17] : (!llvm.ptr, i64) -> !llvm.ptr
  %22 = llvm.load %21 : !llvm.ptr
- %23 = llvm.mlir.constant(1 : index) : !llvm.i64
+ %23 = llvm.mlir.constant(1 : index) : i64
  // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
- %24 = llvm.add %17, %23 : !llvm.i64
+ %24 = llvm.add %17, %23 : i64
  // CHECK-NEXT: br label %{{[0-9]+}}
- llvm.br ^bb6(%24 : !llvm.i64)
+ llvm.br ^bb6(%24 : i64)
^bb8: // pred: ^bb6
  // CHECK: ret void
  llvm.return
}

// CHECK-LABEL: define void @store_load_mixed(i64 {{%.*}})
-llvm.func @store_load_mixed(%arg0: !llvm.i64) {
- %0 = llvm.mlir.constant(10 : index) : !llvm.i64
+llvm.func @store_load_mixed(%arg0: i64) {
+ %0 = llvm.mlir.constant(10 : index) : i64
  // CHECK-NEXT: %{{[0-9]+}} = mul i64 2, %{{[0-9]+}}
  // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
  // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 10
@@ -577,15 +577,15 @@
  // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } undef, float* %{{[0-9]+}}, 0
  // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
  // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 10, 2
- %1 = llvm.mlir.constant(2 : index) : !llvm.i64
- %2 = llvm.mlir.constant(4 : index) : !llvm.i64
- %3 = llvm.mul %1, %arg0 : !llvm.i64
- %4 = llvm.mul %3, %2 : !llvm.i64
- %5 = llvm.mul %4, %0 : !llvm.i64
+ %1 = llvm.mlir.constant(2 : index) : i64
+ %2 = llvm.mlir.constant(4 : index) : i64
+ %3 = llvm.mul %1, %arg0 : i64
+ %4 = llvm.mul %3, %2 : i64
+ %5 = llvm.mul %4, %0 : i64
  %6 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i64)>
- %7 = llvm.mlir.constant(4 : index) : !llvm.i64
- %8 = llvm.mul %5, %7 : !llvm.i64
- %9 = llvm.call @malloc(%8) : (!llvm.i64) -> !llvm.ptr
+ %7 = llvm.mlir.constant(4 : index) : i64
+ %8 = llvm.mul %5, %7 : i64
+ %9 = llvm.call @malloc(%8) : (i64) -> !llvm.ptr
  %10 = llvm.bitcast %9 : !llvm.ptr to !llvm.ptr
  %11 = llvm.insertvalue %10, %6[0] : !llvm.struct<(ptr, i64, i64)>
  %12 = llvm.insertvalue %arg0, %11[1] : !llvm.struct<(ptr, i64,
i64)> @@ -593,12 +593,12 @@ // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index() // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index() - %14 = llvm.mlir.constant(1 : index) : !llvm.i64 - %15 = llvm.mlir.constant(2 : index) : !llvm.i64 - %16 = llvm.call @get_index() : () -> !llvm.i64 - %17 = llvm.call @get_index() : () -> !llvm.i64 + %14 = llvm.mlir.constant(1 : index) : i64 + %15 = llvm.mlir.constant(2 : index) : i64 + %16 = llvm.call @get_index() : () -> i64 + %17 = llvm.call @get_index() : () -> i64 %18 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float - %19 = llvm.mlir.constant(2 : index) : !llvm.i64 + %19 = llvm.mlir.constant(2 : index) : i64 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2 // CHECK-NEXT: %{{[0-9]+}} = mul i64 1, %{{[0-9]+}} @@ -611,16 +611,16 @@ // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}} // CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}} %20 = llvm.extractvalue %13[1] : !llvm.struct<(ptr, i64, i64)> - %21 = llvm.mlir.constant(4 : index) : !llvm.i64 + %21 = llvm.mlir.constant(4 : index) : i64 %22 = llvm.extractvalue %13[2] : !llvm.struct<(ptr, i64, i64)> - %23 = llvm.mul %14, %20 : !llvm.i64 - %24 = llvm.add %23, %15 : !llvm.i64 - %25 = llvm.mul %24, %21 : !llvm.i64 - %26 = llvm.add %25, %16 : !llvm.i64 - %27 = llvm.mul %26, %22 : !llvm.i64 - %28 = llvm.add %27, %17 : !llvm.i64 + %23 = llvm.mul %14, %20 : i64 + %24 = llvm.add %23, %15 : i64 + %25 = llvm.mul %24, %21 : i64 + %26 = llvm.add %25, %16 : i64 + %27 = llvm.mul %26, %22 : i64 + %28 = llvm.add %27, %17 : i64 %29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr, i64, i64)> - %30 = llvm.getelementptr %29[%28] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %30 = llvm.getelementptr %29[%28] : (!llvm.ptr, i64) -> !llvm.ptr llvm.store %18, %30 : !llvm.ptr // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2 @@ -633,18 +633,18 @@ // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}} // CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}} - %31 = llvm.mlir.constant(2 : index) : !llvm.i64 + %31 = llvm.mlir.constant(2 : index) : i64 %32 = llvm.extractvalue %13[1] : !llvm.struct<(ptr, i64, i64)> - %33 = llvm.mlir.constant(4 : index) : !llvm.i64 + %33 = llvm.mlir.constant(4 : index) : i64 %34 = llvm.extractvalue %13[2] : !llvm.struct<(ptr, i64, i64)> - %35 = llvm.mul %17, %32 : !llvm.i64 - %36 = llvm.add %35, %16 : !llvm.i64 - %37 = llvm.mul %36, %33 : !llvm.i64 - %38 = llvm.add %37, %15 : !llvm.i64 - %39 = llvm.mul %38, %34 : !llvm.i64 - %40 = llvm.add %39, %14 : !llvm.i64 + %35 = llvm.mul %17, %32 : i64 + %36 = llvm.add %35, %16 : i64 + %37 = llvm.mul %36, %33 : i64 + %38 = llvm.add %37, %15 : i64 + %39 = llvm.mul %38, %34 : i64 + %40 = llvm.add %39, %14 : i64 %41 = llvm.extractvalue %13[0] : !llvm.struct<(ptr, i64, i64)> - %42 = llvm.getelementptr %41[%40] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %42 = llvm.getelementptr %41[%40] : (!llvm.ptr, i64) -> !llvm.ptr %43 = llvm.load %42 : !llvm.ptr // CHECK-NEXT: ret void llvm.return @@ -652,16 +652,16 @@ // CHECK-LABEL: define { float*, i64 } @memref_args_rets({ float* } {{%.*}}, { float*, i64 } {{%.*}}, { float*, i64 } {{%.*}}) llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr)>, %arg1: !llvm.struct<(ptr, i64)>, 
%arg2: !llvm.struct<(ptr, i64)>) -> !llvm.struct<(ptr, i64)> { - %0 = llvm.mlir.constant(7 : index) : !llvm.i64 + %0 = llvm.mlir.constant(7 : index) : i64 // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index() - %1 = llvm.call @get_index() : () -> !llvm.i64 + %1 = llvm.call @get_index() : () -> i64 %2 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7 // CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}} - %3 = llvm.mlir.constant(10 : index) : !llvm.i64 + %3 = llvm.mlir.constant(10 : index) : i64 %4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr)> - %5 = llvm.getelementptr %4[%0] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %5 = llvm.getelementptr %4[%0] : (!llvm.ptr, i64) -> !llvm.ptr llvm.store %2, %5 : !llvm.ptr // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0 @@ -669,7 +669,7 @@ // CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}} %6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr, i64)> %7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr, i64)> - %8 = llvm.getelementptr %7[%0] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %8 = llvm.getelementptr %7[%0] : (!llvm.ptr, i64) -> !llvm.ptr llvm.store %2, %8 : !llvm.ptr // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1 // CHECK-NEXT: %{{[0-9]+}} = mul i64 7, %{{[0-9]+}} @@ -677,12 +677,12 @@ // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}} // CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}} - %9 = llvm.mlir.constant(10 : index) : !llvm.i64 + %9 = llvm.mlir.constant(10 : index) : i64 %10 = llvm.extractvalue %arg2[1] : !llvm.struct<(ptr, i64)> - %11 = llvm.mul %0, %10 : !llvm.i64 - %12 = llvm.add %11, %1 : !llvm.i64 + %11 = llvm.mul %0, %10 : i64 + %12 = llvm.add %11, %1 : i64 %13 = llvm.extractvalue %arg2[0] : !llvm.struct<(ptr, i64)> - %14 = llvm.getelementptr %13[%12] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %14 = llvm.getelementptr %13[%12] : (!llvm.ptr, i64) -> !llvm.ptr llvm.store %2, %14 : !llvm.ptr // CHECK-NEXT: %{{[0-9]+}} = mul i64 10, %{{[0-9]+}} // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4 @@ -690,12 +690,12 @@ // CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float* // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1 - %15 = llvm.mlir.constant(10 : index) : !llvm.i64 - %16 = llvm.mul %15, %1 : !llvm.i64 + %15 = llvm.mlir.constant(10 : index) : i64 + %16 = llvm.mul %15, %1 : i64 %17 = llvm.mlir.undef : !llvm.struct<(ptr, i64)> - %18 = llvm.mlir.constant(4 : index) : !llvm.i64 - %19 = llvm.mul %16, %18 : !llvm.i64 - %20 = llvm.call @malloc(%19) : (!llvm.i64) -> !llvm.ptr + %18 = llvm.mlir.constant(4 : index) : i64 + %19 = llvm.mul %16, %18 : i64 + %20 = llvm.call @malloc(%19) : (i64) -> !llvm.ptr %21 = llvm.bitcast %20 : !llvm.ptr to !llvm.ptr %22 = llvm.insertvalue %21, %17[0] : !llvm.struct<(ptr, i64)> %23 = llvm.insertvalue %1, %22[1] : !llvm.struct<(ptr, i64)> @@ -705,34 +705,34 @@ // CHECK-LABEL: define i64 @memref_dim({ float*, i64, i64 } {{%.*}}) -llvm.func @memref_dim(%arg0: !llvm.struct<(ptr, i64, i64)>) -> !llvm.i64 { +llvm.func @memref_dim(%arg0: !llvm.struct<(ptr, i64, i64)>) -> i64 { // Expecting this to 
create an LLVM constant. - %0 = llvm.mlir.constant(42 : index) : !llvm.i64 + %0 = llvm.mlir.constant(42 : index) : i64 // CHECK-NEXT: %2 = extractvalue { float*, i64, i64 } %0, 1 %1 = llvm.extractvalue %arg0[1] : !llvm.struct<(ptr, i64, i64)> // Expecting this to create an LLVM constant. - %2 = llvm.mlir.constant(10 : index) : !llvm.i64 + %2 = llvm.mlir.constant(10 : index) : i64 // CHECK-NEXT: %3 = extractvalue { float*, i64, i64 } %0, 2 %3 = llvm.extractvalue %arg0[2] : !llvm.struct<(ptr, i64, i64)> // Checking that the constant for d0 has been created. // CHECK-NEXT: %4 = add i64 42, %2 - %4 = llvm.add %0, %1 : !llvm.i64 + %4 = llvm.add %0, %1 : i64 // Checking that the constant for d2 has been created. // CHECK-NEXT: %5 = add i64 10, %3 - %5 = llvm.add %2, %3 : !llvm.i64 + %5 = llvm.add %2, %3 : i64 // CHECK-NEXT: %6 = add i64 %4, %5 - %6 = llvm.add %4, %5 : !llvm.i64 + %6 = llvm.add %4, %5 : i64 // CHECK-NEXT: ret i64 %6 - llvm.return %6 : !llvm.i64 + llvm.return %6 : i64 } -llvm.func @get_i64() -> !llvm.i64 +llvm.func @get_i64() -> i64 llvm.func @get_f32() -> !llvm.float llvm.func @get_memref() -> !llvm.struct<(ptr, i64, i64)> // CHECK-LABEL: define { i64, float, { float*, i64, i64 } } @multireturn() llvm.func @multireturn() -> !llvm.struct<(i64, float, struct<(ptr, i64, i64)>)> { - %0 = llvm.call @get_i64() : () -> !llvm.i64 + %0 = llvm.call @get_i64() : () -> i64 %1 = llvm.call @get_f32() : () -> !llvm.float %2 = llvm.call @get_memref() : () -> !llvm.struct<(ptr, i64, i64)> // CHECK: %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } undef, i64 %{{[0-9]+}}, 0 @@ -757,26 +757,26 @@ %1 = llvm.extractvalue %0[0] : !llvm.struct<(i64, float, struct<(ptr, i64, i64)>)> %2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, float, struct<(ptr, i64, i64)>)> %3 = llvm.extractvalue %0[2] : !llvm.struct<(i64, float, struct<(ptr, i64, i64)>)> - %4 = llvm.mlir.constant(42) : !llvm.i64 + %4 = llvm.mlir.constant(42) : i64 // CHECK: add i64 [[ret0]], 42 - %5 = llvm.add %1, %4 : !llvm.i64 + %5 = llvm.add %1, %4 : i64 %6 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float // CHECK: fadd float [[ret1]], 4.200000e+01 %7 = llvm.fadd %2, %6 : !llvm.float - %8 = llvm.mlir.constant(0 : index) : !llvm.i64 - %9 = llvm.mlir.constant(42 : index) : !llvm.i64 + %8 = llvm.mlir.constant(0 : index) : i64 + %9 = llvm.mlir.constant(42 : index) : i64 // CHECK: extractvalue { float*, i64, i64 } [[ret2]], 0 %10 = llvm.extractvalue %3[1] : !llvm.struct<(ptr, i64, i64)> - %11 = llvm.mlir.constant(10 : index) : !llvm.i64 + %11 = llvm.mlir.constant(10 : index) : i64 %12 = llvm.extractvalue %3[2] : !llvm.struct<(ptr, i64, i64)> - %13 = llvm.mul %8, %10 : !llvm.i64 - %14 = llvm.add %13, %8 : !llvm.i64 - %15 = llvm.mul %14, %11 : !llvm.i64 - %16 = llvm.add %15, %8 : !llvm.i64 - %17 = llvm.mul %16, %12 : !llvm.i64 - %18 = llvm.add %17, %8 : !llvm.i64 + %13 = llvm.mul %8, %10 : i64 + %14 = llvm.add %13, %8 : i64 + %15 = llvm.mul %14, %11 : i64 + %16 = llvm.add %15, %8 : i64 + %17 = llvm.mul %16, %12 : i64 + %18 = llvm.add %17, %8 : i64 %19 = llvm.extractvalue %3[0] : !llvm.struct<(ptr, i64, i64)> - %20 = llvm.getelementptr %19[%18] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %20 = llvm.getelementptr %19[%18] : (!llvm.ptr, i64) -> !llvm.ptr %21 = llvm.load %20 : !llvm.ptr llvm.return } @@ -845,23 +845,23 @@ } // CHECK-LABEL: @ops -llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32, %arg3: !llvm.i32) -> !llvm.struct<(float, i32)> { +llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: i32, 
%arg3: i32) -> !llvm.struct<(float, i32)> { // CHECK-NEXT: fsub float %0, %1 %0 = llvm.fsub %arg0, %arg1 : !llvm.float // CHECK-NEXT: %6 = sub i32 %2, %3 - %1 = llvm.sub %arg2, %arg3 : !llvm.i32 + %1 = llvm.sub %arg2, %arg3 : i32 // CHECK-NEXT: %7 = icmp slt i32 %2, %6 - %2 = llvm.icmp "slt" %arg2, %1 : !llvm.i32 + %2 = llvm.icmp "slt" %arg2, %1 : i32 // CHECK-NEXT: %8 = select i1 %7, i32 %2, i32 %6 - %3 = llvm.select %2, %arg2, %1 : !llvm.i1, !llvm.i32 + %3 = llvm.select %2, %arg2, %1 : i1, i32 // CHECK-NEXT: %9 = sdiv i32 %2, %3 - %4 = llvm.sdiv %arg2, %arg3 : !llvm.i32 + %4 = llvm.sdiv %arg2, %arg3 : i32 // CHECK-NEXT: %10 = udiv i32 %2, %3 - %5 = llvm.udiv %arg2, %arg3 : !llvm.i32 + %5 = llvm.udiv %arg2, %arg3 : i32 // CHECK-NEXT: %11 = srem i32 %2, %3 - %6 = llvm.srem %arg2, %arg3 : !llvm.i32 + %6 = llvm.srem %arg2, %arg3 : i32 // CHECK-NEXT: %12 = urem i32 %2, %3 - %7 = llvm.urem %arg2, %arg3 : !llvm.i32 + %7 = llvm.urem %arg2, %arg3 : i32 %8 = llvm.mlir.undef : !llvm.struct<(float, i32)> %9 = llvm.insertvalue %0, %8[0] : !llvm.struct<(float, i32)> @@ -873,17 +873,17 @@ %12 = llvm.frem %arg0, %arg1 : !llvm.float // CHECK-NEXT: %17 = and i32 %2, %3 - %13 = llvm.and %arg2, %arg3 : !llvm.i32 + %13 = llvm.and %arg2, %arg3 : i32 // CHECK-NEXT: %18 = or i32 %2, %3 - %14 = llvm.or %arg2, %arg3 : !llvm.i32 + %14 = llvm.or %arg2, %arg3 : i32 // CHECK-NEXT: %19 = xor i32 %2, %3 - %15 = llvm.xor %arg2, %arg3 : !llvm.i32 + %15 = llvm.xor %arg2, %arg3 : i32 // CHECK-NEXT: %20 = shl i32 %2, %3 - %16 = llvm.shl %arg2, %arg3 : !llvm.i32 + %16 = llvm.shl %arg2, %arg3 : i32 // CHECK-NEXT: %21 = lshr i32 %2, %3 - %17 = llvm.lshr %arg2, %arg3 : !llvm.i32 + %17 = llvm.lshr %arg2, %arg3 : i32 // CHECK-NEXT: %22 = ashr i32 %2, %3 - %18 = llvm.ashr %arg2, %arg3 : !llvm.i32 + %18 = llvm.ashr %arg2, %arg3 : i32 // CHECK-NEXT: fneg float %0 %19 = llvm.fneg %arg0 : !llvm.float @@ -896,20 +896,20 @@ // // CHECK-LABEL: define void @indirect_const_call(i64 {{%.*}}) -llvm.func @indirect_const_call(%arg0: !llvm.i64) { +llvm.func @indirect_const_call(%arg0: i64) { // CHECK-NEXT: call void @body(i64 %0) %0 = llvm.mlir.addressof @body : !llvm.ptr> - llvm.call %0(%arg0) : (!llvm.i64) -> () + llvm.call %0(%arg0) : (i64) -> () // CHECK-NEXT: ret void llvm.return } // CHECK-LABEL: define i32 @indirect_call(i32 (float)* {{%.*}}, float {{%.*}}) -llvm.func @indirect_call(%arg0: !llvm.ptr>, %arg1: !llvm.float) -> !llvm.i32 { +llvm.func @indirect_call(%arg0: !llvm.ptr>, %arg1: !llvm.float) -> i32 { // CHECK-NEXT: %3 = call i32 %0(float %1) - %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> !llvm.i32 + %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> i32 // CHECK-NEXT: ret i32 %3 - llvm.return %0 : !llvm.i32 + llvm.return %0 : i32 } // @@ -918,20 +918,20 @@ // // CHECK-LABEL: define void @cond_br_arguments(i1 {{%.*}}, i1 {{%.*}}) -llvm.func @cond_br_arguments(%arg0: !llvm.i1, %arg1: !llvm.i1) { +llvm.func @cond_br_arguments(%arg0: i1, %arg1: i1) { // CHECK-NEXT: br i1 %0, label %3, label %5 - llvm.cond_br %arg0, ^bb1(%arg0 : !llvm.i1), ^bb2 + llvm.cond_br %arg0, ^bb1(%arg0 : i1), ^bb2 // CHECK: 3: // CHECK-NEXT: %4 = phi i1 [ %1, %5 ], [ %0, %2 ] -^bb1(%0 : !llvm.i1): +^bb1(%0 : i1): // CHECK-NEXT: ret void llvm.return // CHECK: 5: ^bb2: // CHECK-NEXT: br label %3 - llvm.br ^bb1(%arg1 : !llvm.i1) + llvm.br ^bb1(%arg1 : i1) } // CHECK-LABEL: define void @llvm_noalias(float* noalias {{%*.}}) @@ -947,24 +947,24 @@ // CHECK-LABEL: @llvm_varargs(...) llvm.func @llvm_varargs(...) 
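// A minimal illustrative sketch (assumed names, not part of the upstream test
// suite) of the convention this patch establishes: integer types now use the
// built-in i32/i64 syntax, while the remaining types such as !llvm.float and
// !llvm.struct keep the dialect-prefixed form, and both can mix freely in one
// signature.
// CHECK-LABEL: define i64 @mixed_types_sketch(i64 {{%.*}}, float {{%.*}})
llvm.func @mixed_types_sketch(%arg0: i64, %arg1: !llvm.float) -> i64 {
  // Integer arithmetic spells its type as plain i64 after the migration.
  %0 = llvm.add %arg0, %arg0 : i64
  llvm.return %0 : i64
}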
-llvm.func @intpointerconversion(%arg0 : !llvm.i32) -> !llvm.i32 { +llvm.func @intpointerconversion(%arg0 : i32) -> i32 { // CHECK: %2 = inttoptr i32 %0 to i32* // CHECK-NEXT: %3 = ptrtoint i32* %2 to i32 - %1 = llvm.inttoptr %arg0 : !llvm.i32 to !llvm.ptr - %2 = llvm.ptrtoint %1 : !llvm.ptr to !llvm.i32 - llvm.return %2 : !llvm.i32 + %1 = llvm.inttoptr %arg0 : i32 to !llvm.ptr + %2 = llvm.ptrtoint %1 : !llvm.ptr to i32 + llvm.return %2 : i32 } -llvm.func @fpconversion(%arg0 : !llvm.i32) -> !llvm.i32 { +llvm.func @fpconversion(%arg0 : i32) -> i32 { // CHECK: %2 = sitofp i32 %0 to float // CHECK-NEXT: %3 = fptosi float %2 to i32 // CHECK-NEXT: %4 = uitofp i32 %3 to float // CHECK-NEXT: %5 = fptoui float %4 to i32 - %1 = llvm.sitofp %arg0 : !llvm.i32 to !llvm.float - %2 = llvm.fptosi %1 : !llvm.float to !llvm.i32 - %3 = llvm.uitofp %2 : !llvm.i32 to !llvm.float - %4 = llvm.fptoui %3 : !llvm.float to !llvm.i32 - llvm.return %4 : !llvm.i32 + %1 = llvm.sitofp %arg0 : i32 to !llvm.float + %2 = llvm.fptosi %1 : !llvm.float to i32 + %3 = llvm.uitofp %2 : i32 to !llvm.float + %4 = llvm.fptoui %3 : !llvm.float to i32 + llvm.return %4 : i32 } // CHECK-LABEL: @addrspace @@ -1019,33 +1019,33 @@ } // CHECK-LABEL: @vect -llvm.func @vect(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i32, %arg2: !llvm.float) { +llvm.func @vect(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) { // CHECK-NEXT: extractelement <4 x float> {{.*}}, i32 // CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i32 // CHECK-NEXT: shufflevector <4 x float> {{.*}}, <4 x float> {{.*}}, <5 x i32> - %0 = llvm.extractelement %arg0[%arg1 : !llvm.i32] : !llvm.vec<4 x float> - %1 = llvm.insertelement %arg2, %arg0[%arg1 : !llvm.i32] : !llvm.vec<4 x float> + %0 = llvm.extractelement %arg0[%arg1 : i32] : !llvm.vec<4 x float> + %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : !llvm.vec<4 x float> %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float> llvm.return } // CHECK-LABEL: @vect_i64idx -llvm.func @vect_i64idx(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.i64, %arg2: !llvm.float) { +llvm.func @vect_i64idx(%arg0: !llvm.vec<4 x float>, %arg1: i64, %arg2: !llvm.float) { // CHECK-NEXT: extractelement <4 x float> {{.*}}, i64 // CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i64 - %0 = llvm.extractelement %arg0[%arg1 : !llvm.i64] : !llvm.vec<4 x float> - %1 = llvm.insertelement %arg2, %arg0[%arg1 : !llvm.i64] : !llvm.vec<4 x float> + %0 = llvm.extractelement %arg0[%arg1 : i64] : !llvm.vec<4 x float> + %1 = llvm.insertelement %arg2, %arg0[%arg1 : i64] : !llvm.vec<4 x float> llvm.return } // CHECK-LABEL: @alloca -llvm.func @alloca(%size : !llvm.i64) { +llvm.func @alloca(%size : i64) { // Alignment automatically set by the LLVM IR builder when alignment attribute // is 0. 
// CHECK: alloca {{.*}} align 4 - llvm.alloca %size x !llvm.i32 {alignment = 0} : (!llvm.i64) -> (!llvm.ptr) + llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr) // CHECK-NEXT: alloca {{.*}} align 8 - llvm.alloca %size x !llvm.i32 {alignment = 8} : (!llvm.i64) -> (!llvm.ptr) + llvm.alloca %size x i32 {alignment = 8} : (i64) -> (!llvm.ptr) llvm.return } @@ -1057,24 +1057,24 @@ } // CHECK-LABEL: @fp_casts -llvm.func @fp_casts(%fp1 : !llvm.float, %fp2 : !llvm.double) -> !llvm.i16 { +llvm.func @fp_casts(%fp1 : !llvm.float, %fp2 : !llvm.double) -> i16 { // CHECK: fptrunc double {{.*}} to float %a = llvm.fptrunc %fp2 : !llvm.double to !llvm.float // CHECK: fpext float {{.*}} to double %b = llvm.fpext %fp1 : !llvm.float to !llvm.double // CHECK: fptosi double {{.*}} to i16 - %c = llvm.fptosi %b : !llvm.double to !llvm.i16 - llvm.return %c : !llvm.i16 + %c = llvm.fptosi %b : !llvm.double to i16 + llvm.return %c : i16 } // CHECK-LABEL: @integer_extension_and_truncation -llvm.func @integer_extension_and_truncation(%a : !llvm.i32) { +llvm.func @integer_extension_and_truncation(%a : i32) { // CHECK: sext i32 {{.*}} to i64 // CHECK: zext i32 {{.*}} to i64 // CHECK: trunc i32 {{.*}} to i16 - %0 = llvm.sext %a : !llvm.i32 to !llvm.i64 - %1 = llvm.zext %a : !llvm.i32 to !llvm.i64 - %2 = llvm.trunc %a : !llvm.i32 to !llvm.i16 + %0 = llvm.sext %a : i32 to i64 + %1 = llvm.zext %a : i32 to i64 + %2 = llvm.trunc %a : i32 to i16 llvm.return } @@ -1108,7 +1108,7 @@ // CHECK-LABEL: @atomicrmw llvm.func @atomicrmw( %f32_ptr : !llvm.ptr, %f32 : !llvm.float, - %i32_ptr : !llvm.ptr, %i32 : !llvm.i32) { + %i32_ptr : !llvm.ptr, %i32 : i32) { // CHECK: atomicrmw fadd float* %{{.*}}, float %{{.*}} unordered %0 = llvm.atomicrmw fadd %f32_ptr, %f32 unordered : !llvm.float // CHECK: atomicrmw fsub float* %{{.*}}, float %{{.*}} unordered @@ -1116,32 +1116,32 @@ // CHECK: atomicrmw xchg float* %{{.*}}, float %{{.*}} monotonic %2 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : !llvm.float // CHECK: atomicrmw add i32* %{{.*}}, i32 %{{.*}} acquire - %3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : !llvm.i32 + %3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : i32 // CHECK: atomicrmw sub i32* %{{.*}}, i32 %{{.*}} release - %4 = llvm.atomicrmw sub %i32_ptr, %i32 release : !llvm.i32 + %4 = llvm.atomicrmw sub %i32_ptr, %i32 release : i32 // CHECK: atomicrmw and i32* %{{.*}}, i32 %{{.*}} acq_rel - %5 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : !llvm.i32 + %5 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : i32 // CHECK: atomicrmw nand i32* %{{.*}}, i32 %{{.*}} seq_cst - %6 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : !llvm.i32 + %6 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : i32 // CHECK: atomicrmw or i32* %{{.*}}, i32 %{{.*}} unordered - %7 = llvm.atomicrmw _or %i32_ptr, %i32 unordered : !llvm.i32 + %7 = llvm.atomicrmw _or %i32_ptr, %i32 unordered : i32 // CHECK: atomicrmw xor i32* %{{.*}}, i32 %{{.*}} unordered - %8 = llvm.atomicrmw _xor %i32_ptr, %i32 unordered : !llvm.i32 + %8 = llvm.atomicrmw _xor %i32_ptr, %i32 unordered : i32 // CHECK: atomicrmw max i32* %{{.*}}, i32 %{{.*}} unordered - %9 = llvm.atomicrmw max %i32_ptr, %i32 unordered : !llvm.i32 + %9 = llvm.atomicrmw max %i32_ptr, %i32 unordered : i32 // CHECK: atomicrmw min i32* %{{.*}}, i32 %{{.*}} unordered - %10 = llvm.atomicrmw min %i32_ptr, %i32 unordered : !llvm.i32 + %10 = llvm.atomicrmw min %i32_ptr, %i32 unordered : i32 // CHECK: atomicrmw umax i32* %{{.*}}, i32 %{{.*}} unordered - %11 = llvm.atomicrmw umax %i32_ptr, %i32 unordered : !llvm.i32 + %11 
= llvm.atomicrmw umax %i32_ptr, %i32 unordered : i32 // CHECK: atomicrmw umin i32* %{{.*}}, i32 %{{.*}} unordered - %12 = llvm.atomicrmw umin %i32_ptr, %i32 unordered : !llvm.i32 + %12 = llvm.atomicrmw umin %i32_ptr, %i32 unordered : i32 llvm.return } // CHECK-LABEL: @cmpxchg -llvm.func @cmpxchg(%ptr : !llvm.ptr, %cmp : !llvm.i32, %val: !llvm.i32) { +llvm.func @cmpxchg(%ptr : !llvm.ptr, %cmp : i32, %val: i32) { // CHECK: cmpxchg i32* %{{.*}}, i32 %{{.*}}, i32 %{{.*}} acq_rel monotonic - %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : !llvm.i32 + %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : i32 // CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 0 %1 = llvm.extractvalue %0[0] : !llvm.struct<(i32, i1)> // CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 1 @@ -1152,18 +1152,18 @@ llvm.mlir.global external constant @_ZTIi() : !llvm.ptr llvm.func @foo(!llvm.ptr) llvm.func @bar(!llvm.ptr) -> !llvm.ptr -llvm.func @__gxx_personality_v0(...) -> !llvm.i32 +llvm.func @__gxx_personality_v0(...) -> i32 // CHECK-LABEL: @invokeLandingpad -llvm.func @invokeLandingpad() -> !llvm.i32 attributes { personality = @__gxx_personality_v0 } { +llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personality_v0 } { // CHECK: %[[a1:[0-9]+]] = alloca i8 - %0 = llvm.mlir.constant(0 : i32) : !llvm.i32 + %0 = llvm.mlir.constant(0 : i32) : i32 %1 = llvm.mlir.constant("\01") : !llvm.array<1 x i8> %2 = llvm.mlir.addressof @_ZTIi : !llvm.ptr> %3 = llvm.bitcast %2 : !llvm.ptr> to !llvm.ptr %4 = llvm.mlir.null : !llvm.ptr> - %5 = llvm.mlir.constant(1 : i32) : !llvm.i32 - %6 = llvm.alloca %5 x !llvm.i8 : (!llvm.i32) -> !llvm.ptr + %5 = llvm.mlir.constant(1 : i32) : i32 + %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr // CHECK: invoke void @foo(i8* %[[a1]]) // CHECK-NEXT: to label %[[normal:[0-9]+]] unwind label %[[unwind:[0-9]+]] llvm.invoke @foo(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> () @@ -1181,7 +1181,7 @@ // CHECK: [[normal]]: // CHECK-NEXT: ret i32 1 ^bb2: // 2 preds: ^bb0, ^bb3 - llvm.return %5 : !llvm.i32 + llvm.return %5 : i32 // CHECK: [[final]]: // CHECK-NEXT: %{{[0-9]+}} = invoke i8* @bar(i8* %[[a1]]) @@ -1191,22 +1191,22 @@ } // CHECK-LABEL: @callFreezeOp -llvm.func @callFreezeOp(%x : !llvm.i32) { +llvm.func @callFreezeOp(%x : i32) { // CHECK: freeze i32 %{{[0-9]+}} - %0 = llvm.freeze %x : !llvm.i32 - %1 = llvm.mlir.undef : !llvm.i32 + %0 = llvm.freeze %x : i32 + %1 = llvm.mlir.undef : i32 // CHECK: freeze i32 undef - %2 = llvm.freeze %1 : !llvm.i32 + %2 = llvm.freeze %1 : i32 llvm.return } // CHECK-LABEL: @boolConstArg -llvm.func @boolConstArg() -> !llvm.i1 { +llvm.func @boolConstArg() -> i1 { // CHECK: ret i1 false - %0 = llvm.mlir.constant(true) : !llvm.i1 - %1 = llvm.mlir.constant(false) : !llvm.i1 - %2 = llvm.and %0, %1 : !llvm.i1 - llvm.return %2 : !llvm.i1 + %0 = llvm.mlir.constant(true) : i1 + %1 = llvm.mlir.constant(false) : i1 + %2 = llvm.and %0, %1 : i1 + llvm.return %2 : i1 } // CHECK-LABEL: @callFenceInst @@ -1256,13 +1256,13 @@ // ----- // Check that branch weight attributes are exported properly as metadata. 
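// In @cond_br_weights below, weights(dense<[5, 10]>) attaches weight 5 to the
// first successor (^bb1) and weight 10 to the second (^bb2); the exporter
// materializes this as the !{"branch_weights", i32 5, i32 10} metadata node
// matched by the final CHECK line.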
-llvm.func @cond_br_weights(%cond : !llvm.i1, %arg0 : !llvm.i32, %arg1 : !llvm.i32) -> !llvm.i32 { +llvm.func @cond_br_weights(%cond : i1, %arg0 : i32, %arg1 : i32) -> i32 { // CHECK: !prof ![[NODE:[0-9]+]] llvm.cond_br %cond weights(dense<[5, 10]> : vector<2xi32>), ^bb1, ^bb2 ^bb1: // pred: ^bb0 - llvm.return %arg0 : !llvm.i32 + llvm.return %arg0 : i32 ^bb2: // pred: ^bb0 - llvm.return %arg1 : !llvm.i32 + llvm.return %arg1 : i32 } // CHECK: ![[NODE]] = !{!"branch_weights", i32 5, i32 10} @@ -1270,9 +1270,9 @@ // ----- llvm.func @volatile_store_and_load() { - %val = llvm.mlir.constant(5 : i32) : !llvm.i32 - %size = llvm.mlir.constant(1 : i64) : !llvm.i64 - %0 = llvm.alloca %size x !llvm.i32 : (!llvm.i64) -> (!llvm.ptr) + %val = llvm.mlir.constant(5 : i32) : i32 + %size = llvm.mlir.constant(1 : i64) : i64 + %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr) // CHECK: store volatile i32 5, i32* %{{.*}} llvm.store volatile %val, %0 : !llvm.ptr // CHECK: %{{.*}} = load volatile i32, i32* %{{.*}} @@ -1284,9 +1284,9 @@ // Check that nontemporal attribute is exported as metadata node. llvm.func @nontemporal_store_and_load() { - %val = llvm.mlir.constant(5 : i32) : !llvm.i32 - %size = llvm.mlir.constant(1 : i64) : !llvm.i64 - %0 = llvm.alloca %size x !llvm.i32 : (!llvm.i64) -> (!llvm.ptr) + %val = llvm.mlir.constant(5 : i32) : i32 + %size = llvm.mlir.constant(1 : i64) : i64 + %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr) // CHECK: !nontemporal ![[NODE:[0-9]+]] llvm.store %val, %0 {nontemporal} : !llvm.ptr // CHECK: !nontemporal ![[NODE]] @@ -1330,30 +1330,30 @@ // ----- // CHECK-LABEL: @useInlineAsm -llvm.func @useInlineAsm(%arg0: !llvm.i32) { +llvm.func @useInlineAsm(%arg0: i32) { // Constraints string is checked at LLVM InlineAsm instruction construction time. // So we can't just use "bar" everywhere, number of in/out arguments has to match. 
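// For example, the constraint string "r" below declares one input register and
// no outputs, so that asm call takes one i32 and returns nothing, while
// "=r,r" adds an output register and therefore yields an i8 result; the counts
// in each constraint string mirror the operand and result lists.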
// CHECK-NEXT: call void asm "foo", "r"(i32 {{.*}}), !dbg !7 - llvm.inline_asm "foo", "r" %arg0 : (!llvm.i32) -> () + llvm.inline_asm "foo", "r" %arg0 : (i32) -> () // CHECK-NEXT: call i8 asm "foo", "=r,r"(i32 {{.*}}), !dbg !9 - %0 = llvm.inline_asm "foo", "=r,r" %arg0 : (!llvm.i32) -> !llvm.i8 + %0 = llvm.inline_asm "foo", "=r,r" %arg0 : (i32) -> i8 // CHECK-NEXT: call i8 asm "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}), !dbg !10 - %1 = llvm.inline_asm "foo", "=r,r,r" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8 + %1 = llvm.inline_asm "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8 // CHECK-NEXT: call i8 asm sideeffect "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}), !dbg !11 - %2 = llvm.inline_asm has_side_effects "foo", "=r,r,r" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8 + %2 = llvm.inline_asm has_side_effects "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8 // CHECK-NEXT: call i8 asm alignstack "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}), !dbg !12 - %3 = llvm.inline_asm is_align_stack "foo", "=r,r,r" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8 + %3 = llvm.inline_asm is_align_stack "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8 // CHECK-NEXT: call i8 asm inteldialect "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}), !dbg !13 - %4 = llvm.inline_asm asm_dialect = "intel" "foo", "=r,r,r" %arg0, %arg0 : (!llvm.i32, !llvm.i32) -> !llvm.i8 + %4 = llvm.inline_asm asm_dialect = "intel" "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8 // CHECK-NEXT: call { i8, i8 } asm "foo", "=r,=r,r"(i32 {{.*}}), !dbg !14 - %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (!llvm.i32) -> !llvm.struct<(i8, i8)> + %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (i32) -> !llvm.struct<(i8, i8)> llvm.return } @@ -1405,57 +1405,57 @@ // ----- // CHECK-LABEL: @switch_args -llvm.func @switch_args(%arg0: !llvm.i32) { - %0 = llvm.mlir.constant(5 : i32) : !llvm.i32 - %1 = llvm.mlir.constant(7 : i32) : !llvm.i32 - %2 = llvm.mlir.constant(11 : i32) : !llvm.i32 +llvm.func @switch_args(%arg0: i32) { + %0 = llvm.mlir.constant(5 : i32) : i32 + %1 = llvm.mlir.constant(7 : i32) : i32 + %2 = llvm.mlir.constant(11 : i32) : i32 // CHECK: switch i32 %[[SWITCH_arg0:[0-9]+]], label %[[SWITCHDEFAULT_bb1:[0-9]+]] [ // CHECK-NEXT: i32 -1, label %[[SWITCHCASE_bb2:[0-9]+]] // CHECK-NEXT: i32 1, label %[[SWITCHCASE_bb3:[0-9]+]] // CHECK-NEXT: ] llvm.switch %arg0, ^bb1 [ - -1: ^bb2(%0 : !llvm.i32), - 1: ^bb3(%1, %2 : !llvm.i32, !llvm.i32) + -1: ^bb2(%0 : i32), + 1: ^bb3(%1, %2 : i32, i32) ] // CHECK: [[SWITCHDEFAULT_bb1]]: // CHECK-NEXT: ret i32 %[[SWITCH_arg0]] ^bb1: // pred: ^bb0 - llvm.return %arg0 : !llvm.i32 + llvm.return %arg0 : i32 // CHECK: [[SWITCHCASE_bb2]]: // CHECK-NEXT: phi i32 [ 5, %1 ] // CHECK-NEXT: ret i32 -^bb2(%3: !llvm.i32): // pred: ^bb0 - llvm.return %1 : !llvm.i32 +^bb2(%3: i32): // pred: ^bb0 + llvm.return %1 : i32 // CHECK: [[SWITCHCASE_bb3]]: // CHECK-NEXT: phi i32 [ 7, %1 ] // CHECK-NEXT: phi i32 [ 11, %1 ] // CHECK-NEXT: ret i32 -^bb3(%4: !llvm.i32, %5: !llvm.i32): // pred: ^bb0 - llvm.return %4 : !llvm.i32 +^bb3(%4: i32, %5: i32): // pred: ^bb0 + llvm.return %4 : i32 } // CHECK-LABEL: @switch_weights -llvm.func @switch_weights(%arg0: !llvm.i32) { - %0 = llvm.mlir.constant(19 : i32) : !llvm.i32 - %1 = llvm.mlir.constant(23 : i32) : !llvm.i32 - %2 = llvm.mlir.constant(29 : i32) : !llvm.i32 +llvm.func @switch_weights(%arg0: i32) { + %0 = llvm.mlir.constant(19 : i32) : i32 + %1 = llvm.mlir.constant(23 : i32) : i32 + %2 = llvm.mlir.constant(29 : i32) : i32 // CHECK: !prof ![[SWITCH_WEIGHT_NODE:[0-9]+]] - llvm.switch %arg0, ^bb1(%0 : 
!llvm.i32) [ - 9: ^bb2(%1, %2 : !llvm.i32, !llvm.i32), + llvm.switch %arg0, ^bb1(%0 : i32) [ + 9: ^bb2(%1, %2 : i32, i32), 99: ^bb3 ] {branch_weights = dense<[13, 17, 19]> : vector<3xi32>} -^bb1(%3: !llvm.i32): // pred: ^bb0 - llvm.return %3 : !llvm.i32 +^bb1(%3: i32): // pred: ^bb0 + llvm.return %3 : i32 -^bb2(%4: !llvm.i32, %5: !llvm.i32): // pred: ^bb0 - llvm.return %5 : !llvm.i32 +^bb2(%4: i32, %5: i32): // pred: ^bb0 + llvm.return %5 : i32 ^bb3: // pred: ^bb0 - llvm.return %arg0 : !llvm.i32 + llvm.return %arg0 : i32 } // CHECK: ![[SWITCH_WEIGHT_NODE]] = !{!"branch_weights", i32 13, i32 17, i32 19} diff --git a/mlir/test/Target/nvvmir.mlir b/mlir/test/Target/nvvmir.mlir --- a/mlir/test/Target/nvvmir.mlir +++ b/mlir/test/Target/nvvmir.mlir @@ -1,35 +1,35 @@ // RUN: mlir-translate -mlir-to-nvvmir %s | FileCheck %s -llvm.func @nvvm_special_regs() -> !llvm.i32 { +llvm.func @nvvm_special_regs() -> i32 { // CHECK: %1 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() - %1 = nvvm.read.ptx.sreg.tid.x : !llvm.i32 + %1 = nvvm.read.ptx.sreg.tid.x : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.y() - %2 = nvvm.read.ptx.sreg.tid.y : !llvm.i32 + %2 = nvvm.read.ptx.sreg.tid.y : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.z() - %3 = nvvm.read.ptx.sreg.tid.z : !llvm.i32 + %3 = nvvm.read.ptx.sreg.tid.z : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() - %4 = nvvm.read.ptx.sreg.ntid.x : !llvm.i32 + %4 = nvvm.read.ptx.sreg.ntid.x : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ntid.y() - %5 = nvvm.read.ptx.sreg.ntid.y : !llvm.i32 + %5 = nvvm.read.ptx.sreg.ntid.y : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ntid.z() - %6 = nvvm.read.ptx.sreg.ntid.z : !llvm.i32 + %6 = nvvm.read.ptx.sreg.ntid.z : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() - %7 = nvvm.read.ptx.sreg.ctaid.x : !llvm.i32 + %7 = nvvm.read.ptx.sreg.ctaid.x : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.y() - %8 = nvvm.read.ptx.sreg.ctaid.y : !llvm.i32 + %8 = nvvm.read.ptx.sreg.ctaid.y : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.z() - %9 = nvvm.read.ptx.sreg.ctaid.z : !llvm.i32 + %9 = nvvm.read.ptx.sreg.ctaid.z : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.nctaid.x() - %10 = nvvm.read.ptx.sreg.nctaid.x : !llvm.i32 + %10 = nvvm.read.ptx.sreg.nctaid.x : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.nctaid.y() - %11 = nvvm.read.ptx.sreg.nctaid.y : !llvm.i32 + %11 = nvvm.read.ptx.sreg.nctaid.y : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.nctaid.z() - %12 = nvvm.read.ptx.sreg.nctaid.z : !llvm.i32 + %12 = nvvm.read.ptx.sreg.nctaid.z : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.warpsize() - %13 = nvvm.read.ptx.sreg.warpsize : !llvm.i32 + %13 = nvvm.read.ptx.sreg.warpsize : i32 // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.laneid() - %14 = nvvm.read.ptx.sreg.laneid : !llvm.i32 - llvm.return %1 : !llvm.i32 + %14 = nvvm.read.ptx.sreg.laneid : i32 + llvm.return %1 : i32 } llvm.func @llvm.nvvm.barrier0() { @@ -39,18 +39,18 @@ } llvm.func @nvvm_shfl( - %0 : !llvm.i32, %1 : !llvm.i32, %2 : !llvm.i32, - %3 : !llvm.i32, %4 : !llvm.float) -> !llvm.i32 { + %0 : i32, %1 : i32, %2 : i32, + %3 : i32, %4 : !llvm.float) -> i32 { // CHECK: call i32 @llvm.nvvm.shfl.sync.bfly.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) - %6 = nvvm.shfl.sync.bfly %0, %3, %1, %2 : !llvm.i32 + %6 = nvvm.shfl.sync.bfly %0, %3, %1, %2 : i32 // CHECK: call float @llvm.nvvm.shfl.sync.bfly.f32(i32 %{{.*}}, float %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) %7 = nvvm.shfl.sync.bfly %0, %4, %1, %2 : !llvm.float - llvm.return 
%6 : !llvm.i32 + llvm.return %6 : i32 } llvm.func @nvvm_shfl_pred( - %0 : !llvm.i32, %1 : !llvm.i32, %2 : !llvm.i32, - %3 : !llvm.i32, %4 : !llvm.float) -> !llvm.struct<(i32, i1)> { + %0 : i32, %1 : i32, %2 : i32, + %3 : i32, %4 : !llvm.float) -> !llvm.struct<(i32, i1)> { // CHECK: call { i32, i1 } @llvm.nvvm.shfl.sync.bfly.i32p(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) %6 = nvvm.shfl.sync.bfly %0, %3, %1, %2 {return_value_and_is_valid} : !llvm.struct<(i32, i1)> // CHECK: call { float, i1 } @llvm.nvvm.shfl.sync.bfly.f32p(i32 %{{.*}}, float %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) @@ -58,10 +58,10 @@ llvm.return %6 : !llvm.struct<(i32, i1)> } -llvm.func @nvvm_vote(%0 : !llvm.i32, %1 : !llvm.i1) -> !llvm.i32 { +llvm.func @nvvm_vote(%0 : i32, %1 : i1) -> i32 { // CHECK: call i32 @llvm.nvvm.vote.ballot.sync(i32 %{{.*}}, i1 %{{.*}}) - %3 = nvvm.vote.ballot.sync %0, %1 : !llvm.i32 - llvm.return %3 : !llvm.i32 + %3 = nvvm.vote.ballot.sync %0, %1 : i32 + llvm.return %3 : i32 } llvm.func @nvvm_mma(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>, diff --git a/mlir/test/Target/openmp-llvm.mlir b/mlir/test/Target/openmp-llvm.mlir --- a/mlir/test/Target/openmp-llvm.mlir +++ b/mlir/test/Target/openmp-llvm.mlir @@ -19,19 +19,19 @@ } // CHECK-LABEL: define void @test_flush_construct(i32 %0) -llvm.func @test_flush_construct(%arg0: !llvm.i32) { +llvm.func @test_flush_construct(%arg0: i32) { // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}} omp.flush // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}} - omp.flush (%arg0 : !llvm.i32) + omp.flush (%arg0 : i32) // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}} - omp.flush (%arg0, %arg0 : !llvm.i32, !llvm.i32) + omp.flush (%arg0, %arg0 : i32, i32) - %0 = llvm.mlir.constant(1 : i64) : !llvm.i64 + %0 = llvm.mlir.constant(1 : i64) : i64 // CHECK: alloca {{.*}} align 4 - %1 = llvm.alloca %0 x !llvm.i32 {in_type = i32, name = "a"} : (!llvm.i64) -> !llvm.ptr + %1 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}} omp.flush // CHECK: load i32, i32* @@ -55,22 +55,22 @@ // CHECK: define internal void @[[OMP_OUTLINED_FN_1]] // CHECK: call void @__kmpc_barrier -llvm.func @body(!llvm.i64) +llvm.func @body(i64) // CHECK-LABEL: define void @test_omp_parallel_2() llvm.func @test_omp_parallel_2() -> () { // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_2:.*]] to {{.*}} omp.parallel { ^bb0: - %0 = llvm.mlir.constant(1 : index) : !llvm.i64 - %1 = llvm.mlir.constant(42 : index) : !llvm.i64 - llvm.call @body(%0) : (!llvm.i64) -> () - llvm.call @body(%1) : (!llvm.i64) -> () + %0 = llvm.mlir.constant(1 : index) : i64 + %1 = llvm.mlir.constant(42 : index) : i64 + llvm.call @body(%0) : (i64) -> () + llvm.call @body(%1) : (i64) -> () llvm.br ^bb1 ^bb1: - %2 = llvm.add %0, %1 : !llvm.i64 - llvm.call @body(%2) : (!llvm.i64) -> () + %2 = llvm.add %0, %1 : i64 + llvm.call @body(%2) : (i64) -> () omp.terminator } llvm.return @@ -88,11 +88,11 @@ // CHECK: br label %omp.par.pre_finalize // CHECK: define void @test_omp_parallel_num_threads_1(i32 %[[NUM_THREADS_VAR_1:.*]]) -llvm.func @test_omp_parallel_num_threads_1(%arg0: !llvm.i32) -> () { +llvm.func @test_omp_parallel_num_threads_1(%arg0: i32) -> () { // CHECK: %[[GTN_NUM_THREADS_VAR_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_1:.*]]) // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_1]], i32 %[[GTN_NUM_THREADS_VAR_1]], i32 
%[[NUM_THREADS_VAR_1]]) // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_1:.*]] to {{.*}} - omp.parallel num_threads(%arg0: !llvm.i32) { + omp.parallel num_threads(%arg0: i32) { omp.barrier omp.terminator } @@ -105,11 +105,11 @@ // CHECK: define void @test_omp_parallel_num_threads_2() llvm.func @test_omp_parallel_num_threads_2() -> () { - %0 = llvm.mlir.constant(4 : index) : !llvm.i32 + %0 = llvm.mlir.constant(4 : index) : i32 // CHECK: %[[GTN_NUM_THREADS_VAR_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_2:.*]]) // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_2]], i32 %[[GTN_NUM_THREADS_VAR_2]], i32 4) // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_2:.*]] to {{.*}} - omp.parallel num_threads(%0: !llvm.i32) { + omp.parallel num_threads(%0: i32) { omp.barrier omp.terminator } @@ -122,19 +122,19 @@ // CHECK: define void @test_omp_parallel_num_threads_3() llvm.func @test_omp_parallel_num_threads_3() -> () { - %0 = llvm.mlir.constant(4 : index) : !llvm.i32 + %0 = llvm.mlir.constant(4 : index) : i32 // CHECK: %[[GTN_NUM_THREADS_VAR_3_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_1:.*]]) // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_1]], i32 %[[GTN_NUM_THREADS_VAR_3_1]], i32 4) // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_1:.*]] to {{.*}} - omp.parallel num_threads(%0: !llvm.i32) { + omp.parallel num_threads(%0: i32) { omp.barrier omp.terminator } - %1 = llvm.mlir.constant(8 : index) : !llvm.i32 + %1 = llvm.mlir.constant(8 : index) : i32 // CHECK: %[[GTN_NUM_THREADS_VAR_3_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_2:.*]]) // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_2]], i32 %[[GTN_NUM_THREADS_VAR_3_2]], i32 8) // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_2:.*]] to {{.*}} - omp.parallel num_threads(%1: !llvm.i32) { + omp.parallel num_threads(%1: i32) { omp.barrier omp.terminator } @@ -149,11 +149,11 @@ // CHECK: call void @__kmpc_barrier // CHECK: define void @test_omp_parallel_if_1(i32 %[[IF_VAR_1:.*]]) -llvm.func @test_omp_parallel_if_1(%arg0: !llvm.i32) -> () { +llvm.func @test_omp_parallel_if_1(%arg0: i32) -> () { // CHECK: %[[IF_COND_VAR_1:.*]] = icmp slt i32 %[[IF_VAR_1]], 0 - %0 = llvm.mlir.constant(0 : index) : !llvm.i32 - %1 = llvm.icmp "slt" %arg0, %0 : !llvm.i32 + %0 = llvm.mlir.constant(0 : index) : i32 + %1 = llvm.icmp "slt" %arg0, %0 : i32 // CHECK: %[[GTN_IF_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[SI_VAR_IF_1:.*]]) // CHECK: br i1 %[[IF_COND_VAR_1]], label %[[IF_COND_TRUE_BLOCK_1:.*]], label %[[IF_COND_FALSE_BLOCK_1:.*]] @@ -171,7 +171,7 @@ // CHECK: call void @[[OMP_OUTLINED_FN_IF_1]] // CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]]) // CHECK: br label %[[RETURN_BLOCK_IF_1]] - omp.parallel if(%1 : !llvm.i1) { + omp.parallel if(%1 : i1) { omp.barrier omp.terminator } @@ -303,22 +303,22 @@ // CHECK-LABEL: @wsloop_simple llvm.func @wsloop_simple(%arg0: !llvm.ptr) { - %0 = llvm.mlir.constant(42 : index) : !llvm.i64 - %1 = llvm.mlir.constant(10 : index) : !llvm.i64 - %2 = llvm.mlir.constant(1 : index) : !llvm.i64 + %0 = llvm.mlir.constant(42 : index) : i64 + %1 = llvm.mlir.constant(10 : index) : i64 + %2 = llvm.mlir.constant(1 : index) : i64 omp.parallel { "omp.wsloop"(%1, %0, %2) ( { - 
^bb0(%arg1: !llvm.i64): + ^bb0(%arg1: i64): // The form of the emitted IR is controlled by OpenMPIRBuilder and // tested there. Just check that the right functions are called. // CHECK: call i32 @__kmpc_global_thread_num // CHECK: call void @__kmpc_for_static_init_{{.*}}(%struct.ident_t* @[[$wsloop_loc_struct]], %3 = llvm.mlir.constant(2.000000e+00 : f32) : !llvm.float - %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, !llvm.i64) -> !llvm.ptr + %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr llvm.store %3, %4 : !llvm.ptr omp.yield // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* @[[$wsloop_loc_struct]], - }) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0, 0, 0]> : vector<9xi32>} : (!llvm.i64, !llvm.i64, !llvm.i64) -> () + }) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0, 0, 0]> : vector<9xi32>} : (i64, i64, i64) -> () omp.terminator } llvm.return diff --git a/mlir/test/Target/rocdl.mlir b/mlir/test/Target/rocdl.mlir --- a/mlir/test/Target/rocdl.mlir +++ b/mlir/test/Target/rocdl.mlir @@ -1,32 +1,32 @@ // RUN: mlir-translate -mlir-to-rocdlir %s | FileCheck %s -llvm.func @rocdl_special_regs() -> !llvm.i32 { +llvm.func @rocdl_special_regs() -> i32 { // CHECK-LABEL: rocdl_special_regs // CHECK: call i32 @llvm.amdgcn.workitem.id.x() - %1 = rocdl.workitem.id.x : !llvm.i32 + %1 = rocdl.workitem.id.x : i32 // CHECK: call i32 @llvm.amdgcn.workitem.id.y() - %2 = rocdl.workitem.id.y : !llvm.i32 + %2 = rocdl.workitem.id.y : i32 // CHECK: call i32 @llvm.amdgcn.workitem.id.z() - %3 = rocdl.workitem.id.z : !llvm.i32 + %3 = rocdl.workitem.id.z : i32 // CHECK: call i32 @llvm.amdgcn.workgroup.id.x() - %4 = rocdl.workgroup.id.x : !llvm.i32 + %4 = rocdl.workgroup.id.x : i32 // CHECK: call i32 @llvm.amdgcn.workgroup.id.y() - %5 = rocdl.workgroup.id.y : !llvm.i32 + %5 = rocdl.workgroup.id.y : i32 // CHECK: call i32 @llvm.amdgcn.workgroup.id.z() - %6 = rocdl.workgroup.id.z : !llvm.i32 + %6 = rocdl.workgroup.id.z : i32 // CHECK: call i64 @__ockl_get_local_size(i32 0) - %7 = rocdl.workgroup.dim.x : !llvm.i64 + %7 = rocdl.workgroup.dim.x : i64 // CHECK: call i64 @__ockl_get_local_size(i32 1) - %8 = rocdl.workgroup.dim.y : !llvm.i64 + %8 = rocdl.workgroup.dim.y : i64 // CHECK: call i64 @__ockl_get_local_size(i32 2) - %9 = rocdl.workgroup.dim.z : !llvm.i64 + %9 = rocdl.workgroup.dim.z : i64 // CHECK: call i64 @__ockl_get_global_size(i32 0) - %10 = rocdl.grid.dim.x : !llvm.i64 + %10 = rocdl.grid.dim.x : i64 // CHECK: call i64 @__ockl_get_global_size(i32 1) - %11 = rocdl.grid.dim.y : !llvm.i64 + %11 = rocdl.grid.dim.y : i64 // CHECK: call i64 @__ockl_get_global_size(i32 2) - %12 = rocdl.grid.dim.z : !llvm.i64 - llvm.return %1 : !llvm.i32 + %12 = rocdl.grid.dim.z : i64 + llvm.return %1 : i32 } llvm.func @kernel_func() attributes {gpu.kernel} { @@ -43,7 +43,7 @@ } llvm.func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float, - %arg2 : !llvm.vec<32 x float>, %arg3 : !llvm.i32, + %arg2 : !llvm.vec<32 x float>, %arg3 : i32, %arg4 : !llvm.vec<16 x float>, %arg5 : !llvm.vec<4 x float>, %arg6 : !llvm.vec<4 x half>, %arg7 : !llvm.vec<32 x i32>, %arg8 : !llvm.vec<16 x i32>, %arg9 : !llvm.vec<4 x i32>, @@ -52,109 +52,109 @@ // CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %{{.*}}, float %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) %r0 = rocdl.mfma.f32.32x32x1f32 %arg0, %arg1, %arg2, %arg3, %arg3, %arg3 : (!llvm.float, !llvm.float, !llvm.vec<32 x float>, - !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float> + i32, i32, i32) -> 
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x1f32(float %{{.*}}, float %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r1 = rocdl.mfma.f32.16x16x1f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
                             (!llvm.float, !llvm.float, !llvm.vec<16 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+                            i32, i32, i32) -> !llvm.vec<16 x float>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x4f32(float %{{.*}}, float %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r2 = rocdl.mfma.f32.16x16x4f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
                             (!llvm.float, !llvm.float, !llvm.vec<4 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+                            i32, i32, i32) -> !llvm.vec<4 x float>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float %{{.*}}, float %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r3 = rocdl.mfma.f32.4x4x1f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
                             (!llvm.float, !llvm.float, !llvm.vec<4 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+                            i32, i32, i32) -> !llvm.vec<4 x float>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x2f32(float %{{.*}}, float %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r4= rocdl.mfma.f32.32x32x2f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
                             (!llvm.float, !llvm.float, !llvm.vec<16 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+                            i32, i32, i32) -> !llvm.vec<16 x float>
 
   // CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r5 = rocdl.mfma.f32.32x32x4f16 %arg6, %arg6, %arg2, %arg3, %arg3, %arg3 :
                             (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+                            i32, i32, i32) -> !llvm.vec<32 x float>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r6 = rocdl.mfma.f32.16x16x4f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
                             (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+                            i32, i32, i32) -> !llvm.vec<16 x float>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r7 = rocdl.mfma.f32.4x4x4f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
                             (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+                            i32, i32, i32) -> !llvm.vec<4 x float>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x8f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r8 = rocdl.mfma.f32.32x32x8f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
                             (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+                            i32, i32, i32) -> !llvm.vec<16 x float>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r9 = rocdl.mfma.f32.16x16x16f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
                             (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+                            i32, i32, i32) -> !llvm.vec<4 x float>
 
   // CHECK: call <32 x i32> @llvm.amdgcn.mfma.i32.32x32x4i8(i32 %{{.*}}, i32 %{{.*}}, <32 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r10 = rocdl.mfma.i32.32x32x4i8 %arg3, %arg3, %arg7, %arg3, %arg3, %arg3 :
-                            (!llvm.i32, !llvm.i32, !llvm.vec<32 x i32>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x i32>
+                            (i32, i32, !llvm.vec<32 x i32>,
+                            i32, i32, i32) -> !llvm.vec<32 x i32>
 
   // CHECK: call <16 x i32> @llvm.amdgcn.mfma.i32.16x16x4i8(i32 %{{.*}}, i32 %{{.*}}, <16 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r11 = rocdl.mfma.i32.16x16x4i8 %arg3, %arg3, %arg8, %arg3, %arg3, %arg3 :
-                            (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32>
+                            (i32, i32, !llvm.vec<16 x i32>,
+                            i32, i32, i32) -> !llvm.vec<16 x i32>
 
   // CHECK: call <4 x i32> @llvm.amdgcn.mfma.i32.4x4x4i8(i32 %{{.*}}, i32 %{{.*}}, <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r12 = rocdl.mfma.i32.4x4x4i8 %arg3, %arg3, %arg9, %arg3, %arg3, %arg3 :
-                            (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32>
+                            (i32, i32, !llvm.vec<4 x i32>,
+                            i32, i32, i32) -> !llvm.vec<4 x i32>
 
   // CHECK: call <16 x i32> @llvm.amdgcn.mfma.i32.32x32x8i8(i32 %{{.*}}, i32 %{{.*}}, <16 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r13 = rocdl.mfma.i32.32x32x8i8 %arg3, %arg3, %arg8, %arg3, %arg3, %arg3 :
-                            (!llvm.i32, !llvm.i32, !llvm.vec<16 x i32>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x i32>
+                            (i32, i32, !llvm.vec<16 x i32>,
+                            i32, i32, i32) -> !llvm.vec<16 x i32>
 
   // CHECK: call <4 x i32> @llvm.amdgcn.mfma.i32.16x16x16i8(i32 %{{.*}}, i32 %{{.*}}, <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r14 = rocdl.mfma.i32.16x16x16i8 %arg3, %arg3, %arg9, %arg3, %arg3, %arg3 :
-                            (!llvm.i32, !llvm.i32, !llvm.vec<4 x i32>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x i32>
+                            (i32, i32, !llvm.vec<4 x i32>,
+                            i32, i32, i32) -> !llvm.vec<4 x i32>
 
   // CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r15 = rocdl.mfma.f32.32x32x2bf16 %arg10, %arg10, %arg2, %arg3, %arg3, %arg3 :
                             (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<32 x float>
+                            i32, i32, i32) -> !llvm.vec<32 x float>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r16 = rocdl.mfma.f32.16x16x2bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
                             (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+                            i32, i32, i32) -> !llvm.vec<16 x float>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r17 = rocdl.mfma.f32.4x4x2bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
                             (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+                            i32, i32, i32) -> !llvm.vec<4 x float>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x4bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r18 = rocdl.mfma.f32.32x32x4bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
                             (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<16 x float>
+                            i32, i32, i32) -> !llvm.vec<16 x float>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x8bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r19 = rocdl.mfma.f32.16x16x8bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
                             (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
-                            !llvm.i32, !llvm.i32, !llvm.i32) -> !llvm.vec<4 x float>
+                            i32, i32, i32) -> !llvm.vec<4 x float>
 
   llvm.return %r0 : !llvm.vec<32 x float>
 }
 
-llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : !llvm.i32,
-                       %offset : !llvm.i32, %glc : !llvm.i1,
-                       %slc : !llvm.i1, %vdata1 : !llvm.vec<1 x float>,
+llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : i32,
+                       %offset : i32, %glc : i1,
+                       %slc : i1, %vdata1 : !llvm.vec<1 x float>,
                        %vdata2 : !llvm.vec<2 x float>, %vdata4 : !llvm.vec<4 x float>) {
   // CHECK-LABEL: rocdl.mubuf
   // CHECK: call <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 %{{.*}}, i1 %{{.*}})
diff --git a/mlir/test/Transforms/test-convert-call-op.mlir b/mlir/test/Transforms/test-convert-call-op.mlir
--- a/mlir/test/Transforms/test-convert-call-op.mlir
+++ b/mlir/test/Transforms/test-convert-call-op.mlir
@@ -1,9 +1,9 @@
 // RUN: mlir-opt %s -test-convert-call-op | FileCheck %s
 
-// CHECK-LABEL: llvm.func @callee(!llvm.ptr<i8>) -> !llvm.i32
+// CHECK-LABEL: llvm.func @callee(!llvm.ptr<i8>) -> i32
 func private @callee(!test.test_type) -> i32
 
-// CHECK-NEXT: llvm.func @caller() -> !llvm.i32
+// CHECK-NEXT: llvm.func @caller() -> i32
 func @caller() -> i32 {
   %arg = "test.type_producer"() : () -> !test.test_type
   %out = call @callee(%arg) : (!test.test_type) -> i32
@@ -11,4 +11,4 @@
 }
 // CHECK-NEXT: [[ARG:%.*]] = llvm.mlir.null : !llvm.ptr<i8>
 // CHECK-NEXT: [[OUT:%.*]] = llvm.call @callee([[ARG]])
-// CHECK-SAME: : (!llvm.ptr<i8>) -> !llvm.i32
+// CHECK-SAME: : (!llvm.ptr<i8>) -> i32
diff --git a/mlir/test/lib/Transforms/TestConvertCallOp.cpp b/mlir/test/lib/Transforms/TestConvertCallOp.cpp
--- a/mlir/test/lib/Transforms/TestConvertCallOp.cpp
+++ b/mlir/test/lib/Transforms/TestConvertCallOp.cpp
@@ -45,8 +45,7 @@
     // Populate type conversions.
     LLVMTypeConverter type_converter(m.getContext());
     type_converter.addConversion([&](test::TestType type) {
-      return LLVM::LLVMPointerType::get(
-          LLVM::LLVMIntegerType::get(m.getContext(), 8));
+      return LLVM::LLVMPointerType::get(IntegerType::get(m.getContext(), 8));
    });
 
     // Populate patterns.
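
The `TestConvertCallOp.cpp` hunk above is the C++ face of this migration: integer types are now built as builtin `IntegerType`s instead of the removed `LLVM::LLVMIntegerType`, while pointer types remain LLVM dialect types. The following is a minimal, self-contained sketch of the same registration pattern, not part of the patch: the include path reflects where `LLVMTypeConverter` lived at this point in the tree, and builtin `IndexType` merely stands in for the test-only `test::TestType` so the snippet compiles on its own.

```c++
// Sketch: register a type conversion that maps a source type to !llvm.ptr<i8>.
// After this change, only the pointer wrapper comes from the LLVM dialect;
// the i8 element is a builtin IntegerType and prints without "!llvm.".
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

static void addExampleConversion(LLVMTypeConverter &converter) {
  // IndexType is used here only as a stand-in source type; conversions added
  // later take precedence, so this would shadow the converter's built-in
  // handling of index.
  converter.addConversion([](IndexType type) -> Type {
    return LLVM::LLVMPointerType::get(
        IntegerType::get(type.getContext(), 8));
  });
}
```

A type produced this way prints as `!llvm.ptr<i8>`, which is exactly the textual change running through the test diffs: the integer part loses its `!llvm.` prefix while the dialect wrapper keeps it.
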
diff --git a/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir b/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
--- a/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
+++ b/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
@@ -26,7 +26,7 @@
 }
 
 // External declarations.
-llvm.func @malloc(!llvm.i64) -> !llvm.ptr<i8>
+llvm.func @malloc(i64) -> !llvm.ptr<i8>
 llvm.func @free(!llvm.ptr<i8>)
 func private @printF32(%arg0: f32)
 func private @printComma()
diff --git a/mlir/test/mlir-cpu-runner/simple.mlir b/mlir/test/mlir-cpu-runner/simple.mlir
--- a/mlir/test/mlir-cpu-runner/simple.mlir
+++ b/mlir/test/mlir-cpu-runner/simple.mlir
@@ -15,7 +15,7 @@
 
 // Declarations of C library functions.
 llvm.func @fabsf(!llvm.float) -> !llvm.float
-llvm.func @malloc(!llvm.i64) -> !llvm.ptr<i8>
+llvm.func @malloc(i64) -> !llvm.ptr<i8>
 llvm.func @free(!llvm.ptr<i8>)
 
 // Check that a simple function with a nested call works.
@@ -28,8 +28,8 @@
 
 // Helper typed functions wrapping calls to "malloc" and "free".
 llvm.func @allocation() -> !llvm.ptr<float> {
-  %0 = llvm.mlir.constant(4 : index) : !llvm.i64
-  %1 = llvm.call @malloc(%0) : (!llvm.i64) -> !llvm.ptr<i8>
+  %0 = llvm.mlir.constant(4 : index) : i64
+  %1 = llvm.call @malloc(%0) : (i64) -> !llvm.ptr<i8>
   %2 = llvm.bitcast %1 : !llvm.ptr<i8> to !llvm.ptr<float>
   llvm.return %2 : !llvm.ptr<float>
 }
@@ -43,11 +43,11 @@
 // works.
 llvm.func @foo() -> !llvm.float {
   %0 = llvm.call @allocation() : () -> !llvm.ptr<float>
-  %1 = llvm.mlir.constant(0 : index) : !llvm.i64
+  %1 = llvm.mlir.constant(0 : index) : i64
   %2 = llvm.mlir.constant(1.234000e+03 : f32) : !llvm.float
-  %3 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+  %3 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
   llvm.store %2, %3 : !llvm.ptr<float>
-  %4 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
+  %4 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
   %5 = llvm.load %4 : !llvm.ptr<float>
   llvm.call @deallocation(%0) : (!llvm.ptr<float>) -> ()
   llvm.return %5 : !llvm.float
@@ -55,15 +55,15 @@
 // NOMAIN: 1.234000e+03
 
 // Check that i32 return type works
-llvm.func @int32_main() -> !llvm.i32 {
-  %0 = llvm.mlir.constant(42 : i32) : !llvm.i32
-  llvm.return %0 : !llvm.i32
+llvm.func @int32_main() -> i32 {
+  %0 = llvm.mlir.constant(42 : i32) : i32
+  llvm.return %0 : i32
 }
 // INT32MAIN: 42
 
 // Check that i64 return type works
-llvm.func @int64_main() -> !llvm.i64 {
-  %0 = llvm.mlir.constant(42 : i64) : !llvm.i64
-  llvm.return %0 : !llvm.i64
+llvm.func @int64_main() -> i64 {
+  %0 = llvm.mlir.constant(42 : i64) : i64
+  llvm.return %0 : i64
 }
 // INT64MAIN: 42
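
The runner tests show the same mixing in declaration form: `llvm.func @malloc(i64) -> !llvm.ptr<i8>` takes a builtin parameter type but returns an LLVM dialect pointer. As a rough sketch of what constructing that signature in C++ looks like after this change (same caveats as the previous snippet: the exact headers are assumptions of this vintage, though the `LLVMFunctionType`/`LLVMPointerType` builders match the LLVM dialect API):

```c++
// Sketch: build the @malloc function type (i64) -> !llvm.ptr<i8>.
// The i64 parameter is a builtin IntegerType; the result type and the
// function type itself are still LLVM dialect types.
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

using namespace mlir;

static LLVM::LLVMFunctionType getMallocType(MLIRContext *ctx) {
  Type i64Ty = IntegerType::get(ctx, 64);                              // i64
  Type i8PtrTy = LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8)); // !llvm.ptr<i8>
  return LLVM::LLVMFunctionType::get(i8PtrTy, {i64Ty}, /*isVarArg=*/false);
}
```

Passing `/*isVarArg=*/false` mirrors the non-variadic `@malloc` declarations in `bare_ptr_call_conv.mlir` and `simple.mlir` above.
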