diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -525,11 +525,11 @@
     IR dialect disallows them.

    The `call` instruction supports both direct and indirect calls. Direct calls
-    start with a function name (`@`-prefixed) and indirect calls start with an
-    SSA value (`%`-prefixed). The direct callee, if present, is stored as a
-    function attribute `callee`. The trailing type of the instruction is always
-    the MLIR function type, which may be different from the indirect callee that
-    has the wrapped LLVM IR function type.
+    start with a function name (`@`-prefixed) and indirect calls start with a
+    typed SSA value (`%`-prefixed) in parentheses. The direct callee, if
+    present, is stored as a function attribute `callee`. The trailing type of
+    the instruction is always the MLIR function type, which may be different
+    from the indirect callee that has the wrapped LLVM IR function type.

    Examples:

@@ -541,7 +541,7 @@
    llvm.call @bar(%0) : (f32) -> ()

    // Indirect call with an argument and without a result.
-    llvm.call %1(%0) : (f32) -> ()
+    llvm.call (%1: !llvm.ptr)(%0) : (f32) -> ()
    ```
  }];
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -939,7 +939,7 @@
   if (isDirect)
     p.printSymbolName(callee.value());
   else
-    p << getOperand(0);
+    p << '(' << getOperand(0) << ": " << getOperand(0).getType() << ')';

   p << '(' << getOperands().drop_front(isDirect ? 0 : 1) << ')';
   p << " to ";
@@ -954,30 +954,33 @@
                         getResultTypes());
 }

-/// ::= `llvm.invoke` (function-id | ssa-use) `(` ssa-use-list `)`
-///                   `to` bb-id (`[` ssa-use-and-type-list `]`)?
-///                   `unwind` bb-id (`[` ssa-use-and-type-list `]`)?
-///                   attribute-dict? `:` function-type
+// ::= `llvm.invoke` (function-id | `(` ssa-use-and-type `)`)
+//                   `(` ssa-use-list `)`
+//                   `to` bb-id (`[` ssa-use-and-type-list `]`)?
+//                   `unwind` bb-id (`[` ssa-use-and-type-list `]`)?
+//                   attribute-dict? `:` function-type
 ParseResult InvokeOp::parse(OpAsmParser &parser, OperationState &result) {
+  OpAsmParser::UnresolvedOperand funcPtr;
+  Type funcPtrType;
+  SymbolRefAttr funcAttr;
   SmallVector operands;
   FunctionType funcType;
-  SymbolRefAttr funcAttr;
   SMLoc trailingTypeLoc;
   Block *normalDest, *unwindDest;
   SmallVector normalOperands, unwindOperands;
   Builder &builder = parser.getBuilder();

-  // Parse an operand list that will, in practice, contain 0 or 1 operand. In
-  // case of an indirect call, there will be 1 operand before `(`. In case of a
-  // direct call, there will be no operands and the parser will stop at the
-  // function identifier without complaining.
-  if (parser.parseOperandList(operands))
-    return failure();
-  bool isDirect = operands.empty();
-
-  // Optionally parse a function identifier.
-  if (isDirect && parser.parseAttribute(funcAttr, "callee", result.attributes))
-    return failure();
+  if (succeeded(parser.parseOptionalLParen())) {
+    // Parse the function pointer operand and type of an indirect call.
+    if (parser.parseOperand(funcPtr) || parser.parseColonType(funcPtrType) ||
+        parser.parseRParen() ||
+        parser.resolveOperand(funcPtr, funcPtrType, result.operands))
+      return failure();
+  } else {
+    // Parse the function identifier of a direct call.
+    if (parser.parseAttribute(funcAttr, "callee", result.attributes))
+      return failure();
+  }

   if (parser.parseOperandList(operands, OpAsmParser::Delimiter::Paren) ||
       parser.parseKeyword("to") ||
@@ -988,61 +991,22 @@
       parser.getCurrentLocation(&trailingTypeLoc) || parser.parseType(funcType))
     return failure();

-  if (isDirect) {
-    // Make sure types match.
-    if (parser.resolveOperands(operands, funcType.getInputs(),
-                               parser.getNameLoc(), result.operands))
-      return failure();
-    result.addTypes(funcType.getResults());
-  } else {
-    // Construct the LLVM IR Dialect function type that the first operand
-    // should match.
-    if (funcType.getNumResults() > 1)
-      return parser.emitError(trailingTypeLoc,
-                              "expected function with 0 or 1 result");
-
-    Type llvmResultType;
-    if (funcType.getNumResults() == 0) {
-      llvmResultType = LLVM::LLVMVoidType::get(builder.getContext());
-    } else {
-      llvmResultType = funcType.getResult(0);
-      if (!isCompatibleType(llvmResultType))
-        return parser.emitError(trailingTypeLoc,
-                                "expected result to have LLVM type");
-    }
-
-    SmallVector argTypes;
-    argTypes.reserve(funcType.getNumInputs());
-    for (Type ty : funcType.getInputs()) {
-      if (isCompatibleType(ty))
-        argTypes.push_back(ty);
-      else
-        return parser.emitError(trailingTypeLoc,
-                                "expected LLVM types as inputs");
-    }
-
-    auto llvmFuncType = LLVM::LLVMFunctionType::get(llvmResultType, argTypes);
-    auto wrappedFuncType = LLVM::LLVMPointerType::get(llvmFuncType);
-
-    auto funcArguments = llvm::ArrayRef(operands).drop_front();
-
-    // Make sure that the first operand (indirect callee) matches the wrapped
-    // LLVM IR function type, and that the types of the other call operands
-    // match the types of the function arguments.
-    if (parser.resolveOperand(operands[0], wrappedFuncType, result.operands) ||
-        parser.resolveOperands(funcArguments, funcType.getInputs(),
-                               parser.getNameLoc(), result.operands))
-      return failure();
-
-    result.addTypes(llvmResultType);
-  }
+  if (funcType.getNumResults() > 1)
+    return parser.emitError(trailingTypeLoc,
+                            "expected function with 0 or 1 result");
+  if (parser.resolveOperands(operands, funcType.getInputs(),
+                             parser.getNameLoc(), result.operands))
+    return failure();
+  result.addTypes(funcType.getResults());

   result.addSuccessors({normalDest, unwindDest});
   result.addOperands(normalOperands);
   result.addOperands(unwindOperands);
+  size_t callOperandsSize =
+      result.operands.size() - normalOperands.size() - unwindOperands.size();
   result.addAttribute(InvokeOp::getOperandSegmentSizeAttr(),
                       builder.getDenseI32ArrayAttr(
-                          {static_cast(operands.size()),
+                          {static_cast(callOperandsSize),
                            static_cast(normalOperands.size()),
                            static_cast(unwindOperands.size())}));
   return success();
@@ -1108,8 +1072,8 @@
   p << ": " << getType();
 }

-/// ::= `llvm.landingpad` `cleanup`?
-///     ((`catch` | `filter`) operand-type ssa-use)* attribute-dict?
+// ::= `llvm.landingpad` `cleanup`?
+//     ((`catch` | `filter`) operand-type ssa-use)* attribute-dict?
 ParseResult LandingpadOp::parse(OpAsmParser &parser, OperationState &result) {
   // Check for cleanup
   if (succeeded(parser.parseOptionalKeyword("cleanup")))
@@ -1274,7 +1238,7 @@
   if (isDirect)
     p.printSymbolName(callee.value());
   else
-    p << getOperand(0);
+    p << '(' << getOperand(0) << ": " << getOperand(0).getType() << ')';

   auto args = getOperands().drop_front(isDirect ? 0 : 1);
   p << '(' << args << ')';
@@ -1285,26 +1249,27 @@
     p.printFunctionalType(args.getTypes(), getResultTypes());
 }

-// ::= `llvm.call` (function-id | ssa-use) `(` ssa-use-list `)`
-//     attribute-dict? `:` function-type
+// ::= `llvm.call` (function-id | `(` ssa-use-and-type `)`)
+//     `(` ssa-use-list `)` attribute-dict? `:` function-type
 ParseResult CallOp::parse(OpAsmParser &parser, OperationState &result) {
+  OpAsmParser::UnresolvedOperand funcPtr;
+  Type funcPtrType;
+  SymbolRefAttr funcAttr;
   SmallVector operands;
   Type type;
-  SymbolRefAttr funcAttr;
   SMLoc trailingTypeLoc;

-  // Parse an operand list that will, in practice, contain 0 or 1 operand. In
-  // case of an indirect call, there will be 1 operand before `(`. In case of a
-  // direct call, there will be no operands and the parser will stop at the
-  // function identifier without complaining.
-  if (parser.parseOperandList(operands))
-    return failure();
-  bool isDirect = operands.empty();
-
-  // Optionally parse a function identifier.
-  if (isDirect)
+  if (succeeded(parser.parseOptionalLParen())) {
+    // Parse the function pointer operand and type of an indirect call.
+    if (parser.parseOperand(funcPtr) || parser.parseColonType(funcPtrType) ||
+        parser.parseRParen() ||
+        parser.resolveOperand(funcPtr, funcPtrType, result.operands))
+      return failure();
+  } else {
+    // Parse the function identifier of a direct call.
     if (parser.parseAttribute(funcAttr, "callee", result.attributes))
       return failure();
+  }

   if (parser.parseOperandList(operands, OpAsmParser::Delimiter::Paren) ||
       parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() ||
@@ -1317,53 +1282,12 @@
   if (funcType.getNumResults() > 1)
     return parser.emitError(trailingTypeLoc,
                             "expected function with 0 or 1 result");
-  if (isDirect) {
-    // Make sure types match.
-    if (parser.resolveOperands(operands, funcType.getInputs(),
-                               parser.getNameLoc(), result.operands))
-      return failure();
-    if (funcType.getNumResults() != 0 &&
-        !funcType.getResult(0).isa())
-      result.addTypes(funcType.getResults());
-  } else {
-    Builder &builder = parser.getBuilder();
-    Type llvmResultType;
-    if (funcType.getNumResults() == 0) {
-      llvmResultType = LLVM::LLVMVoidType::get(builder.getContext());
-    } else {
-      llvmResultType = funcType.getResult(0);
-      if (!isCompatibleType(llvmResultType))
-        return parser.emitError(trailingTypeLoc,
-                                "expected result to have LLVM type");
-    }
-
-    SmallVector argTypes;
-    argTypes.reserve(funcType.getNumInputs());
-    for (int i = 0, e = funcType.getNumInputs(); i < e; ++i) {
-      auto argType = funcType.getInput(i);
-      if (!isCompatibleType(argType))
-        return parser.emitError(trailingTypeLoc,
-                                "expected LLVM types as inputs");
-      argTypes.push_back(argType);
-    }
-    auto llvmFuncType = LLVM::LLVMFunctionType::get(llvmResultType, argTypes);
-    auto wrappedFuncType = LLVM::LLVMPointerType::get(llvmFuncType);
-
-    auto funcArguments =
-        ArrayRef(operands).drop_front();
-
-    // Make sure that the first operand (indirect callee) matches the wrapped
-    // LLVM IR function type, and that the types of the other call operands
-    // match the types of the function arguments.
-    if (parser.resolveOperand(operands[0], wrappedFuncType, result.operands) ||
-        parser.resolveOperands(funcArguments, funcType.getInputs(),
-                               parser.getNameLoc(), result.operands))
-      return failure();
-
-    if (!llvmResultType.isa())
-      result.addTypes(llvmResultType);
-  }
-
+  if (parser.resolveOperands(operands, funcType.getInputs(),
+                             parser.getNameLoc(), result.operands))
+    return failure();
+  if (funcType.getNumResults() != 0 &&
+      !funcType.getResult(0).isa())
+    result.addTypes(funcType.getResults());
   return success();
 }

@@ -2323,25 +2247,27 @@
   p << ' ' << stringifyAtomicBinOp(getBinOp()) << ' ' << getPtr() << ", "
     << getVal() << ' ' << stringifyAtomicOrdering(getOrdering()) << ' ';
   p.printOptionalAttrDict((*this)->getAttrs(), {"bin_op", "ordering"});
-  p << " : " << getRes().getType();
+  p << " : " << getPtr().getType() << ", " << getVal().getType();
 }

 // ::= `llvm.atomicrmw` keyword ssa-use `,` ssa-use keyword
-//     attribute-dict? `:` type
+//     attribute-dict? `:` type, type
 ParseResult AtomicRMWOp::parse(OpAsmParser &parser, OperationState &result) {
-  Type type;
-  OpAsmParser::UnresolvedOperand ptr, val;
-  if (parseAtomicBinOp(parser, result, "bin_op") || parser.parseOperand(ptr) ||
-      parser.parseComma() || parser.parseOperand(val) ||
+  if (parseAtomicBinOp(parser, result, "bin_op"))
+    return failure();
+
+  SmallVector argumentTypes;
+  SmallVector operands;
+  SMLoc operandsLoc = parser.getCurrentLocation();
+  if (parser.parseOperandList(operands, /*requiredOperandCount=*/2) ||
       parseAtomicOrdering(parser, result, "ordering") ||
      parser.parseOptionalAttrDict(result.attributes) ||
-      parser.parseColonType(type) ||
-      parser.resolveOperand(ptr, LLVM::LLVMPointerType::get(type),
-                            result.operands) ||
-      parser.resolveOperand(val, type, result.operands))
+      parser.parseColonTypeList(argumentTypes) ||
+      parser.resolveOperands(operands, argumentTypes, operandsLoc,
+                             result.operands))
     return failure();
-  result.addTypes(type);
+  result.addTypes(argumentTypes.back());
   return success();
 }

@@ -2393,32 +2319,30 @@
     << stringifyAtomicOrdering(getFailureOrdering());
   p.printOptionalAttrDict((*this)->getAttrs(),
                           {"success_ordering", "failure_ordering"});
-  p << " : " << getVal().getType();
+  p << " : " << getPtr().getType() << ", " << getCmp().getType() << ", "
+    << getVal().getType();
 }

-// ::= `llvm.cmpxchg` ssa-use `,` ssa-use `,` ssa-use
-//     keyword keyword attribute-dict? `:` type
+// ::= `llvm.cmpxchg` ssa-use `,` ssa-use `,` ssa-use
+//     keyword keyword attribute-dict? `:` type, type, type
 ParseResult AtomicCmpXchgOp::parse(OpAsmParser &parser,
                                    OperationState &result) {
   auto &builder = parser.getBuilder();
-  Type type;
-  OpAsmParser::UnresolvedOperand ptr, cmp, val;
-  if (parser.parseOperand(ptr) || parser.parseComma() ||
-      parser.parseOperand(cmp) || parser.parseComma() ||
-      parser.parseOperand(val) ||
+  SmallVector argumentTypes;
+  SmallVector operands;
+  SMLoc operandsLoc = parser.getCurrentLocation();
+  if (parser.parseOperandList(operands, /*requiredOperandCount=*/3) ||
       parseAtomicOrdering(parser, result, "success_ordering") ||
       parseAtomicOrdering(parser, result, "failure_ordering") ||
       parser.parseOptionalAttrDict(result.attributes) ||
-      parser.parseColonType(type) ||
-      parser.resolveOperand(ptr, LLVM::LLVMPointerType::get(type),
-                            result.operands) ||
-      parser.resolveOperand(cmp, type, result.operands) ||
-      parser.resolveOperand(val, type, result.operands))
+      parser.parseColonTypeList(argumentTypes) ||
+      parser.resolveOperands(operands, argumentTypes, operandsLoc,
+                             result.operands))
     return failure();

   auto boolType = IntegerType::get(builder.getContext(), 1);
-  auto resultType =
-      LLVMStructType::getLiteral(builder.getContext(), {type, boolType});
+  auto resultType = LLVMStructType::getLiteral(
+      builder.getContext(), {argumentTypes.back(), boolType});
   result.addTypes(resultType);

   return success();
diff --git a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
--- a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
@@ -52,7 +52,7 @@
 func.func @indirect_const_call(%arg0: i32) {
 // CHECK-NEXT: %[[ADDR:.*]] = llvm.mlir.addressof @body : !llvm.ptr>
   %0 = constant @body : (i32) -> ()
-// CHECK-NEXT: llvm.call %[[ADDR]](%[[ARG0:.*]]) : (i32) -> ()
+// CHECK-NEXT: llvm.call (%[[ADDR]]: !llvm.ptr>)(%[[ARG0:.*]]) : (i32) -> ()
   call_indirect %0(%arg0) : (i32) -> ()
 // CHECK-NEXT: llvm.return
   return
@@ -60,7 +60,7 @@

 // CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr>, %arg1: f32) -> i32 {
 func.func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 {
-// CHECK-NEXT: %0 = llvm.call %arg0(%arg1) : (f32) -> i32
+// CHECK-NEXT: %0 = llvm.call (%arg0: !llvm.ptr>)(%arg1) : (f32) -> i32
   %0 = call_indirect %arg0(%arg1) : (f32) -> i32
 // CHECK-NEXT: llvm.return %0 : i32
   return %0 : i32
diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -370,7 +370,7 @@
 // CHECK-NEXT: llvm.br ^bb1([[init]] : i32)
 // CHECK-NEXT: ^bb1([[loaded:%.*]]: i32):
 // CHECK-NEXT: [[pair:%.*]] = llvm.cmpxchg %{{.*}}, [[loaded]], [[loaded]]
-// CHECK-SAME: acq_rel monotonic : i32
+// CHECK-SAME: acq_rel monotonic : !llvm.ptr, i32, i32
 // CHECK-NEXT: [[new:%.*]] = llvm.extractvalue [[pair]][0]
 // CHECK-NEXT: [[ok:%.*]] = llvm.extractvalue [[pair]][1]
 // CHECK-NEXT: llvm.cond_br [[ok]], ^bb2, ^bb1([[new]] : i32)
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -176,7 +176,7 @@
 func.func @call_non_function_type(%callee : !llvm.func, %arg : i8) {
   // expected-error@+1 {{expected function type}}
-  llvm.call %callee(%arg) : !llvm.func
+  llvm.call (%callee: !llvm.func)(%arg) : !llvm.func
   llvm.return
 }

@@ -190,9 +190,9 @@

 // -----

-func.func @call_non_function_type(%callee : !llvm.ptr, %arg : i8) {
+func.func @call_non_function_type(%callee : !llvm.ptr, %arg : i8) {
   // expected-error@+1 {{expected function type}}
-  llvm.call %callee(%arg) : !llvm.func
+  llvm.call (%callee: !llvm.ptr)(%arg) : !llvm.func
   llvm.return
 }

@@ -216,7 +216,7 @@

 // -----

-func.func @call_non_llvm_indirect(%arg0 : tensor<*xi32>) {
+func.func @call_non_llvm_arg(%arg0 : tensor<*xi32>) {
   // expected-error@+1 {{'llvm.call' op operand #0 must be LLVM dialect-compatible type}}
   "llvm.call"(%arg0) : (tensor<*xi32>) -> ()
   llvm.return
 }

@@ -224,6 +224,14 @@

 // -----

+func.func @call_non_llvm_res(%callee : !llvm.ptr) {
+  // expected-error@+1 {{'llvm.call' op result #0 must be LLVM dialect-compatible type}}
+  llvm.call (%callee: !llvm.ptr)() : () -> (tensor<*xi32>)
+  llvm.return
+}
+
+// -----
+
 llvm.func @callee_func(i8) -> ()

 func.func @callee_arg_mismatch(%arg0 : i32) {
@@ -260,25 +268,9 @@

 // -----

-func.func @call_too_many_results(%callee : () -> (i32,i32)) {
+func.func @call_too_many_results(%callee : !llvm.ptr) {
   // expected-error@+1 {{expected function with 0 or 1 result}}
-  llvm.call %callee() : () -> (i32, i32)
-  llvm.return
-}
-
-// -----
-
-func.func @call_non_llvm_result(%callee : () -> (tensor<*xi32>)) {
-  // expected-error@+1 {{expected result to have LLVM type}}
-  llvm.call %callee() : () -> (tensor<*xi32>)
-  llvm.return
-}
-
-// -----
-
-func.func @call_non_llvm_input(%callee : (tensor<*xi32>) -> (), %arg : tensor<*xi32>) {
-  // expected-error@+1 {{expected LLVM types as inputs}}
-  llvm.call %callee(%arg) : (tensor<*xi32>) -> ()
+  llvm.call (%callee : !llvm.ptr)() : () -> (i32, i32)
   llvm.return
 }

@@ -593,7 +585,7 @@

 func.func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr, %i32 : i32) {
   // expected-error@+1 {{expected LLVM IR floating point type}}
-  %0 = llvm.atomicrmw fadd %i32_ptr, %i32 unordered : i32
+  %0 = llvm.atomicrmw fadd %i32_ptr, %i32 unordered : !llvm.ptr, i32
   llvm.return
 }

@@ -601,7 +593,7 @@

 func.func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr, %i1 : i1) {
   // expected-error@+1 {{unexpected LLVM IR type for 'xchg' bin_op}}
-  %0 = llvm.atomicrmw xchg %i1_ptr, %i1 unordered : i1
+  %0 = llvm.atomicrmw xchg %i1_ptr, %i1 unordered : !llvm.ptr, i1
   llvm.return
 }

@@ -609,7 +601,7 @@

 func.func @atomicrmw_expected_int(%f32_ptr : !llvm.ptr, %f32 : f32) {
   // expected-error@+1 {{expected LLVM IR integer type}}
-  %0 = llvm.atomicrmw max %f32_ptr, %f32 unordered : f32
+  %0 = llvm.atomicrmw max %f32_ptr, %f32 unordered : !llvm.ptr, f32
   llvm.return
 }

@@ -640,7 +632,7 @@

 func.func @cmpxchg_unexpected_type(%i1_ptr : !llvm.ptr, %i1 : i1) {
   // expected-error@+1 {{unexpected LLVM IR type}}
-  %0 = llvm.cmpxchg %i1_ptr, %i1, %i1 monotonic monotonic : i1
+  %0 = llvm.cmpxchg %i1_ptr, %i1, %i1 monotonic monotonic : !llvm.ptr, i1, i1
   llvm.return
 }

@@ -648,7 +640,7 @@

 func.func @cmpxchg_at_least_monotonic_success(%i32_ptr : !llvm.ptr, %i32 : i32) {
   // expected-error@+1 {{ordering must be at least 'monotonic'}}
-  %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 unordered monotonic : i32
+  %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 unordered monotonic : !llvm.ptr, i32, i32
   llvm.return
 }

@@ -656,7 +648,7 @@

 func.func @cmpxchg_at_least_monotonic_failure(%i32_ptr : !llvm.ptr, %i32 : i32) {
   // expected-error@+1 {{ordering must be at least 'monotonic'}}
-  %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 monotonic unordered : i32
+  %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 monotonic unordered : !llvm.ptr, i32, i32
   llvm.return
 }

@@ -664,7 +656,7 @@

 func.func @cmpxchg_failure_release(%i32_ptr : !llvm.ptr, %i32 : i32) {
   // expected-error@+1 {{failure ordering cannot be 'release' or 'acq_rel'}}
-  %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel release : i32
+  %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel release : !llvm.ptr, i32, i32
   llvm.return
 }

@@ -672,7 +664,7 @@

 func.func @cmpxchg_failure_acq_rel(%i32_ptr : !llvm.ptr, %i32 : i32) {
   // expected-error@+1 {{failure ordering cannot be 'release' or 'acq_rel'}}
-  %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel acq_rel : i32
+  %0 = llvm.cmpxchg %i32_ptr, %i32, %i32 acq_rel acq_rel : !llvm.ptr, i32, i32
   llvm.return
 }

diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -65,14 +65,17 @@
 // CHECK: %[[STRUCT:.*]] = llvm.call @foo(%[[I32]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
 // CHECK: %[[VALUE:.*]] = llvm.extractvalue %[[STRUCT]][0] : !llvm.struct<(i32, f64, i32)>
 // CHECK: %[[NEW_STRUCT:.*]] = llvm.insertvalue %[[VALUE]], %[[STRUCT]][2] : !llvm.struct<(i32, f64, i32)>
-// CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @foo : !llvm.ptr (i32)>>
-// CHECK: %{{.*}} = llvm.call %[[FUNC]](%[[I32]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
+// CHECK: %[[FUNC_TYPED:.*]] = llvm.mlir.addressof @foo : !llvm.ptr (i32)>>
+// CHECK: %{{.*}} = llvm.call (%[[FUNC_TYPED]]: !llvm.ptr (i32)>>)(%[[I32]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
+// CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @foo : !llvm.ptr
+// CHECK: %{{.*}} = llvm.call (%[[FUNC]]: !llvm.ptr)(%[[I32]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
   %17 = llvm.call @foo(%arg0) : (i32) -> !llvm.struct<(i32, f64, i32)>
   %18 = llvm.extractvalue %17[0] : !llvm.struct<(i32, f64, i32)>
   %19 = llvm.insertvalue %18, %17[2] : !llvm.struct<(i32, f64, i32)>
   %20 = llvm.mlir.addressof @foo : !llvm.ptr (i32)>>
-  %21 = llvm.call %20(%arg0) : (i32) -> !llvm.struct<(i32, f64, i32)>
-
+  %21 = llvm.call (%20: !llvm.ptr (i32)>>)(%arg0) : (i32) -> !llvm.struct<(i32, f64, i32)>
+  %35 = llvm.mlir.addressof @foo : !llvm.ptr
+  %36 = llvm.call (%35: !llvm.ptr)(%arg0) : (i32) -> !llvm.struct<(i32, f64, i32)>

 // Terminator operations and their successors.
 //
@@ -341,16 +344,30 @@
 }

 // CHECK-LABEL: @atomicrmw
-func.func @atomicrmw(%ptr : !llvm.ptr, %val : f32) {
-  // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : f32
-  %0 = llvm.atomicrmw fadd %ptr, %val monotonic : f32
+func.func @atomicrmw(%ptr : !llvm.ptr, %val : f32) {
+  // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
+  %0 = llvm.atomicrmw fadd %ptr, %val monotonic : !llvm.ptr, f32
+  llvm.return
+}
+
+// CHECK-LABEL: @typed_ptr_atomicrmw
+func.func @typed_ptr_atomicrmw(%ptr : !llvm.ptr, %val : f32) {
+  // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
+  %0 = llvm.atomicrmw fadd %ptr, %val monotonic : !llvm.ptr, f32
   llvm.return
 }

 // CHECK-LABEL: @cmpxchg
-func.func @cmpxchg(%ptr : !llvm.ptr, %cmp : i32, %new : i32) {
-  // CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : i32
-  %0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : i32
+func.func @cmpxchg(%ptr : !llvm.ptr, %cmp : i32, %new : i32) {
+  // CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : !llvm.ptr, i32, i32
+  %0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : !llvm.ptr, i32, i32
+  llvm.return
+}
+
+// CHECK-LABEL: @typed_ptr_cmpxchg
+func.func @typed_ptr_cmpxchg(%ptr : !llvm.ptr, %cmp : i32, %new : i32) {
+  // CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : !llvm.ptr, i32, i32
+  %0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : !llvm.ptr, i32, i32
   llvm.return
 }

@@ -401,8 +418,22 @@
   llvm.invoke @bar(%8, %6, %4) to ^bb2 unwind ^bb1 : (!llvm.ptr, !llvm.ptr, !llvm.ptr) -> ()

 // CHECK: ^[[BB4:.*]]:
-// CHECK: llvm.return %[[a0]] : i32
+// CHECK: %[[FUNC_TYPED:.*]] = llvm.mlir.addressof @foo : !llvm.ptr (i32)>>
+// CHECK: %{{.*}} = llvm.invoke (%[[FUNC_TYPED]]: !llvm.ptr (i32)>>)
 ^bb4:
+  %12 = llvm.mlir.addressof @foo : !llvm.ptr (i32)>>
+  %13 = llvm.invoke (%12: !llvm.ptr (i32)>>)(%7) to ^bb2 unwind ^bb1 : (i32) -> !llvm.struct<(i32, f64, i32)>
+
+// CHECK: ^[[BB5:.*]]:
+// CHECK: %[[FUNC_TYPED:.*]] = llvm.mlir.addressof @foo : !llvm.ptr
+// CHECK: %{{.*}} = llvm.invoke (%[[FUNC_TYPED]]: !llvm.ptr)
+^bb5:
+  %14 = llvm.mlir.addressof @foo : !llvm.ptr
+  %15 = llvm.invoke (%14: !llvm.ptr)(%7) to ^bb2 unwind ^bb1 : (i32) -> !llvm.struct<(i32, f64, i32)>
+
+// CHECK: ^[[BB4:.*]]:
+// CHECK: llvm.return %[[a0]] : i32
+^bb6:
   llvm.return %0 : i32
 }
diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir
--- a/mlir/test/Dialect/OpenMP/invalid.mlir
+++ b/mlir/test/Dialect/OpenMP/invalid.mlir
@@ -524,7 +524,7 @@
 atomic {
 ^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
   %2 = llvm.load %arg3 : !llvm.ptr
-  llvm.atomicrmw fadd %arg2, %2 monotonic : f32
+  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
   omp.yield
 }

@@ -1288,7 +1288,7 @@
 atomic {
 ^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
   %2 = llvm.load %arg3 : !llvm.ptr
-  llvm.atomicrmw add %arg2, %2 monotonic : i32
+  llvm.atomicrmw add %arg2, %2 monotonic : !llvm.ptr, i32
   omp.yield
 }

diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -556,7 +556,7 @@
 atomic {
 ^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
   %2 = llvm.load %arg3 : !llvm.ptr
-  llvm.atomicrmw fadd %arg2, %2 monotonic : f32
+  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
   omp.yield
 }

diff --git a/mlir/test/Target/LLVMIR/Import/constant.ll b/mlir/test/Target/LLVMIR/Import/constant.ll
--- a/mlir/test/Target/LLVMIR/Import/constant.ll
+++ b/mlir/test/Target/LLVMIR/Import/constant.ll
@@ -131,7 +131,7 @@
   store ptr @callee, ptr %1
   ; CHECK: %[[INDIR:.*]] = llvm.load %[[PTR]] : !llvm.ptr -> !llvm.ptr
   %2 = load ptr, ptr %1
-  ; CHECK: llvm.call %[[INDIR]]()
+  ; CHECK: llvm.call (%[[INDIR]]: !llvm.ptr)()
   %3 = call i32 %2()
   ret i32 %3
 }
@@ -149,7 +149,7 @@
   store ptr @callee, ptr %1
   ; CHECK: %[[INDIR:.*]] = llvm.load %[[PTR]] : !llvm.ptr -> !llvm.ptr
   %2 = load ptr, ptr %1
-  ; CHECK: llvm.call %[[INDIR]]()
+  ; CHECK: llvm.call (%[[INDIR]]: !llvm.ptr)()
   %3 = call i32 %2()
   ret i32 %3
 }
diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -367,31 +367,31 @@
 ; CHECK-SAME: %[[PTR2:[a-zA-Z0-9]+]]
 ; CHECK-SAME: %[[VAL2:[a-zA-Z0-9]+]]
 define void @atomic_rmw(ptr %ptr1, i32 %val1, ptr %ptr2, float %val2) {
-  ; CHECK: llvm.atomicrmw xchg %[[PTR1]], %[[VAL1]] acquire : i32
+  ; CHECK: llvm.atomicrmw xchg %[[PTR1]], %[[VAL1]] acquire
   %1 = atomicrmw xchg ptr %ptr1, i32 %val1 acquire
-  ; CHECK: llvm.atomicrmw add %[[PTR1]], %[[VAL1]] release : i32
+  ; CHECK: llvm.atomicrmw add %[[PTR1]], %[[VAL1]] release
   %2 = atomicrmw add ptr %ptr1, i32 %val1 release
-  ; CHECK: llvm.atomicrmw sub %[[PTR1]], %[[VAL1]] acq_rel : i32
+  ; CHECK: llvm.atomicrmw sub %[[PTR1]], %[[VAL1]] acq_rel
   %3 = atomicrmw sub ptr %ptr1, i32 %val1 acq_rel
-  ; CHECK: llvm.atomicrmw _and %[[PTR1]], %[[VAL1]] seq_cst : i32
+  ; CHECK: llvm.atomicrmw _and %[[PTR1]], %[[VAL1]] seq_cst
   %4 = atomicrmw and ptr %ptr1, i32 %val1 seq_cst
-  ; CHECK: llvm.atomicrmw nand %[[PTR1]], %[[VAL1]] acquire : i32
+  ; CHECK: llvm.atomicrmw nand %[[PTR1]], %[[VAL1]] acquire
   %5 = atomicrmw nand ptr %ptr1, i32 %val1 acquire
-  ; CHECK: llvm.atomicrmw _or %[[PTR1]], %[[VAL1]] acquire : i32
+  ; CHECK: llvm.atomicrmw _or %[[PTR1]], %[[VAL1]] acquire
   %6 = atomicrmw or ptr %ptr1, i32 %val1 acquire
-  ; CHECK: llvm.atomicrmw _xor %[[PTR1]], %[[VAL1]] acquire : i32
+  ; CHECK: llvm.atomicrmw _xor %[[PTR1]], %[[VAL1]] acquire
   %7 = atomicrmw xor ptr %ptr1, i32 %val1 acquire
-  ; CHECK: llvm.atomicrmw max %[[PTR1]], %[[VAL1]] acquire : i32
+  ; CHECK: llvm.atomicrmw max %[[PTR1]], %[[VAL1]] acquire
   %8 = atomicrmw max ptr %ptr1, i32 %val1 acquire
-  ; CHECK: llvm.atomicrmw min %[[PTR1]], %[[VAL1]] acquire : i32
+  ; CHECK: llvm.atomicrmw min %[[PTR1]], %[[VAL1]] acquire
   %9 = atomicrmw min ptr %ptr1, i32 %val1 acquire
-  ; CHECK: llvm.atomicrmw umax %[[PTR1]], %[[VAL1]] acquire : i32
+  ; CHECK: llvm.atomicrmw umax %[[PTR1]], %[[VAL1]] acquire
   %10 = atomicrmw umax ptr %ptr1, i32 %val1 acquire
-  ; CHECK: llvm.atomicrmw umin %[[PTR1]], %[[VAL1]] acquire : i32
+  ; CHECK: llvm.atomicrmw umin %[[PTR1]], %[[VAL1]] acquire
   %11 = atomicrmw umin ptr %ptr1, i32 %val1 acquire
-  ; CHECK: llvm.atomicrmw fadd %[[PTR2]], %[[VAL2]] acquire : f32
+  ; CHECK: llvm.atomicrmw fadd %[[PTR2]], %[[VAL2]] acquire
   %12 = atomicrmw fadd ptr %ptr2, float %val2 acquire
-  ; CHECK: llvm.atomicrmw fsub %[[PTR2]], %[[VAL2]] acquire : f32
+  ; CHECK: llvm.atomicrmw fsub %[[PTR2]], %[[VAL2]] acquire
   %13 = atomicrmw fsub ptr %ptr2, float %val2 acquire
   ret void
 }
@@ -403,9 +403,9 @@
 ; CHECK-SAME: %[[VAL1:[a-zA-Z0-9]+]]
 ; CHECK-SAME: %[[VAL2:[a-zA-Z0-9]+]]
 define void @atomic_cmpxchg(ptr %ptr1, i32 %val1, i32 %val2) {
-  ; CHECK: llvm.cmpxchg %[[PTR1]], %[[VAL1]], %[[VAL2]] seq_cst seq_cst : i32
+  ; CHECK: llvm.cmpxchg %[[PTR1]], %[[VAL1]], %[[VAL2]] seq_cst seq_cst
   %1 = cmpxchg ptr %ptr1, i32 %val1, i32 %val2 seq_cst seq_cst
-  ; CHECK: llvm.cmpxchg %[[PTR1]], %[[VAL1]], %[[VAL2]] monotonic seq_cst : i32
+  ; CHECK: llvm.cmpxchg %[[PTR1]], %[[VAL1]], %[[VAL2]] monotonic seq_cst
   %2 = cmpxchg ptr %ptr1, i32 %val1, i32 %val2 monotonic seq_cst
   ret void
 }
@@ -429,7 +429,7 @@
 ; CHECK-SAME: %[[PTR:[a-zA-Z0-9]+]]
 define void @call_fn_ptr(ptr %fn) {
   ; CHECK: %[[C0:[0-9]+]] = llvm.mlir.constant(0 : i16) : i16
-  ; CHECK: llvm.call %[[PTR]](%[[C0]])
+  ; CHECK: llvm.call (%[[PTR]]: !llvm.ptr)(%[[C0]])
   call void %fn(i16 0)
   ret void
 }
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1009,7 +1009,7 @@
 llvm.func @indirect_const_call(%arg0: i64) {
 // CHECK-NEXT:  call void @body(i64 %0)
   %0 = llvm.mlir.addressof @body : !llvm.ptr>
-  llvm.call %0(%arg0) : (i64) -> ()
+  llvm.call (%0: !llvm.ptr>)(%arg0) : (i64) -> ()
 // CHECK-NEXT:  ret void
   llvm.return
 }
@@ -1017,7 +1017,7 @@
 // CHECK-LABEL: define i32 @indirect_call(ptr {{%.*}}, float {{%.*}})
 llvm.func @indirect_call(%arg0: !llvm.ptr>, %arg1: f32) -> i32 {
 // CHECK-NEXT:  %3 = call i32 %0(float %1)
-  %0 = llvm.call %arg0(%arg1) : (f32) -> i32
+  %0 = llvm.call (%arg0: !llvm.ptr>)(%arg1) : (f32) -> i32
 // CHECK-NEXT:  ret i32 %3
   llvm.return %0 : i32
 }
@@ -1347,38 +1347,38 @@
                       %f32_ptr : !llvm.ptr, %f32 : f32,
                       %i32_ptr : !llvm.ptr, %i32 : i32) {
   // CHECK: atomicrmw fadd ptr %{{.*}}, float %{{.*}} monotonic
-  %0 = llvm.atomicrmw fadd %f32_ptr, %f32 monotonic : f32
+  %0 = llvm.atomicrmw fadd %f32_ptr, %f32 monotonic : !llvm.ptr, f32
   // CHECK: atomicrmw fsub ptr %{{.*}}, float %{{.*}} monotonic
-  %1 = llvm.atomicrmw fsub %f32_ptr, %f32 monotonic : f32
+  %1 = llvm.atomicrmw fsub %f32_ptr, %f32 monotonic : !llvm.ptr, f32
   // CHECK: atomicrmw xchg ptr %{{.*}}, float %{{.*}} monotonic
-  %2 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : f32
+  %2 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : !llvm.ptr, f32
   // CHECK: atomicrmw add ptr %{{.*}}, i32 %{{.*}} acquire
-  %3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : i32
+  %3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : !llvm.ptr, i32
   // CHECK: atomicrmw sub ptr %{{.*}}, i32 %{{.*}} release
-  %4 = llvm.atomicrmw sub %i32_ptr, %i32 release : i32
+  %4 = llvm.atomicrmw sub %i32_ptr, %i32 release : !llvm.ptr, i32
   // CHECK: atomicrmw and ptr %{{.*}}, i32 %{{.*}} acq_rel
-  %5 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : i32
+  %5 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : !llvm.ptr, i32
   // CHECK: atomicrmw nand ptr %{{.*}}, i32 %{{.*}} seq_cst
-  %6 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : i32
+  %6 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : !llvm.ptr, i32
   // CHECK: atomicrmw or ptr %{{.*}}, i32 %{{.*}} monotonic
-  %7 = llvm.atomicrmw _or %i32_ptr, %i32 monotonic : i32
+  %7 = llvm.atomicrmw _or %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw xor ptr %{{.*}}, i32 %{{.*}} monotonic
-  %8 = llvm.atomicrmw _xor %i32_ptr, %i32 monotonic : i32
+  %8 = llvm.atomicrmw _xor %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw max ptr %{{.*}}, i32 %{{.*}} monotonic
-  %9 = llvm.atomicrmw max %i32_ptr, %i32 monotonic : i32
+  %9 = llvm.atomicrmw max %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw min ptr %{{.*}}, i32 %{{.*}} monotonic
-  %10 = llvm.atomicrmw min %i32_ptr, %i32 monotonic : i32
+  %10 = llvm.atomicrmw min %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw umax ptr %{{.*}}, i32 %{{.*}} monotonic
-  %11 = llvm.atomicrmw umax %i32_ptr, %i32 monotonic : i32
+  %11 = llvm.atomicrmw umax %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   // CHECK: atomicrmw umin ptr %{{.*}}, i32 %{{.*}} monotonic
-  %12 = llvm.atomicrmw umin %i32_ptr, %i32 monotonic : i32
+  %12 = llvm.atomicrmw umin %i32_ptr, %i32 monotonic : !llvm.ptr, i32
   llvm.return
 }

 // CHECK-LABEL: @cmpxchg
 llvm.func @cmpxchg(%ptr : !llvm.ptr, %cmp : i32, %val: i32) {
   // CHECK: cmpxchg ptr %{{.*}}, i32 %{{.*}}, i32 %{{.*}} acq_rel monotonic
-  %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : i32
+  %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : !llvm.ptr, i32, i32
   // CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 0
   %1 = llvm.extractvalue %0[0] : !llvm.struct<(i32, i1)>
   // CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 1
diff --git a/mlir/test/Target/LLVMIR/openmp-reduction.mlir b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
--- a/mlir/test/Target/LLVMIR/openmp-reduction.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
@@ -17,7 +17,7 @@
 atomic {
 ^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
   %2 = llvm.load %arg3 : !llvm.ptr
-  llvm.atomicrmw fadd %arg2, %2 monotonic : f32
+  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
   omp.yield
 }

@@ -90,7 +90,7 @@
 atomic {
 ^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
   %2 = llvm.load %arg3 : !llvm.ptr
-  llvm.atomicrmw fadd %arg2, %2 monotonic : f32
+  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
   omp.yield
 }

@@ -178,7 +178,7 @@
 atomic {
 ^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
   %2 = llvm.load %arg3 : !llvm.ptr
-  llvm.atomicrmw fadd %arg2, %2 monotonic : f32
+  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
   omp.yield
 }

@@ -261,7 +261,7 @@
 atomic {
 ^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
   %2 = llvm.load %arg3 : !llvm.ptr
-  llvm.atomicrmw fadd %arg2, %2 monotonic : f32
+  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
   omp.yield
 }

@@ -340,7 +340,7 @@
 atomic {
 ^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
   %2 = llvm.load %arg3 : !llvm.ptr
-  llvm.atomicrmw fadd %arg2, %2 monotonic : f32
+  llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
   omp.yield
 }