Index: include/llvm-c/Core.h =================================================================== --- include/llvm-c/Core.h +++ include/llvm-c/Core.h @@ -65,6 +65,7 @@ LLVMInvoke = 5, /* removed 6 due to API changes */ LLVMUnreachable = 7, + LLVMCallBr = 67, /* Standard Unary Operators */ LLVMFNeg = 66, Index: include/llvm/Analysis/SparsePropagation.h =================================================================== --- include/llvm/Analysis/SparsePropagation.h +++ include/llvm/Analysis/SparsePropagation.h @@ -340,6 +340,11 @@ return; } + if (isa<CallBrInst>(TI)) { + Succs.assign(Succs.size(), true); + return; + } + SwitchInst &SI = cast<SwitchInst>(TI); LatticeVal SCValue; if (AggressiveUndef) Index: include/llvm/Bitcode/LLVMBitCodes.h =================================================================== --- include/llvm/Bitcode/LLVMBitCodes.h +++ include/llvm/Bitcode/LLVMBitCodes.h @@ -470,11 +470,12 @@ FUNC_CODE_INST_SHUFFLEVEC = 8, // SHUFFLEVEC: [ty, opval, opval, opval] FUNC_CODE_INST_CMP = 9, // CMP: [opty, opval, opval, pred] - FUNC_CODE_INST_RET = 10, // RET: [opty,opval] - FUNC_CODE_INST_BR = 11, // BR: [bb#, bb#, cond] or [bb#] - FUNC_CODE_INST_SWITCH = 12, // SWITCH: [opty, op0, op1, ...] - FUNC_CODE_INST_INVOKE = 13, // INVOKE: [attr, fnty, op0,op1, ...] - // 14 is unused. + FUNC_CODE_INST_RET = 10, // RET: [opty,opval] + FUNC_CODE_INST_BR = 11, // BR: [bb#, bb#, cond] or [bb#] + FUNC_CODE_INST_SWITCH = 12, // SWITCH: [opty, op0, op1, ...] + FUNC_CODE_INST_INVOKE = 13, // INVOKE: [attr, fnty, op0,op1, ...] + FUNC_CODE_INST_CALLBR = 14, // CALLBR: [attr, cc, norm, transfs, + // fnty, fnid, args...] FUNC_CODE_INST_UNREACHABLE = 15, // UNREACHABLE FUNC_CODE_INST_PHI = 16, // PHI: [ty, val0,bb0, ...] 
Index: include/llvm/CodeGen/GlobalISel/IRTranslator.h =================================================================== --- include/llvm/CodeGen/GlobalISel/IRTranslator.h +++ include/llvm/CodeGen/GlobalISel/IRTranslator.h @@ -242,6 +242,8 @@ bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder); + bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder); + bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder); /// Translate one of LLVM's cast instructions into MachineInstrs, with the Index: include/llvm/IR/CallSite.h =================================================================== --- include/llvm/IR/CallSite.h +++ include/llvm/IR/CallSite.h @@ -8,8 +8,8 @@ //===----------------------------------------------------------------------===// // // This file defines the CallSite class, which is a handy wrapper for code that -// wants to treat Call and Invoke instructions in a generic way. When in non- -// mutation context (e.g. an analysis) ImmutableCallSite should be used. +// wants to treat Call, Invoke and CallBr instructions in a generic way. When +// in non-mutation context (e.g. an analysis) ImmutableCallSite should be used. // Finally, when some degree of customization is necessary between these two // extremes, CallSiteBase<> can be supplied with fine-tuned parameters. // @@ -18,7 +18,7 @@ // They are efficiently copyable, assignable and constructable, with cost // equivalent to copying a pointer (notice that they have only a single data // member). The internal representation carries a flag which indicates which of -// the two variants is enclosed. This allows for cheaper checks when various +// the three variants is enclosed. This allows for cheaper checks when various // accessors of CallSite are employed. 
// //===----------------------------------------------------------------------===// @@ -49,46 +49,51 @@ enum ID : unsigned; } -template class CallSiteBase { protected: - PointerIntPair I; + PointerIntPair I; CallSiteBase() = default; - CallSiteBase(CallTy *CI) : I(CI, true) { assert(CI); } - CallSiteBase(InvokeTy *II) : I(II, false) { assert(II); } + CallSiteBase(CallTy *CI) : I(CI, 1) { assert(CI); } + CallSiteBase(InvokeTy *II) : I(II, 0) { assert(II); } + CallSiteBase(CallBrTy *CBI) : I(CBI, 2) { assert(CBI); } explicit CallSiteBase(ValTy *II) { *this = get(II); } private: /// This static method is like a constructor. It will create an appropriate - /// call site for a Call or Invoke instruction, but it can also create a null - /// initialized CallSiteBase object for something which is NOT a call site. + /// call site for a Call, Invoke or CallBr instruction, but it can also create + /// a null initialized CallSiteBase object for something which is NOT a call + /// site. static CallSiteBase get(ValTy *V) { if (InstrTy *II = dyn_cast(V)) { if (II->getOpcode() == Instruction::Call) return CallSiteBase(static_cast(II)); else if (II->getOpcode() == Instruction::Invoke) return CallSiteBase(static_cast(II)); + else if (II->getOpcode() == Instruction::CallBr) + return CallSiteBase(static_cast(II)); } return CallSiteBase(); } public: - /// Return true if a CallInst is enclosed. Note that !isCall() does not mean - /// an InvokeInst is enclosed. It may also signify a NULL instruction pointer. - bool isCall() const { return I.getInt(); } + /// Return true if a CallInst is enclosed. + bool isCall() const { return I.getInt() == 1; } - /// Return true if a InvokeInst is enclosed. + /// Return true if a InvokeInst is enclosed. !I.getInt() may also signify a + /// NULL instruction pointer, so check that. bool isInvoke() const { return getInstruction() && !I.getInt(); } + /// Return true if a CallBrInst is enclosed. 
+ bool isCallBr() const { return I.getInt() == 2; } + InstrTy *getInstruction() const { return I.getPointer(); } InstrTy *operator->() const { return I.getPointer(); } explicit operator bool() const { return I.getPointer(); } @@ -98,7 +103,7 @@ /// Return the pointer to function that is being called. ValTy *getCalledValue() const { - assert(getInstruction() && "Not a call or invoke instruction!"); + assert(getInstruction() && "Not a call, invoke or callbr instruction!"); return *getCallee(); } @@ -119,12 +124,16 @@ if (CI->isInlineAsm()) return false; } + if (const CallBrInst *CBI = dyn_cast(getInstruction())) { + if (CBI->isInlineAsm()) + return false; + } return true; } /// Set the callee to the specified value. void setCalledFunction(Value *V) { - assert(getInstruction() && "Not a call or invoke instruction!"); + assert(getInstruction() && "Not a call, invoke or callbr instruction!"); *getCallee() = V; } @@ -189,7 +198,7 @@ } void setArgument(unsigned ArgNo, Value* newVal) { - assert(getInstruction() && "Not a call or invoke instruction!"); + assert(getInstruction() && "Not a call, invoke or callbr instruction!"); assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!"); getInstruction()->setOperand(ArgNo, newVal); } @@ -203,7 +212,7 @@ /// Given a use for an argument, get the argument number that corresponds to /// it. unsigned getArgumentNo(const Use *U) const { - assert(getInstruction() && "Not a call or invoke instruction!"); + assert(getInstruction() && "Not a call, invoke or callbr instruction!"); assert(isArgOperand(U) && "Argument # out of range!"); return U - arg_begin(); } @@ -227,7 +236,7 @@ /// Given a use for a data operand, get the data operand number that /// corresponds to it. 
unsigned getDataOperandNo(const Use *U) const { - assert(getInstruction() && "Not a call or invoke instruction!"); + assert(getInstruction() && "Not a call, invoke or callbr instruction!"); assert(isDataOperand(U) && "Data operand # out of range!"); return U - data_operands_begin(); } @@ -237,18 +246,23 @@ using data_operand_iterator = IterTy; /// data_operands_begin/data_operands_end - Return iterators iterating over - /// the call / invoke argument list and bundle operands. For invokes, this is - /// the set of instruction operands except the invoke target and the two - /// successor blocks; and for calls this is the set of instruction operands - /// except the call target. + /// the call / invoke / callbr argument list and bundle operands. For invokes, + /// this is the set of instruction operands except the invoke target and the + /// two successor blocks; for calls this is the set of instruction operands + /// except the call target; for callbrs the number of labels to skip must be + /// determined first. IterTy data_operands_begin() const { - assert(getInstruction() && "Not a call or invoke instruction!"); + assert(getInstruction() && "Not a call, invoke or callbr instruction!"); return (*this)->op_begin(); } IterTy data_operands_end() const { - assert(getInstruction() && "Not a call or invoke instruction!"); - return (*this)->op_end() - (isCall() ? 1 : 3); + assert(getInstruction() && "Not a call, invoke or callbr instruction!"); + if (isCallBr()) + return (*this)->op_end() - 2 - + cast(getInstruction())->getNumTransfers(); + else + return (*this)->op_end() - (isCall() ? 1 : 3); } iterator_range data_ops() const { return make_range(data_operands_begin(), data_operands_end()); @@ -277,17 +291,19 @@ return isCall() && cast(getInstruction())->isTailCall(); } -#define CALLSITE_DELEGATE_GETTER(METHOD) \ - InstrTy *II = getInstruction(); \ - return isCall() \ - ? 
cast(II)->METHOD \ - : cast(II)->METHOD +#define CALLSITE_DELEGATE_GETTER(METHOD) \ + InstrTy *II = getInstruction(); \ + return isCall() ? cast(II)->METHOD \ + : isCallBr() ? cast(II)->METHOD \ + : cast(II)->METHOD -#define CALLSITE_DELEGATE_SETTER(METHOD) \ - InstrTy *II = getInstruction(); \ - if (isCall()) \ - cast(II)->METHOD; \ - else \ +#define CALLSITE_DELEGATE_SETTER(METHOD) \ + InstrTy *II = getInstruction(); \ + if (isCall()) \ + cast(II)->METHOD; \ + else if (isCallBr()) \ + cast(II)->METHOD; \ + else \ cast(II)->METHOD unsigned getNumArgOperands() const { @@ -305,6 +321,8 @@ bool isInlineAsm() const { if (isCall()) return cast(getInstruction())->isInlineAsm(); + if (isCallBr()) + return cast(getInstruction())->isInlineAsm(); return false; } @@ -389,10 +407,10 @@ /// Return true if the data operand at index \p i directly or indirectly has /// the attribute \p A. /// - /// Normal call or invoke arguments have per operand attributes, as specified - /// in the attribute set attached to this instruction, while operand bundle - /// operands may have some attributes implied by the type of its containing - /// operand bundle. + /// Normal call, invoke or callbr arguments have per operand attributes, as + /// specified in the attribute set attached to this instruction, while operand + /// bundle operands may have some attributes implied by the type of its + /// containing operand bundle. bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const { CALLSITE_DELEGATE_GETTER(dataOperandHasImpliedAttr(i, Kind)); } @@ -585,6 +603,8 @@ // above macros to avoid confusion. 
if (isCall()) cast(II)->getOperandBundlesAsDefs(Defs); + else if (isCallBr()) + cast(II)->getOperandBundlesAsDefs(Defs); else cast(II)->getOperandBundlesAsDefs(Defs); } @@ -662,12 +682,13 @@ class CallSite : public CallSiteBase { + CallBrInst, User::op_iterator> { public: CallSite() = default; CallSite(CallSiteBase B) : CallSiteBase(B) {} CallSite(CallInst *CI) : CallSiteBase(CI) {} CallSite(InvokeInst *II) : CallSiteBase(II) {} + CallSite(CallBrInst *CBI) : CallSiteBase(CBI) {} explicit CallSite(Instruction *II) : CallSiteBase(II) {} explicit CallSite(Value *V) : CallSiteBase(V) {} @@ -713,6 +734,7 @@ ImmutableCallSite() = default; ImmutableCallSite(const CallInst *CI) : CallSiteBase(CI) {} ImmutableCallSite(const InvokeInst *II) : CallSiteBase(II) {} + ImmutableCallSite(const CallBrInst *CBI) : CallSiteBase(CBI) {} explicit ImmutableCallSite(const Instruction *II) : CallSiteBase(II) {} explicit ImmutableCallSite(const Value *V) : CallSiteBase(V) {} ImmutableCallSite(CallSite CS) : CallSiteBase(CS.getInstruction()) {} Index: include/llvm/IR/IRBuilder.h =================================================================== --- include/llvm/IR/IRBuilder.h +++ include/llvm/IR/IRBuilder.h @@ -904,6 +904,24 @@ OpBundles), Name); } + /// \brief Create a callbr instruction. 
+ CallBrInst *CreateCallBr(Value *Callee, BasicBlock *Fallthrough, + SmallVector Transfers, + ArrayRef<Value *> Args = None, + const Twine &Name = "") { + return Insert(CallBrInst::Create(Callee, Fallthrough, Transfers, Args), + Name); + } + CallBrInst *CreateCallBr(Value *Callee, BasicBlock *Fallthrough, + SmallVector Transfers, + ArrayRef<Value *> Args, + ArrayRef<OperandBundleDef> OpBundles, + const Twine &Name = "") { + return Insert( + CallBrInst::Create(Callee, Fallthrough, Transfers, Args, OpBundles), + Name); + } + ResumeInst *CreateResume(Value *Exn) { return Insert(ResumeInst::Create(Exn)); } Index: include/llvm/IR/InstVisitor.h =================================================================== --- include/llvm/IR/InstVisitor.h +++ include/llvm/IR/InstVisitor.h @@ -218,14 +218,17 @@ RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); } RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); } - // Call and Invoke are slightly different as they delegate first through - // a generic CallSite visitor. + // Call, Invoke and CallBr are slightly different as they delegate first + // through a generic CallSite visitor. RetTy visitCallInst(CallInst &I) { return static_cast<SubClass*>(this)->visitCallSite(&I); } RetTy visitInvokeInst(InvokeInst &I) { return static_cast<SubClass*>(this)->visitCallSite(&I); } + RetTy visitCallBrInst(CallBrInst &I) { + return static_cast<SubClass*>(this)->visitCallSite(&I); + } // While terminators don't have a distinct type modeling them, we support // intercepting them with dedicated a visitor callback. @@ -277,8 +280,8 @@ DELEGATE(Instruction); } - // Provide a legacy visitor for a 'callsite' that visits both calls and - // invokes. + // Provide a legacy visitor for a 'callsite' that visits calls, invokes, + // and callbrs. // // Prefer overriding the type system based `CallBase` instead. 
RetTy visitCallSite(CallSite CS) { Index: include/llvm/IR/InstrTypes.h =================================================================== --- include/llvm/IR/InstrTypes.h +++ include/llvm/IR/InstrTypes.h @@ -1033,16 +1033,23 @@ return 0; case Instruction::Invoke: return 2; + case Instruction::CallBr: + return getNumSubclassExtraOperandsDynamic(); } llvm_unreachable("Invalid opcode!"); } + /// Get the number of extra operands for instructions that don't have a fixed + /// number of extra operands. + unsigned getNumSubclassExtraOperandsDynamic() const; + public: using Instruction::getContext; static bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Call || - I->getOpcode() == Instruction::Invoke; + I->getOpcode() == Instruction::Invoke || + I->getOpcode() == Instruction::CallBr; } static bool classof(const Value *V) { return isa(V) && classof(cast(V)); Index: include/llvm/IR/Instruction.def =================================================================== --- include/llvm/IR/Instruction.def +++ include/llvm/IR/Instruction.def @@ -135,89 +135,90 @@ HANDLE_TERM_INST ( 8, CleanupRet , CleanupReturnInst) HANDLE_TERM_INST ( 9, CatchRet , CatchReturnInst) HANDLE_TERM_INST (10, CatchSwitch , CatchSwitchInst) - LAST_TERM_INST (10) +HANDLE_TERM_INST (11, CallBr , CallBrInst) // A call-site terminator + LAST_TERM_INST (11) // Standard unary operators... - FIRST_UNARY_INST(11) -HANDLE_UNARY_INST(11, FNeg , UnaryOperator) - LAST_UNARY_INST(11) + FIRST_UNARY_INST(12) +HANDLE_UNARY_INST(12, FNeg , UnaryOperator) + LAST_UNARY_INST(12) // Standard binary operators... 
- FIRST_BINARY_INST(12) -HANDLE_BINARY_INST(12, Add , BinaryOperator) -HANDLE_BINARY_INST(13, FAdd , BinaryOperator) -HANDLE_BINARY_INST(14, Sub , BinaryOperator) -HANDLE_BINARY_INST(15, FSub , BinaryOperator) -HANDLE_BINARY_INST(16, Mul , BinaryOperator) -HANDLE_BINARY_INST(17, FMul , BinaryOperator) -HANDLE_BINARY_INST(18, UDiv , BinaryOperator) -HANDLE_BINARY_INST(19, SDiv , BinaryOperator) -HANDLE_BINARY_INST(20, FDiv , BinaryOperator) -HANDLE_BINARY_INST(21, URem , BinaryOperator) -HANDLE_BINARY_INST(22, SRem , BinaryOperator) -HANDLE_BINARY_INST(23, FRem , BinaryOperator) + FIRST_BINARY_INST(13) +HANDLE_BINARY_INST(13, Add , BinaryOperator) +HANDLE_BINARY_INST(14, FAdd , BinaryOperator) +HANDLE_BINARY_INST(15, Sub , BinaryOperator) +HANDLE_BINARY_INST(16, FSub , BinaryOperator) +HANDLE_BINARY_INST(17, Mul , BinaryOperator) +HANDLE_BINARY_INST(18, FMul , BinaryOperator) +HANDLE_BINARY_INST(19, UDiv , BinaryOperator) +HANDLE_BINARY_INST(20, SDiv , BinaryOperator) +HANDLE_BINARY_INST(21, FDiv , BinaryOperator) +HANDLE_BINARY_INST(22, URem , BinaryOperator) +HANDLE_BINARY_INST(23, SRem , BinaryOperator) +HANDLE_BINARY_INST(24, FRem , BinaryOperator) // Logical operators (integer operands) -HANDLE_BINARY_INST(24, Shl , BinaryOperator) // Shift left (logical) -HANDLE_BINARY_INST(25, LShr , BinaryOperator) // Shift right (logical) -HANDLE_BINARY_INST(26, AShr , BinaryOperator) // Shift right (arithmetic) -HANDLE_BINARY_INST(27, And , BinaryOperator) -HANDLE_BINARY_INST(28, Or , BinaryOperator) -HANDLE_BINARY_INST(29, Xor , BinaryOperator) - LAST_BINARY_INST(29) +HANDLE_BINARY_INST(25, Shl , BinaryOperator) // Shift left (logical) +HANDLE_BINARY_INST(26, LShr , BinaryOperator) // Shift right (logical) +HANDLE_BINARY_INST(27, AShr , BinaryOperator) // Shift right (arithmetic) +HANDLE_BINARY_INST(28, And , BinaryOperator) +HANDLE_BINARY_INST(29, Or , BinaryOperator) +HANDLE_BINARY_INST(30, Xor , BinaryOperator) + LAST_BINARY_INST(30) // Memory operators... 
- FIRST_MEMORY_INST(30) -HANDLE_MEMORY_INST(30, Alloca, AllocaInst) // Stack management -HANDLE_MEMORY_INST(31, Load , LoadInst ) // Memory manipulation instrs -HANDLE_MEMORY_INST(32, Store , StoreInst ) -HANDLE_MEMORY_INST(33, GetElementPtr, GetElementPtrInst) -HANDLE_MEMORY_INST(34, Fence , FenceInst ) -HANDLE_MEMORY_INST(35, AtomicCmpXchg , AtomicCmpXchgInst ) -HANDLE_MEMORY_INST(36, AtomicRMW , AtomicRMWInst ) - LAST_MEMORY_INST(36) + FIRST_MEMORY_INST(31) +HANDLE_MEMORY_INST(31, Alloca, AllocaInst) // Stack management +HANDLE_MEMORY_INST(32, Load , LoadInst ) // Memory manipulation instrs +HANDLE_MEMORY_INST(33, Store , StoreInst ) +HANDLE_MEMORY_INST(34, GetElementPtr, GetElementPtrInst) +HANDLE_MEMORY_INST(35, Fence , FenceInst ) +HANDLE_MEMORY_INST(36, AtomicCmpXchg , AtomicCmpXchgInst ) +HANDLE_MEMORY_INST(37, AtomicRMW , AtomicRMWInst ) + LAST_MEMORY_INST(37) // Cast operators ... // NOTE: The order matters here because CastInst::isEliminableCastPair // NOTE: (see Instructions.cpp) encodes a table based on this ordering. 
- FIRST_CAST_INST(37) -HANDLE_CAST_INST(37, Trunc , TruncInst ) // Truncate integers -HANDLE_CAST_INST(38, ZExt , ZExtInst ) // Zero extend integers -HANDLE_CAST_INST(39, SExt , SExtInst ) // Sign extend integers -HANDLE_CAST_INST(40, FPToUI , FPToUIInst ) // floating point -> UInt -HANDLE_CAST_INST(41, FPToSI , FPToSIInst ) // floating point -> SInt -HANDLE_CAST_INST(42, UIToFP , UIToFPInst ) // UInt -> floating point -HANDLE_CAST_INST(43, SIToFP , SIToFPInst ) // SInt -> floating point -HANDLE_CAST_INST(44, FPTrunc , FPTruncInst ) // Truncate floating point -HANDLE_CAST_INST(45, FPExt , FPExtInst ) // Extend floating point -HANDLE_CAST_INST(46, PtrToInt, PtrToIntInst) // Pointer -> Integer -HANDLE_CAST_INST(47, IntToPtr, IntToPtrInst) // Integer -> Pointer -HANDLE_CAST_INST(48, BitCast , BitCastInst ) // Type cast -HANDLE_CAST_INST(49, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast - LAST_CAST_INST(49) - - FIRST_FUNCLETPAD_INST(50) -HANDLE_FUNCLETPAD_INST(50, CleanupPad, CleanupPadInst) -HANDLE_FUNCLETPAD_INST(51, CatchPad , CatchPadInst) - LAST_FUNCLETPAD_INST(51) + FIRST_CAST_INST(38) +HANDLE_CAST_INST(38, Trunc , TruncInst ) // Truncate integers +HANDLE_CAST_INST(39, ZExt , ZExtInst ) // Zero extend integers +HANDLE_CAST_INST(40, SExt , SExtInst ) // Sign extend integers +HANDLE_CAST_INST(41, FPToUI , FPToUIInst ) // floating point -> UInt +HANDLE_CAST_INST(42, FPToSI , FPToSIInst ) // floating point -> SInt +HANDLE_CAST_INST(43, UIToFP , UIToFPInst ) // UInt -> floating point +HANDLE_CAST_INST(44, SIToFP , SIToFPInst ) // SInt -> floating point +HANDLE_CAST_INST(45, FPTrunc , FPTruncInst ) // Truncate floating point +HANDLE_CAST_INST(46, FPExt , FPExtInst ) // Extend floating point +HANDLE_CAST_INST(47, PtrToInt, PtrToIntInst) // Pointer -> Integer +HANDLE_CAST_INST(48, IntToPtr, IntToPtrInst) // Integer -> Pointer +HANDLE_CAST_INST(49, BitCast , BitCastInst ) // Type cast +HANDLE_CAST_INST(50, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast + 
LAST_CAST_INST(50) + + FIRST_FUNCLETPAD_INST(51) +HANDLE_FUNCLETPAD_INST(51, CleanupPad, CleanupPadInst) +HANDLE_FUNCLETPAD_INST(52, CatchPad , CatchPadInst) + LAST_FUNCLETPAD_INST(52) // Other operators... - FIRST_OTHER_INST(52) -HANDLE_OTHER_INST(52, ICmp , ICmpInst ) // Integer comparison instruction -HANDLE_OTHER_INST(53, FCmp , FCmpInst ) // Floating point comparison instr. -HANDLE_OTHER_INST(54, PHI , PHINode ) // PHI node instruction -HANDLE_OTHER_INST(55, Call , CallInst ) // Call a function -HANDLE_OTHER_INST(56, Select , SelectInst ) // select instruction -HANDLE_USER_INST (57, UserOp1, Instruction) // May be used internally in a pass -HANDLE_USER_INST (58, UserOp2, Instruction) // Internal to passes only -HANDLE_OTHER_INST(59, VAArg , VAArgInst ) // vaarg instruction -HANDLE_OTHER_INST(60, ExtractElement, ExtractElementInst)// extract from vector -HANDLE_OTHER_INST(61, InsertElement, InsertElementInst) // insert into vector -HANDLE_OTHER_INST(62, ShuffleVector, ShuffleVectorInst) // shuffle two vectors. -HANDLE_OTHER_INST(63, ExtractValue, ExtractValueInst)// extract from aggregate -HANDLE_OTHER_INST(64, InsertValue, InsertValueInst) // insert into aggregate -HANDLE_OTHER_INST(65, LandingPad, LandingPadInst) // Landing pad instruction. - LAST_OTHER_INST(65) + FIRST_OTHER_INST(53) +HANDLE_OTHER_INST(53, ICmp , ICmpInst ) // Integer comparison instruction +HANDLE_OTHER_INST(54, FCmp , FCmpInst ) // Floating point comparison instr. 
+HANDLE_OTHER_INST(55, PHI , PHINode ) // PHI node instruction +HANDLE_OTHER_INST(56, Call , CallInst ) // Call a function +HANDLE_OTHER_INST(57, Select , SelectInst ) // select instruction +HANDLE_USER_INST (58, UserOp1, Instruction) // May be used internally in a pass +HANDLE_USER_INST (59, UserOp2, Instruction) // Internal to passes only +HANDLE_OTHER_INST(60, VAArg , VAArgInst ) // vaarg instruction +HANDLE_OTHER_INST(61, ExtractElement, ExtractElementInst)// extract from vector +HANDLE_OTHER_INST(62, InsertElement, InsertElementInst) // insert into vector +HANDLE_OTHER_INST(63, ShuffleVector, ShuffleVectorInst) // shuffle two vectors. +HANDLE_OTHER_INST(64, ExtractValue, ExtractValueInst)// extract from aggregate +HANDLE_OTHER_INST(65, InsertValue, InsertValueInst) // insert into aggregate +HANDLE_OTHER_INST(66, LandingPad, LandingPadInst) // Landing pad instruction. + LAST_OTHER_INST(66) #undef FIRST_TERM_INST #undef HANDLE_TERM_INST Index: include/llvm/IR/Instructions.h =================================================================== --- include/llvm/IR/Instructions.h +++ include/llvm/IR/Instructions.h @@ -3767,6 +3767,248 @@ } //===----------------------------------------------------------------------===// +// CallBrInst Class +//===----------------------------------------------------------------------===// + +/// CallBr instruction, tracking function calls that may not return control but +/// instead transfer it to a third location. The SubclassData field is used to +/// hold the calling convention of the call. +/// +class CallBrInst : public CallBase { + + // FIXME: keep an eye on generating and propagating this! + unsigned NumTransfers; + + CallBrInst(const CallBrInst &BI); + + /// Construct a CallBrInst given a range of arguments. 
+ /// + /// Construct a CallBrInst from a range of arguments + inline CallBrInst(Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles, int NumOperands, + const Twine &NameStr, Instruction *InsertBefore) + : CallBrInst(cast( + cast(Func->getType())->getElementType()), + Func, Fallthrough, Transfers, Args, Bundles, NumOperands, + NameStr, InsertBefore) { NumTransfers = Transfers.size(); } + + inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles, int NumOperands, + const Twine &NameStr, Instruction *InsertBefore); + /// Construct a CallBrInst given a range of arguments. + /// + /// Construct a CallBrInst from a range of arguments + inline CallBrInst(Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles, int NumOperands, + const Twine &NameStr, BasicBlock *InsertAtEnd); + + void init(Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles, const Twine &NameStr) { + init(cast( + cast(Func->getType())->getElementType()), + Func, Fallthrough, Transfers, Args, Bundles, NameStr); + } + + void init(FunctionType *FTy, Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles, const Twine &NameStr); + + /// Compute the number of operands to allocate. + static int ComputeNumOperands(int NumArgs, int NumTransfers, + int NumBundleInputs = 0) { + // We need one operand for the called function, plus our extra operands and + // the input operand counts provided. + return 2 + NumTransfers + NumArgs + NumBundleInputs; + } + +protected: + // Note: Instruction needs to be a friend here to call cloneImpl. 
+ friend class Instruction; + + CallBrInst *cloneImpl() const; + +public: + static constexpr int ArgOffset = 2; + static CallBrInst *Create(Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, + ArrayRef Args, const Twine &NameStr, + Instruction *InsertBefore = nullptr) { + return Create(cast( + cast(Func->getType())->getElementType()), + Func, Fallthrough, Transfers, Args, None, NameStr, + InsertBefore); + } + + static CallBrInst * + Create(Value *Func, BasicBlock *Fallthrough, ArrayRef Transfers, + ArrayRef Args, ArrayRef Bundles = None, + const Twine &NameStr = "", Instruction *InsertBefore = nullptr) { + return Create(cast( + cast(Func->getType())->getElementType()), + Func, Fallthrough, Transfers, Args, Bundles, NameStr, + InsertBefore); + } + + static CallBrInst *Create(FunctionType *Ty, Value *Func, + BasicBlock *Fallthrough, + ArrayRef Transfers, + ArrayRef Args, const Twine &NameStr, + Instruction *InsertBefore = nullptr) { + int NumOperands = ComputeNumOperands(Args.size(), Transfers.size()); + return new (NumOperands) + CallBrInst(Ty, Func, Fallthrough, Transfers, Args, None, NumOperands, + NameStr, InsertBefore); + } + + static CallBrInst * + Create(FunctionType *Ty, Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles = None, const Twine &NameStr = "", + Instruction *InsertBefore = nullptr) { + int NumOperands = ComputeNumOperands(Args.size(), Transfers.size(), + CountBundleInputs(Bundles)); + unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); + + return new (NumOperands, DescriptorBytes) + CallBrInst(Ty, Func, Fallthrough, Transfers, Args, Bundles, NumOperands, + NameStr, InsertBefore); + } + + static CallBrInst *Create(Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, + ArrayRef Args, const Twine &NameStr, + BasicBlock *InsertAtEnd) { + int NumOperands = ComputeNumOperands(Args.size(), Transfers.size()); + return new (NumOperands) + CallBrInst(Func, Fallthrough, Transfers, Args, 
None, NumOperands, + NameStr, InsertAtEnd); + } + + static CallBrInst *Create(Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, + ArrayRef Args, + ArrayRef Bundles, + const Twine &NameStr, BasicBlock *InsertAtEnd) { + int NumOperands = ComputeNumOperands(Args.size(), Transfers.size(), + CountBundleInputs(Bundles)); + unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); + + return new (NumOperands, DescriptorBytes) + CallBrInst(Func, Fallthrough, Transfers, Args, Bundles, NumOperands, + NameStr, InsertAtEnd); + } + + /// Create a clone of \p CBI with a different set of operand bundles and + /// insert it before \p InsertPt. + /// + /// The returned callbr instruction is identical to \p CBI in every way + /// except that the operand bundles for the new instruction are set to the + /// operand bundles in \p Bundles. + static CallBrInst *Create(CallBrInst *CBI, + ArrayRef Bundles, + Instruction *InsertPt = nullptr); + + /// Return the number of callbr transfer labels. + /// + unsigned getNumTransfers() const { return NumTransfers; } + + /// getTransferLabel - Return the i-th transfer label. + /// + Value *getTransferLabel(unsigned i) const { + assert(i < getNumTransfers() && "Out of bounds!"); + return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() + + 1); + } + + Value *getTransferLabelUse(unsigned i) const { + assert(i < getNumTransfers() && "Out of bounds!"); + return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() + + 1); + } + + /// Check if the CallBr is an asm-goto + bool isInlineAsm() const { return isa(getCalledValue()); } + + // Return the destination basic blocks... 
+ BasicBlock *getFallthrough() const { + return cast(*(&Op<-1>() - getNumTransfers() - 1)); + } + BasicBlock *getTransfer(unsigned i) const { + return cast(*(&Op<-1>() - getNumTransfers() + i)); + } + SmallVector getTransfers() const { + SmallVector Transfers; + for (unsigned i = 0, e = getNumTransfers(); i < e; ++i) + Transfers.push_back(getTransfer(i)); + return Transfers; + } + void setFallthrough(BasicBlock *B) { + *(&Op<-1>() - getNumTransfers() - 1) = reinterpret_cast(B); + } + void setTransfer(unsigned i, BasicBlock *B) { + *(&Op<-1>() - getNumTransfers() + i) = reinterpret_cast(B); + } + + BasicBlock *getSuccessor(unsigned i) const { + assert(i < getNumTransfers() + 1 && "Successor # out of range for callbr!"); + return i == 0 ? getFallthrough() : getTransfer(i - 1); + } + + void setSuccessor(unsigned idx, BasicBlock *NewSucc) { + assert(idx < getNumTransfers() + 1 && + "Successor # out of range for callbr!"); + *(&Op<-1>() - getNumTransfers() -1 + idx) = + reinterpret_cast(NewSucc); + } + + unsigned getNumSuccessors() const { return getNumTransfers() + 1; } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static bool classof(const Instruction *I) { + return (I->getOpcode() == Instruction::CallBr); + } + static bool classof(const Value *V) { + return isa(V) && classof(cast(V)); + } + +private: + + // Shadow Instruction::setInstructionSubclassData with a private forwarding + // method so that subclasses cannot accidentally use it. 
+ void setInstructionSubclassData(unsigned short D) { + Instruction::setInstructionSubclassData(D); + } +}; + +CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles, int NumOperands, + const Twine &NameStr, Instruction *InsertBefore) + : CallBase(Ty->getReturnType(), Instruction::CallBr, + OperandTraits::op_end(this) - NumOperands, NumOperands, + InsertBefore) { + init(Ty, Func, Fallthrough, Transfers, Args, Bundles, NameStr); +} + +CallBrInst::CallBrInst(Value *Func, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles, int NumOperands, + const Twine &NameStr, BasicBlock *InsertAtEnd) + : CallBase( + cast( + cast(Func->getType())->getElementType()) + ->getReturnType(), + Instruction::CallBr, + OperandTraits::op_end(this) - NumOperands, NumOperands, + InsertAtEnd) { + init(Func, Fallthrough, Transfers, Args, Bundles, NameStr); +} + +//===----------------------------------------------------------------------===// // ResumeInst Class //===----------------------------------------------------------------------===// Index: lib/AsmParser/LLLexer.cpp =================================================================== --- lib/AsmParser/LLLexer.cpp +++ lib/AsmParser/LLLexer.cpp @@ -554,6 +554,7 @@ KEYWORD(acq_rel); KEYWORD(seq_cst); KEYWORD(syncscope); + KEYWORD(jump); KEYWORD(nnan); KEYWORD(ninf); @@ -859,6 +860,7 @@ INSTKEYWORD(invoke, Invoke); INSTKEYWORD(resume, Resume); INSTKEYWORD(unreachable, Unreachable); + INSTKEYWORD(callbr, CallBr); INSTKEYWORD(alloca, Alloca); INSTKEYWORD(load, Load); Index: lib/AsmParser/LLParser.h =================================================================== --- lib/AsmParser/LLParser.h +++ lib/AsmParser/LLParser.h @@ -571,6 +571,7 @@ bool ParseCatchSwitch(Instruction *&Inst, PerFunctionState &PFS); bool ParseCatchPad(Instruction *&Inst, PerFunctionState &PFS); bool ParseCleanupPad(Instruction *&Inst, PerFunctionState &PFS); + 
bool ParseCallBr(Instruction *&Inst, PerFunctionState &PFS);
 bool ParseUnaryOp(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc,
                   unsigned OperandType);
Index: lib/AsmParser/LLParser.cpp
===================================================================
--- lib/AsmParser/LLParser.cpp
+++ lib/AsmParser/LLParser.cpp
@@ -164,6 +164,14 @@
       AS = AS.addAttributes(Context, AttributeList::FunctionIndex,
                             AttributeSet::get(Context, FnAttrs));
       II->setAttributes(AS);
+    } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(V)) {
+      AttributeList AS = CBI->getAttributes();
+      AttrBuilder FnAttrs(AS.getFnAttributes());
+      AS = AS.removeAttributes(Context, AttributeList::FunctionIndex);
+      FnAttrs.merge(B);
+      AS = AS.addAttributes(Context, AttributeList::FunctionIndex,
+                            AttributeSet::get(Context, FnAttrs));
+      CBI->setAttributes(AS);
     } else if (auto *GV = dyn_cast<GlobalVariable>(V)) {
       AttrBuilder Attrs(GV->getAttributes());
       Attrs.merge(B);
@@ -5567,6 +5575,7 @@
   case lltok::kw_catchswitch: return ParseCatchSwitch(Inst, PFS);
   case lltok::kw_catchpad: return ParseCatchPad(Inst, PFS);
   case lltok::kw_cleanuppad: return ParseCleanupPad(Inst, PFS);
+  case lltok::kw_callbr: return ParseCallBr(Inst, PFS);

   // Unary Operators.
case lltok::kw_fneg: { FastMathFlags FMF = EatFastMathFlagsIfPresent(); @@ -6185,6 +6194,124 @@ return false; } +/// ParseCallBr +/// ::= 'callbr' OptionalCallingConv OptionalAttrs Type Value ParamList +/// OptionalAttrs 'to' TypeAndValue 'or jump' '[' LabelList ']' +bool LLParser::ParseCallBr(Instruction *&Inst, PerFunctionState &PFS) { + LocTy CallLoc = Lex.getLoc(); + AttrBuilder RetAttrs, FnAttrs; + std::vector FwdRefAttrGrps; + LocTy NoBuiltinLoc; + unsigned CC; + Type *RetType = nullptr; + LocTy RetTypeLoc; + ValID CalleeID; + SmallVector ArgList; + SmallVector BundleList; + + BasicBlock *Fallthrough; + if (ParseOptionalCallingConv(CC) || ParseOptionalReturnAttrs(RetAttrs) || + ParseType(RetType, RetTypeLoc, true /*void allowed*/) || + ParseValID(CalleeID) || ParseParameterList(ArgList, PFS) || + ParseFnAttributeValuePairs(FnAttrs, FwdRefAttrGrps, false, + NoBuiltinLoc) || + ParseOptionalOperandBundles(BundleList, PFS) || + ParseToken(lltok::kw_to, "expected 'to' in callbr") || + ParseTypeAndBasicBlock(Fallthrough, PFS) || + ParseToken(lltok::kw_or, "expected 'or jump' in callbr") || + ParseToken(lltok::kw_jump, "expected 'or jump' in callbr") || + ParseToken(lltok::lsquare, "expected '[' in callbr")) + return true; + + // Parse the destination list. + SmallVector Transfers; + + if (Lex.getKind() != lltok::rsquare) { + BasicBlock *DestBB; + if (ParseTypeAndBasicBlock(DestBB, PFS)) + return true; + Transfers.push_back(DestBB); + + while (EatIfPresent(lltok::comma)) { + if (ParseTypeAndBasicBlock(DestBB, PFS)) + return true; + Transfers.push_back(DestBB); + } + } + + if (ParseToken(lltok::rsquare, "expected ']' at end of block list")) + return true; + + // If RetType is a non-function pointer type, then this is the short syntax + // for the call, which means that RetType is just the return type. Infer the + // rest of the function argument types from the arguments that are present. 
+ FunctionType *Ty = dyn_cast(RetType); + if (!Ty) { + // Pull out the types of all of the arguments... + std::vector ParamTypes; + for (unsigned i = 0, e = ArgList.size(); i != e; ++i) + ParamTypes.push_back(ArgList[i].V->getType()); + + if (!FunctionType::isValidReturnType(RetType)) + return Error(RetTypeLoc, "Invalid result type for LLVM function"); + + Ty = FunctionType::get(RetType, ParamTypes, false); + } + + CalleeID.FTy = Ty; + + // Look up the callee. + Value *Callee; + if (ConvertValIDToValue(PointerType::getUnqual(Ty), CalleeID, Callee, &PFS, + /*IsCall=*/true)) + return true; + + if (isa(Callee) && !Ty->getReturnType()->isVoidTy()) + return Error(RetTypeLoc, "asm-goto outputs not supported"); + + // Set up the Attribute for the function. + SmallVector Args; + SmallVector ArgAttrs; + + // Loop through FunctionType's arguments and ensure they are specified + // correctly. Also, gather any parameter attributes. + FunctionType::param_iterator I = Ty->param_begin(); + FunctionType::param_iterator E = Ty->param_end(); + for (unsigned i = 0, e = ArgList.size(); i != e; ++i) { + Type *ExpectedTy = nullptr; + if (I != E) { + ExpectedTy = *I++; + } else if (!Ty->isVarArg()) { + return Error(ArgList[i].Loc, "too many arguments specified"); + } + + if (ExpectedTy && ExpectedTy != ArgList[i].V->getType()) + return Error(ArgList[i].Loc, "argument is not of expected type '" + + getTypeString(ExpectedTy) + "'"); + Args.push_back(ArgList[i].V); + ArgAttrs.push_back(ArgList[i].Attrs); + } + + if (I != E) + return Error(CallLoc, "not enough parameters specified for call"); + + if (FnAttrs.hasAlignmentAttr()) + return Error(CallLoc, "callbr instructions may not have an alignment"); + + // Finish off the Attribute and check them + AttributeList PAL = + AttributeList::get(Context, AttributeSet::get(Context, FnAttrs), + AttributeSet::get(Context, RetAttrs), ArgAttrs); + + CallBrInst *CBI = + CallBrInst::Create(Ty, Callee, Fallthrough, Transfers, Args, BundleList); + 
CBI->setCallingConv(CC); + CBI->setAttributes(PAL); + ForwardRefAttrGroups[CBI] = FwdRefAttrGrps; + Inst = CBI; + return false; +} + //===----------------------------------------------------------------------===// // Binary Operators. //===----------------------------------------------------------------------===// Index: lib/AsmParser/LLToken.h =================================================================== --- lib/AsmParser/LLToken.h +++ lib/AsmParser/LLToken.h @@ -98,6 +98,7 @@ kw_acq_rel, kw_seq_cst, kw_syncscope, + kw_jump, kw_nnan, kw_ninf, kw_nsz, @@ -328,6 +329,7 @@ kw_catchret, kw_catchpad, kw_cleanuppad, + kw_callbr, kw_alloca, kw_load, Index: lib/Bitcode/Reader/BitcodeReader.cpp =================================================================== --- lib/Bitcode/Reader/BitcodeReader.cpp +++ lib/Bitcode/Reader/BitcodeReader.cpp @@ -4227,6 +4227,74 @@ InstructionList.push_back(I); break; } + case bitc::FUNC_CODE_INST_CALLBR: { + // CALLBR: [attr, cc, norm, transfs, fty, fnid, args] + unsigned OpNum = 0; + AttributeList PAL = getAttributes(Record[OpNum++]); + unsigned CCInfo = Record[OpNum++]; + + BasicBlock *Fallthrough = getBasicBlock(Record[OpNum++]); + unsigned NumTransfers = Record[OpNum++]; + SmallVector Transfers; + for (unsigned i = 0, e = NumTransfers; i != e; ++i) + Transfers.push_back(getBasicBlock(Record[OpNum++])); + + FunctionType *FTy = nullptr; + if (CCInfo >> bitc::CALL_EXPLICIT_TYPE & 1 && + !(FTy = dyn_cast(getTypeByID(Record[OpNum++])))) + return error("Explicit call type is not a function type"); + + Value *Callee; + if (getValueTypePair(Record, OpNum, NextValueNo, Callee)) + return error("Invalid record"); + + PointerType *OpTy = dyn_cast(Callee->getType()); + if (!OpTy) + return error("Callee is not a pointer type"); + if (!FTy) { + FTy = dyn_cast(OpTy->getElementType()); + if (!FTy) + return error("Callee is not of pointer to function type"); + } else if (OpTy->getElementType() != FTy) + return error("Explicit call type does not 
match pointee type of " + "callee operand"); + if (Record.size() < FTy->getNumParams() + OpNum) + return error("Insufficient operands to call"); + + SmallVector Args; + // Read the fixed params. + for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) { + if (FTy->getParamType(i)->isLabelTy()) + Args.push_back(getBasicBlock(Record[OpNum])); + else + Args.push_back(getValue(Record, OpNum, NextValueNo, + FTy->getParamType(i))); + if (!Args.back()) + return error("Invalid record"); + } + + // Read type/value pairs for varargs params. + if (!FTy->isVarArg()) { + if (OpNum != Record.size()) + return error("Invalid record"); + } else { + while (OpNum != Record.size()) { + Value *Op; + if (getValueTypePair(Record, OpNum, NextValueNo, Op)) + return error("Invalid record"); + Args.push_back(Op); + } + } + + I = CallBrInst::Create(FTy, Callee, Fallthrough, Transfers, Args, + OperandBundles); + OperandBundles.clear(); + InstructionList.push_back(I); + cast(I)->setCallingConv( + static_cast((0x7ff & CCInfo) >> bitc::CALL_CCONV)); + cast(I)->setAttributes(PAL); + break; + } case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE I = new UnreachableInst(Context); InstructionList.push_back(I); Index: lib/Bitcode/Writer/BitcodeWriter.cpp =================================================================== --- lib/Bitcode/Writer/BitcodeWriter.cpp +++ lib/Bitcode/Writer/BitcodeWriter.cpp @@ -2776,6 +2776,41 @@ Vals.push_back(VE.getValueID(CatchSwitch.getUnwindDest())); break; } + case Instruction::CallBr: { + const CallBrInst *CBI = cast(&I); + const Value *Callee = CBI->getCalledValue(); + FunctionType *FTy = CBI->getFunctionType(); + + if (CBI->hasOperandBundles()) + writeOperandBundles(CBI, InstID); + + Code = bitc::FUNC_CODE_INST_CALLBR; + + Vals.push_back(VE.getAttributeListID(CBI->getAttributes())); + + Vals.push_back(CBI->getCallingConv() << bitc::CALL_CCONV | + 1 << bitc::CALL_EXPLICIT_TYPE); + + Vals.push_back(VE.getValueID(CBI->getFallthrough())); + 
Vals.push_back(CBI->getNumTransfers()); + for (unsigned i = 0, e = CBI->getNumTransfers(); i != e; ++i) + Vals.push_back(VE.getValueID(CBI->getTransfer(i))); + + Vals.push_back(VE.getTypeID(FTy)); + pushValueAndType(Callee, InstID, Vals); + + // Emit value #'s for the fixed parameters. + for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) + pushValue(I.getOperand(i), InstID, Vals); // fixed param. + + // Emit type/value pairs for varargs params. + if (FTy->isVarArg()) { + for (unsigned i = FTy->getNumParams(), e = CBI->getNumArgOperands(); + i != e; ++i) + pushValueAndType(I.getOperand(i), InstID, Vals); // vararg + } + break; + } case Instruction::Unreachable: Code = bitc::FUNC_CODE_INST_UNREACHABLE; AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV; Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp =================================================================== --- lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -2996,6 +2996,29 @@ } } + // FIXME: this is an asm-goto problem. INLINEASM as it is now is not a + // terminator and won't pop up on the previous checks. This checks whether + // the last non-terminator instruction is an inline asm referencing this BB. + // If it is, we assume asm-goto with a jump to here and a fallthrough that got + // moved elsewhere. + auto AGI = Pred->getLastNonDebugInstr(); + if (AGI != Pred->end()) { + // FIXME: this is crude, but the asm-goto sometimes seems to be the second- + // to-last operand. Find a prettier way to check this. 
+ const auto &AG = *AGI; + if (AG.isInlineAsm()) + for (const auto &OP : AG.operands()) + if (OP.isMBB() && OP.getMBB() == MBB) + return false; + if (AGI != Pred->getFirstNonDebugInstr()) { + const auto &AG1 = *(--AGI); + if (AG1.isInlineAsm()) + for (const auto &OP : AG1.operands()) + if (OP.isMBB() && OP.getMBB() == MBB) + return false; + } + } + return true; } Index: lib/CodeGen/CodeGenPrepare.cpp =================================================================== --- lib/CodeGen/CodeGenPrepare.cpp +++ lib/CodeGen/CodeGenPrepare.cpp @@ -654,6 +654,19 @@ BB->getSinglePredecessor()->getSingleSuccessor())) return false; + // Skip merging if the block's successor is also a successor to any callbr + // that leads to this block. + for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) { + auto CBI = dyn_cast((*I)->getTerminator()); + if (CBI) { + if (DestBB == CBI->getFallthrough()) + return false; + for (BasicBlock *CBBB : CBI->getTransfers()) + if (DestBB == CBBB) + return false; + } + } + // Try to skip merging if the unique predecessor of BB is terminated by a // switch or indirect branch instruction, and BB is used as an incoming block // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to Index: lib/CodeGen/GlobalISel/IRTranslator.cpp =================================================================== --- lib/CodeGen/GlobalISel/IRTranslator.cpp +++ lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1230,6 +1230,42 @@ return true; } +bool IRTranslator::translateCallBr(const User &U, + MachineIRBuilder &MIRBuilder) { + const CallBrInst &I = cast(U); + + const BasicBlock *ReturnBB = I.getFallthrough(); + const SmallVector Transfers = I.getTransfers(); + + const Value *Callee = I.getCalledValue(); + const Function *Fn = dyn_cast(Callee); + + if (Fn && Fn->isIntrinsic()) + return false; + + // FIXME: support whatever these are. + if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) + return false; + + unsigned Res = I.getType()->isVoidTy() ? 
0 : getOrCreateVReg(I); + SmallVector Args; + for (auto &Arg : I.arg_operands()) + Args.push_back(getOrCreateVReg(*Arg)); + + if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, + [&]() { return getOrCreateVReg(*I.getCalledValue()); })) + return false; + + // FIXME: track probabilities. + MachineBasicBlock &ReturnMBB = getMBB(*ReturnBB); + MIRBuilder.getMBB().addSuccessor(&ReturnMBB); + for (unsigned i = 0, e = Transfers.size(); i < e; ++i) + MIRBuilder.getMBB().addSuccessor(&getMBB(*Transfers[i])); + MIRBuilder.buildBr(ReturnMBB); + + return true; +} + bool IRTranslator::translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder) { const LandingPadInst &LP = cast(U); Index: lib/CodeGen/IndirectBrExpandPass.cpp =================================================================== --- lib/CodeGen/IndirectBrExpandPass.cpp +++ lib/CodeGen/IndirectBrExpandPass.cpp @@ -149,11 +149,9 @@ ConstantInt *BBIndexC = ConstantInt::get(ITy, BBIndex); // Now rewrite the blockaddress to an integer constant based on the index. - // FIXME: We could potentially preserve the uses as arguments to inline asm. - // This would allow some uses such as diagnostic information in crashes to - // have higher quality even when this transform is enabled, but would break - // users that round-trip blockaddresses through inline assembly and then - // back into an indirectbr. + // FIXME: this part doesn't properly recognize other uses of blockaddress + // expressions, for instance, where they are used to pass labels to + // asm-goto. This part of the pass needs a rework. 
BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(BBIndexC, BA->getType())); } Index: lib/CodeGen/MachineBasicBlock.cpp =================================================================== --- lib/CodeGen/MachineBasicBlock.cpp +++ lib/CodeGen/MachineBasicBlock.cpp @@ -1198,9 +1198,13 @@ assert(Old != New && "Cannot replace self with self!"); MachineBasicBlock::instr_iterator I = instr_end(); + int Stop = 0; while (I != instr_begin()) { --I; - if (!I->isTerminator()) break; + if (!I->isTerminator() && !I->isInlineAsm()) ++Stop; + // FIXME: this is crude, but the asm-goto sometimes seems to be the second- + // to-last operand. Find a prettier way to check this. + if (Stop > 1 || I->isPHI()) break; // Scan the operands of this machine instruction, replacing any uses of Old // with New. Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h @@ -47,6 +47,7 @@ class BasicBlock; class BranchInst; class CallInst; +class CallBrInst; class CatchPadInst; class CatchReturnInst; class CatchSwitchInst; @@ -852,6 +853,7 @@ private: // These all get lowered before this pass. void visitInvoke(const InvokeInst &I); + void visitCallBr(const CallBrInst &I); void visitResume(const ResumeInst &I); void visitUnary(const User &I, unsigned Opcode); Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -2528,6 +2528,61 @@ InvokeMBB->normalizeSuccProbs(); // Drop into normal successor. + DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(), + DAG.getBasicBlock(Return))); +} + +void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) { + MachineBasicBlock *CallBrMBB = FuncInfo.MBB; + + // Retrieve successors. 
Look through artificial IR level blocks like + // catchswitch for successors. + MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)]; + const SmallVector Transfers = I.getTransfers(); + + // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't + // have to do anything here to lower funclet bundles. + assert(!I.hasOperandBundlesOtherThan( + {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && + "Cannot lower callbrs with arbitrary operand bundles yet!"); + + const Value *Callee(I.getCalledValue()); + const Function *Fn = dyn_cast(Callee); + if (isa(Callee)) + visitInlineAsm(&I); + else if (Fn && Fn->isIntrinsic()) { + switch (Fn->getIntrinsicID()) { + default: + llvm_unreachable("Cannot callbr this intrinsic"); + case Intrinsic::donothing: + // Ignore callbrs to @llvm.donothing: jump directly to the next BB. + break; + } + } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) { + // Currently we do not lower any intrinsic calls with deopt operand bundles. + // Eventually we will support lowering the @llvm.experimental.deoptimize + // intrinsic, and right now there are no plans to support other intrinsics + // with deopt state. + LowerCallSiteWithDeoptBundle(&I, getValue(Callee), nullptr); + } else { + LowerCallTo(&I, getValue(Callee), false, nullptr); + } + + // If the value of the callbr is used outside of its defining block, make it + // available as a virtual register. + // We already took care of the exported value for the statepoint instruction + // during call to the LowerStatepoint. + if (!isStatepoint(I)) { + CopyToExportRegsIfNeeded(&I); + } + + // Update successor info. + addSuccessorWithProb(CallBrMBB, Return); + for (unsigned i = 0, e = Transfers.size(); i < e; ++i) + addSuccessorWithProb(CallBrMBB, FuncInfo.MBBMap[Transfers[i]]); + CallBrMBB->normalizeSuccProbs(); + + // Drop into normal successor. 
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
                           DAG.getBasicBlock(Return)));
@@ -7521,7 +7576,19 @@
     // Compute the value type for each operand.
     if (OpInfo.Type == InlineAsm::isInput ||
         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
-      OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
+      // Transfer labels for asm-goto need to be unpacked from the blockaddress
+      // statements escaping them in IR.
+      if (const CallBrInst *CBI = dyn_cast<CallBrInst>(CS.getInstruction())) {
+        if (ArgNo >= CBI->getNumArgOperands() - CBI->getNumTransfers()) {
+          const BlockAddress *BA =
+              dyn_cast<BlockAddress>(CS.getArgument(ArgNo++));
+          assert(BA && "Invalid asm-goto blockaddress argument!");
+          BasicBlock *BB = BA->getBasicBlock();
+          OpInfo.CallOperandVal = BB;
+        } else
+          OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
+      } else
+        OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));

       // Process the call argument. BasicBlocks are labels, currently appearing
       // only in asm's.
Index: lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- lib/CodeGen/TargetLoweringBase.cpp
+++ lib/CodeGen/TargetLoweringBase.cpp
@@ -1447,6 +1447,7 @@
   case Switch: return 0;
   case IndirectBr: return 0;
   case Invoke: return 0;
+  case CallBr: return 0;
   case Resume: return 0;
   case Unreachable: return 0;
   case CleanupRet: return 0;
Index: lib/IR/AsmWriter.cpp
===================================================================
--- lib/IR/AsmWriter.cpp
+++ lib/IR/AsmWriter.cpp
@@ -3837,6 +3837,52 @@
     writeOperand(II->getNormalDest(), true);
     Out << " unwind ";
     writeOperand(II->getUnwindDest(), true);
+  } else if (const CallBrInst *CBI = dyn_cast<CallBrInst>(&I)) {
+    Operand = CBI->getCalledValue();
+    FunctionType *FTy = CBI->getFunctionType();
+    Type *RetTy = FTy->getReturnType();
+    const AttributeList &PAL = CBI->getAttributes();
+
+    // Print the calling convention being used.
+ if (CBI->getCallingConv() != CallingConv::C) { + Out << " "; + PrintCallingConv(CBI->getCallingConv(), Out); + } + + if (PAL.hasAttributes(AttributeList::ReturnIndex)) + Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex); + + // If possible, print out the short form of the callbr instruction. We can + // only do this if the first argument is a pointer to a nonvararg function, + // and if the return type is not a pointer to a function. + // + Out << ' '; + TypePrinter.print(FTy->isVarArg() ? FTy : RetTy, Out); + Out << ' '; + writeOperand(Operand, false); + Out << '('; + for (unsigned op = 0, Eop = CBI->getNumArgOperands(); op < Eop; ++op) { + if (op) + Out << ", "; + writeParamOperand(CBI->getArgOperand(op), PAL.getParamAttributes(op)); + } + + Out << ')'; + if (PAL.hasAttributes(AttributeList::FunctionIndex)) + Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes()); + + writeOperandBundles(CBI); + + Out << "\n to "; + writeOperand(CBI->getFallthrough(), true); + Out << " or jump ["; + SmallVector Transfers = CBI->getTransfers(); + for (unsigned i = 0, e = Transfers.size(); i != e; ++i) { + if (i != 0) + Out << ", "; + writeOperand(Transfers[i], true); + } + Out << ']'; } else if (const AllocaInst *AI = dyn_cast(&I)) { Out << ' '; if (AI->isUsedWithInAlloca()) Index: lib/IR/Instruction.cpp =================================================================== --- lib/IR/Instruction.cpp +++ lib/IR/Instruction.cpp @@ -302,6 +302,7 @@ case CatchRet: return "catchret"; case CatchPad: return "catchpad"; case CatchSwitch: return "catchswitch"; + case CallBr: return "callbr"; // Standard unary operators... 
case FNeg: return "fneg"; @@ -406,6 +407,10 @@ return CI->getCallingConv() == cast(I2)->getCallingConv() && CI->getAttributes() == cast(I2)->getAttributes() && CI->hasIdenticalOperandBundleSchema(*cast(I2)); + if (const CallBrInst *CI = dyn_cast(I1)) + return CI->getCallingConv() == cast(I2)->getCallingConv() && + CI->getAttributes() == cast(I2)->getAttributes() && + CI->hasIdenticalOperandBundleSchema(*cast(I2)); if (const InsertValueInst *IVI = dyn_cast(I1)) return IVI->getIndices() == cast(I2)->getIndices(); if (const ExtractValueInst *EVI = dyn_cast(I1)) @@ -519,6 +524,8 @@ return !cast(this)->doesNotAccessMemory(); case Instruction::Invoke: return !cast(this)->doesNotAccessMemory(); + case Instruction::CallBr: + return !cast(this)->doesNotAccessMemory(); case Instruction::Store: return !cast(this)->isUnordered(); } @@ -539,6 +546,8 @@ return !cast(this)->onlyReadsMemory(); case Instruction::Invoke: return !cast(this)->onlyReadsMemory(); + case Instruction::CallBr: + return !cast(this)->onlyReadsMemory(); case Instruction::Load: return !cast(this)->isUnordered(); } @@ -775,8 +784,9 @@ } void Instruction::setProfWeight(uint64_t W) { - assert((isa(this) || isa(this)) && - "Can only set weights for call and invoke instrucitons"); + assert((isa(this) || isa(this) || + isa(this)) && + "Can only set weights for call, invoke and callbr instrucitons"); SmallVector Weights; Weights.push_back(W); MDBuilder MDB(getContext()); Index: lib/IR/Instructions.cpp =================================================================== --- lib/IR/Instructions.cpp +++ lib/IR/Instructions.cpp @@ -257,6 +257,11 @@ Function *CallBase::getCaller() { return getParent()->getParent(); } +unsigned CallBase::getNumSubclassExtraOperandsDynamic() const { + assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!"); + return cast(this)->getNumTransfers() + 1; +} + bool CallBase::isIndirectCall() const { const Value *V = getCalledValue(); if (isa(V) || isa(V)) @@ -718,6 +723,72 @@ } 
//===----------------------------------------------------------------------===// +// CallBrInst Implementation +//===----------------------------------------------------------------------===// + +void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough, + ArrayRef Transfers, ArrayRef Args, + ArrayRef Bundles, + const Twine &NameStr) { + this->FTy = FTy; + + assert(getNumOperands() == + 2 + Transfers.size() + Args.size() + CountBundleInputs(Bundles) && + "NumOperands not set up?"); + NumTransfers = Transfers.size(); + setCalledFunction(Fn); + setFallthrough(Fallthrough); + for (unsigned i = 0, e = Transfers.size(); i < e; ++i) + setTransfer(i, Transfers[i]); + +#ifndef NDEBUG + assert(((Args.size() == FTy->getNumParams()) || + (FTy->isVarArg() && Args.size() > FTy->getNumParams())) && + "Calling a function with bad signature"); + + for (unsigned i = 0, e = Args.size(); i != e; i++) + assert((i >= FTy->getNumParams() || + FTy->getParamType(i) == Args[i]->getType()) && + "Calling a function with a bad signature!"); +#endif + + std::copy(Args.begin(), Args.end(), op_begin()); + + auto It = populateBundleOperandInfos(Bundles, Args.size()); + (void)It; + assert(It + 2 + Transfers.size() == op_end() && "Should add up!"); + + setName(NameStr); +} + +CallBrInst::CallBrInst(const CallBrInst &CBI) + : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr, + OperandTraits::op_end(this) - CBI.getNumOperands(), + CBI.getNumOperands()) { + setCallingConv(CBI.getCallingConv()); + std::copy(CBI.op_begin(), CBI.op_end(), op_begin()); + std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(), + bundle_op_info_begin()); + SubclassOptionalData = CBI.SubclassOptionalData; + NumTransfers = CBI.NumTransfers; +} + +CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef OpB, + Instruction *InsertPt) { + std::vector Args(CBI->arg_begin(), CBI->arg_end()); + + auto *NewCBI = CallBrInst::Create(CBI->getCalledValue(), + CBI->getFallthrough(), 
CBI->getTransfers(), + Args, OpB, CBI->getName(), InsertPt); + NewCBI->setCallingConv(CBI->getCallingConv()); + NewCBI->SubclassOptionalData = CBI->SubclassOptionalData; + NewCBI->setAttributes(CBI->getAttributes()); + NewCBI->setDebugLoc(CBI->getDebugLoc()); + NewCBI->NumTransfers = CBI->NumTransfers; + return NewCBI; +} + +//===----------------------------------------------------------------------===// // ReturnInst Implementation //===----------------------------------------------------------------------===// @@ -4024,6 +4095,14 @@ return new(getNumOperands()) InvokeInst(*this); } +CallBrInst *CallBrInst::cloneImpl() const { + if (hasOperandBundles()) { + unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); + return new (getNumOperands(), DescriptorBytes) CallBrInst(*this); + } + return new (getNumOperands()) CallBrInst(*this); +} + ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); } CleanupReturnInst *CleanupReturnInst::cloneImpl() const { Index: lib/IR/Value.cpp =================================================================== --- lib/IR/Value.cpp +++ lib/IR/Value.cpp @@ -58,7 +58,8 @@ // FIXME: Why isn't this in the subclass gunk?? // Note, we cannot call isa before the CallInst has been // constructed. 
- if (SubclassID == Instruction::Call || SubclassID == Instruction::Invoke) + if (SubclassID == Instruction::Call || SubclassID == Instruction::Invoke || + SubclassID == Instruction::CallBr) assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) && "invalid CallInst type!"); else if (SubclassID != BasicBlockVal && Index: lib/IR/Verifier.cpp =================================================================== --- lib/IR/Verifier.cpp +++ lib/IR/Verifier.cpp @@ -467,6 +467,7 @@ void visitReturnInst(ReturnInst &RI); void visitSwitchInst(SwitchInst &SI); void visitIndirectBrInst(IndirectBrInst &BI); + void visitCallBrInst(CallBrInst &CBI); void visitSelectInst(SelectInst &SI); void visitUserOp1(Instruction &I); void visitUserOp2(Instruction &I) { visitUserOp1(I); } @@ -2451,6 +2452,24 @@ visitTerminator(BI); } +void Verifier::visitCallBrInst(CallBrInst &CBI) { + Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", + &CBI); + for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i) + Assert(CBI.getSuccessor(i)->getType()->isLabelTy(), + "Callbr successors must all have pointer type!", &CBI); + for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) { + Assert(i >= CBI.getNumArgOperands() || !isa(CBI.getOperand(i)), + "Using an unescaped label as a callbr argument!", &CBI); + if (isa(CBI.getOperand(i))) + for (unsigned j = i + 1; j != e; ++j) + Assert(CBI.getOperand(i) != CBI.getOperand(j), + "Duplicate callbr destination!", &CBI); + } + + visitTerminator(CBI); +} + void Verifier::visitSelectInst(SelectInst &SI) { Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1), SI.getOperand(2)), Index: lib/Target/X86/X86InstrInfo.cpp =================================================================== --- lib/Target/X86/X86InstrInfo.cpp +++ lib/Target/X86/X86InstrInfo.cpp @@ -2558,8 +2558,20 @@ // Working from the bottom, when we see a non-terminator instruction, we're // done. 
- if (!isUnpredicatedTerminator(*I)) - break; + if (!isUnpredicatedTerminator(*I)) { + // If the non-terminator is an inline assembly block, we pessimistically + // assume it's an asm-goto that can't be handled by this analysis. + // In cases of non-void returns, the asm may be the second-to-last + // non-terminator instead. + // TODO: make a more precise check for asm-goto with outputs to reduce + // false positives + if (I->getOpcode() == X86::INLINEASM || + (I != MBB.begin() && I->getOpcode() == TargetOpcode::COPY && + (--I)->getOpcode() == X86::INLINEASM)) + return true; + else + break; + } // A terminator that isn't a branch can't easily be handled by this // analysis. Index: lib/Transforms/InstCombine/InstCombineCalls.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineCalls.cpp +++ lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements the visitCall and visitInvoke functions. +// This file implements the visitCall, visitInvoke and visitCallBr functions. // //===----------------------------------------------------------------------===// @@ -1830,8 +1830,8 @@ IntrinsicInst *II = dyn_cast(&CI); if (!II) return visitCallSite(&CI); - // Intrinsics cannot occur in an invoke, so handle them here instead of in - // visitCallSite. + // Intrinsics cannot occur in an invoke or a callbr, so handle them here + // instead of in visitCallSite. if (auto *MI = dyn_cast(II)) { bool Changed = false; @@ -4032,6 +4032,11 @@ return visitCallSite(&II); } +// CallBrInst simplification +Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) { + return visitCallSite(&CBI); +} + /// If this cast does not affect the value passed through the varargs area, we /// can eliminate the use of the cast. 
static bool isSafeToEliminateVarargsCast(const CallSite CS, @@ -4160,7 +4165,7 @@ return nullptr; } -/// Improvements for call and invoke instructions. +/// Improvements for call, invoke and callbr instructions. Instruction *InstCombiner::visitCallSite(CallSite CS) { if (isAllocLikeFn(CS.getInstruction(), &TLI)) return visitAllocSite(*CS.getInstruction()); @@ -4193,7 +4198,7 @@ } // If the callee is a pointer to a function, attempt to move any casts to the - // arguments of the call/invoke. + // arguments of the call/invoke/callbr. Value *Callee = CS.getCalledValue(); if (!isa(Callee) && transformConstExprCastCall(CS)) return nullptr; @@ -4226,10 +4231,14 @@ if (isa(OldCall)) return eraseInstFromFunction(*OldCall); - // We cannot remove an invoke, because it would change the CFG, just - // change the callee to a null pointer. - cast(OldCall)->setCalledFunction( - Constant::getNullValue(CalleeF->getType())); + // We cannot remove an invoke or a callbr, because it would change the + // CFG, just change the callee to a null pointer. + if (isa(OldCall)) + cast(OldCall)->setCalledFunction( + Constant::getNullValue(CalleeF->getType())); + else + cast(OldCall)->setCalledFunction( + Constant::getNullValue(CalleeF->getType())); return nullptr; } } @@ -4248,6 +4257,11 @@ return nullptr; } + if (isa(CS.getInstruction())) { + // Can't remove a callbr because we cannot change the CFG. + return nullptr; + } + // This instruction is not reachable, just remove it. We insert a store to // undef so that we know that this code is not reachable, despite the fact // that we can't modify the CFG here. @@ -4297,7 +4311,7 @@ } /// If the callee is a constexpr cast of a function, attempt to move the cast to -/// the arguments of the call/invoke. +/// the arguments of the call/invoke/callbr. 
bool InstCombiner::transformConstExprCastCall(CallSite CS) { auto *Callee = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts()); if (!Callee) return false; @@ -4348,17 +4362,29 @@ return false; // Attribute not compatible with transformed value. } - // If the callsite is an invoke instruction, and the return value is used by - // a PHI node in a successor, we cannot change the return type of the call - // because there is no place to put the cast instruction (without breaking - // the critical edge). Bail out in this case. - if (!Caller->use_empty()) - if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) + // If the callsite is an invoke/callbr instruction, and the return value is + // used by a PHI node in a successor, we cannot change the return type of + // the call because there is no place to put the cast instruction (without + // breaking the critical edge). Bail out in this case. + if (!Caller->use_empty()) { + if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { for (User *U : II->users()) if (PHINode *PN = dyn_cast<PHINode>(U)) if (PN->getParent() == II->getNormalDest() || PN->getParent() == II->getUnwindDest()) return false; + } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) { + for (User *U : CBI->users()) + if (PHINode *PN = dyn_cast<PHINode>(U)) { + if (PN->getParent() == CBI->getFallthrough()) + return false; + SmallVector<BasicBlock *, 16> Transfers = CBI->getTransfers(); + for (unsigned i = 0, e = Transfers.size(); i < e; ++i) + if (PN->getParent() == CBI->getTransfer(i)) + return false; + } + } + } } unsigned NumActualArgs = CS.arg_size(); @@ -4512,6 +4538,9 @@ if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(), II->getUnwindDest(), Args, OpBundles); + } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) { + NewCS = Builder.CreateCallBr(Callee, CBI->getFallthrough(), + CBI->getTransfers(), Args, OpBundles); } else { NewCS = Builder.CreateCall(Callee, Args, OpBundles); cast<CallInst>(NewCS.getInstruction()) @@ -4535,11 +4564,14 @@ NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
NC->setDebugLoc(Caller->getDebugLoc()); - // If this is an invoke instruction, we should insert it after the first - // non-phi, instruction in the normal successor block. + // If this is an invoke/callbr instruction, we should insert it after the + // first non-phi instruction in the normal successor block. if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt(); InsertNewInstBefore(NC, *I); + } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) { + BasicBlock::iterator I = CBI->getFallthrough()->getFirstInsertionPt(); + InsertNewInstBefore(NC, *I); } else { // Otherwise, it's a call, just insert cast right after the call. InsertNewInstBefore(NC, *Caller); @@ -4692,6 +4724,12 @@ NewArgs, OpBundles); cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv()); cast<InvokeInst>(NewCaller)->setAttributes(NewPAL); + } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) { + NewCaller = + CallBrInst::Create(NewCallee, CBI->getFallthrough(), + CBI->getTransfers(), NewArgs, OpBundles); + cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv()); + cast<CallBrInst>(NewCaller)->setAttributes(NewPAL); } else { NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles); cast<CallInst>(NewCaller)->setTailCallKind( Index: lib/Transforms/InstCombine/InstCombineInternal.h =================================================================== --- lib/Transforms/InstCombine/InstCombineInternal.h +++ lib/Transforms/InstCombine/InstCombineInternal.h @@ -394,6 +394,7 @@ Instruction *visitSelectInst(SelectInst &SI); Instruction *visitCallInst(CallInst &CI); Instruction *visitInvokeInst(InvokeInst &II); + Instruction *visitCallBrInst(CallBrInst &CBI); Instruction *SliceUpIllegalIntegerPHI(PHINode &PN); Instruction *visitPHINode(PHINode &PN); Index: lib/Transforms/InstCombine/InstructionCombining.cpp =================================================================== --- lib/Transforms/InstCombine/InstructionCombining.cpp +++ lib/Transforms/InstCombine/InstructionCombining.cpp @@ -925,6
+925,10 @@ if (InvokeInst *II = dyn_cast<InvokeInst>(InVal)) if (II->getParent() == NonConstBB) return nullptr; + // Same for callbr. + if (CallBrInst *CBI = dyn_cast<CallBrInst>(InVal)) + if (CBI->getParent() == NonConstBB) + return nullptr; // If the incoming non-constant value is in I's block, we will remove one // instruction, but insert another equivalent one, leading to infinite @@ -2332,6 +2336,14 @@ None, "", II->getParent()); } + if (CallBrInst *CBI = dyn_cast<CallBrInst>(&MI)) { + // Replace callbr with a NOP intrinsic to maintain the original CFG + Module *M = CBI->getModule(); + Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing); + CallBrInst::Create(F, CBI->getFallthrough(), CBI->getTransfers(), None, + "", CBI->getParent()); + } + for (auto *DII : DIIs) eraseInstFromFunction(*DII); Index: lib/Transforms/Scalar/JumpThreading.cpp =================================================================== --- lib/Transforms/Scalar/JumpThreading.cpp +++ lib/Transforms/Scalar/JumpThreading.cpp @@ -1056,7 +1056,7 @@ Condition = IB->getAddress()->stripPointerCasts(); Preference = WantBlockAddress; } else { - return false; // Must be an invoke. + return false; // Must be an invoke or callbr. } // Run constant folding to see if we can reduce the condition to a simple Index: lib/Transforms/Scalar/SCCP.cpp =================================================================== --- lib/Transforms/Scalar/SCCP.cpp +++ lib/Transforms/Scalar/SCCP.cpp @@ -639,6 +639,11 @@ visitTerminator(II); } + void visitCallBrInst (CallBrInst &CBI) { + visitCallSite(&CBI); + visitTerminator(CBI); + } + void visitCallSite (CallSite CS); void visitResumeInst (ResumeInst &I) { /*returns void*/ } void visitUnreachableInst(UnreachableInst &I) { /*returns void*/ } @@ -734,6 +739,13 @@ return; } + // In case of callbr, we pessimistically assume that all successors are + // feasible.
+ if (isa<CallBrInst>(&TI)) { + Succs.assign(TI.getNumSuccessors(), true); + return; + } + LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n'); llvm_unreachable("SCCP: Don't know how to handle this terminator!"); } @@ -1598,6 +1610,7 @@ return true; case Instruction::Call: case Instruction::Invoke: + case Instruction::CallBr: // There are two reasons a call can have an undef result // 1. It could be tracked. // 2. It could be constant-foldable. Index: lib/Transforms/Utils/BasicBlockUtils.cpp =================================================================== --- lib/Transforms/Utils/BasicBlockUtils.cpp +++ lib/Transforms/Utils/BasicBlockUtils.cpp @@ -532,6 +532,8 @@ // all BlockAddress uses would need to be updated. assert(!isa<IndirectBrInst>(Preds[i]->getTerminator()) && "Cannot split an edge from an IndirectBrInst"); + assert(!isa<CallBrInst>(Preds[i]->getTerminator()) && + "Cannot split an edge from a CallBrInst"); Preds[i]->getTerminator()->replaceUsesOfWith(BB, NewBB); } Index: lib/Transforms/Utils/InlineFunction.cpp =================================================================== --- lib/Transforms/Utils/InlineFunction.cpp +++ lib/Transforms/Utils/InlineFunction.cpp @@ -1507,6 +1507,10 @@ assert(TheCall->getParent() && TheCall->getFunction() && "Instruction not in function!"); + // FIXME: we don't inline callbr yet. + if (isa<CallBrInst>(TheCall)) + return false; + // If IFI has any state in it, zap it before we fill it in.
IFI.reset(); @@ -1734,6 +1738,8 @@ Instruction *NewI = nullptr; if (isa<CallInst>(I)) NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I); + else if (isa<CallBrInst>(I)) + NewI = CallBrInst::Create(cast<CallBrInst>(I), OpDefs, I); else NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I); @@ -2038,6 +2044,8 @@ Instruction *NewInst; if (CS.isCall()) NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I); + else if (CS.isCallBr()) + NewInst = CallBrInst::Create(cast<CallBrInst>(I), OpBundles, I); else NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I); NewInst->takeName(I); Index: lib/Transforms/Utils/Local.cpp =================================================================== --- lib/Transforms/Utils/Local.cpp +++ lib/Transforms/Utils/Local.cpp @@ -997,6 +997,19 @@ } } + // We cannot fold the block if it's a branch to an already present callbr + // successor because that creates duplicate successors. + for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) { + auto CBI = dyn_cast<CallBrInst>((*I)->getTerminator()); + if (CBI) { + if (Succ == CBI->getFallthrough()) + return false; + for (BasicBlock *CBBB : CBI->getTransfers()) + if (Succ == CBBB) + return false; + } + } + LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB); SmallVector<DominatorTree::UpdateType, 32> Updates; Index: lib/Transforms/Utils/LoopSimplify.cpp =================================================================== --- lib/Transforms/Utils/LoopSimplify.cpp +++ lib/Transforms/Utils/LoopSimplify.cpp @@ -28,6 +28,9 @@ // to transform the loop and make these guarantees. Client code should check // that these conditions are true before relying on them. // +// Similar complications arise from callbr instructions, particularly in +// asm-goto where blockaddress expressions are used. +// // Note that the simplifycfg pass will clean up blocks which are split out but // end up being unnecessary, so usage of this pass should not pessimize // generated code. @@ -128,6 +131,8 @@ // be able to fully transform the loop, because it prohibits // edge splitting.
if (isa<IndirectBrInst>(P->getTerminator())) return nullptr; + // Same for callbr. + if (isa<CallBrInst>(P->getTerminator())) return nullptr; // Keep track of it. OutsideBlocks.push_back(P); @@ -239,6 +244,9 @@ // We can't split indirectbr edges. if (isa<IndirectBrInst>(PN->getIncomingBlock(i)->getTerminator())) return nullptr; + // We can't split callbr edges either. + if (isa<CallBrInst>(PN->getIncomingBlock(i)->getTerminator())) + return nullptr; OuterLoopPreds.push_back(PN->getIncomingBlock(i)); } } @@ -361,6 +369,9 @@ // Indirectbr edges cannot be split, so we must fail if we find one. if (isa<IndirectBrInst>(P->getTerminator())) return nullptr; + // Same for callbr. + if (isa<CallBrInst>(P->getTerminator())) + return nullptr; if (P != Preheader) BackedgeBlocks.push_back(P); } Index: lib/Transforms/Utils/LoopUtils.cpp =================================================================== --- lib/Transforms/Utils/LoopUtils.cpp +++ lib/Transforms/Utils/LoopUtils.cpp @@ -66,6 +66,9 @@ if (isa<IndirectBrInst>(PredBB->getTerminator())) // We cannot rewrite exiting edges from an indirectbr. return false; + if (isa<CallBrInst>(PredBB->getTerminator())) + // We cannot rewrite exiting edges from a callbr. + return false; InLoopPredecessors.push_back(PredBB); } else { Index: lib/Transforms/Utils/SimplifyCFG.cpp =================================================================== --- lib/Transforms/Utils/SimplifyCFG.cpp +++ lib/Transforms/Utils/SimplifyCFG.cpp @@ -1267,7 +1267,8 @@ I2 = &*BB2_Itr++; } if (isa<PHINode>(I1) || !I1->isIdenticalToWhenDefined(I2) || - (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2))) + (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)) || + isa<CallBrInst>(I1)) return false; BasicBlock *BIParent = BI->getParent(); @@ -1353,6 +1354,10 @@ if (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)) return Changed; + // TODO: callbr hoisting currently disabled pending further study.
+ if (isa<CallBrInst>(I1)) + return Changed; + for (BasicBlock *Succ : successors(BB1)) { for (PHINode &PN : Succ->phis()) { Value *BB1V = PN.getIncomingValueForBlock(BB1); @@ -1458,6 +1463,9 @@ if (const auto *C = dyn_cast<CallInst>(I)) if (C->isInlineAsm()) return false; + if (const auto *CB = dyn_cast<CallBrInst>(I)) + if (CB->isInlineAsm()) + return false; // Everything must have only one use too, apart from stores which // have no uses. @@ -1522,6 +1530,14 @@ // FIXME: if the call was *already* indirect, we should do this. return false; } + // Avoiding indirect callbrs. + // FIXME: review the above. The callee is not the final operand of an + // invoke, but the third last. + if (const CallBrInst *CBI = dyn_cast<CallBrInst>(I0)) { + // FIXME: if the call was *already* indirect, we should do this. + if (OI == OE - 2 - CBI->getNumTransfers()) + return false; + } for (auto *I : Insts) PHIOperands[I].push_back(I->getOperand(OI)); } Index: test/Bitcode/callbr.ll =================================================================== --- /dev/null +++ test/Bitcode/callbr.ll @@ -0,0 +1,14 @@ +; RUN: llvm-dis < %s.bc | FileCheck %s + +; callbr.ll.bc was generated by passing this file to llvm-as.
+ +define i32 @test_asm_goto(i32 %x){ +entry: +; CHECK: callbr void asm "", "r,X"(i32 %x, i8* blockaddress(@test_asm_goto, %fail)) +; CHECK-NEXT: to label %normal or jump [label %fail] + callbr void asm "", "r,X"(i32 %x, i8* blockaddress(@test_asm_goto, %fail)) to label %normal or jump [label %fail] +normal: + ret i32 1 +fail: + ret i32 0 +} Index: test/CodeGen/X86/callbr-asm-destinations.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/callbr-asm-destinations.ll @@ -0,0 +1,15 @@ +; RUN: not llc -mtriple=i686-- < %s 2> %t +; RUN: FileCheck %s < %t + +; CHECK: Duplicate callbr destination + +; A test for asm-goto duplicate labels limitation + +define i32 @test(i32 %a) { +entry: + %0 = add i32 %a, 4 + callbr void asm "xorl $0, $0; jmp ${1:l}", "r,X,~{dirflag},~{fpsr},~{flags}"(i32 %0, i8* blockaddress(@test, %fail)) to label %fail or jump [label %fail] + +fail: + ret i32 1 +} Index: test/CodeGen/X86/callbr-asm-errors.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/callbr-asm-errors.ll @@ -0,0 +1,18 @@ +; RUN: not llc -mtriple=i686-- < %s 2> %t +; RUN: FileCheck %s < %t + +; CHECK: Duplicate callbr destination + +; A test for asm-goto duplicate labels limitation + +define i32 @test(i32 %a) { +entry: + %0 = add i32 %a, 4 + callbr void asm "xorl $0, $0; jmp ${1:l}", "r,X,X,~{dirflag},~{fpsr},~{flags}"(i32 %0, i8* blockaddress(@test, %fail), i8* blockaddress(@test, %fail)) to label %normal or jump [label %fail, label %fail] + +normal: + ret i32 %0 + +fail: + ret i32 1 +} Index: test/CodeGen/X86/callbr-asm-outputs.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/callbr-asm-outputs.ll @@ -0,0 +1,18 @@ +; RUN: not llc -mtriple=i686-- < %s 2> %t +; RUN: FileCheck %s < %t + +; CHECK: error: asm-goto outputs not supported + +; A test for asm-goto output prohibition + +define i32 @test(i32 %a) { +entry: + %0 
= add i32 %a, 4 + %1 = callbr i32 asm "xorl $1, $1; jmp ${1:l}", "=&r,r,X,~{dirflag},~{fpsr},~{flags}"(i32 %0, i8* blockaddress(@test, %fail)) to label %normal or jump [label %fail] + +normal: + ret i32 %1 + +fail: + ret i32 1 +} Index: test/CodeGen/X86/callbr-asm.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/callbr-asm.ll @@ -0,0 +1,133 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-- -O3 | FileCheck %s + +; Tests for using callbr as an asm-goto wrapper + +; Test 1 - fallthrough label gets removed, but the fallthrough code that is +; unreachable due to asm ending on a jmp is still left in. +define i32 @test1(i32 %a) { +; CHECK-LABEL: test1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: addl $4, %eax +; CHECK-NEXT: #APP +; CHECK-NEXT: xorl %eax, %eax +; CHECK-NEXT: jmp .LBB0_2 +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: # %bb.1: # %normal +; CHECK-NEXT: xorl %eax, %eax +; CHECK-NEXT: retl +; CHECK-NEXT: .Ltmp0: # Block address taken +; CHECK-NEXT: .LBB0_2: # %fail +; CHECK-NEXT: movl $1, %eax +; CHECK-NEXT: retl +entry: + %0 = add i32 %a, 4 + callbr void asm "xorl $0, $0; jmp ${1:l}", "r,X,~{dirflag},~{fpsr},~{flags}"(i32 %0, i8* blockaddress(@test1, %fail)) to label %normal or jump [label %fail] + +normal: + ret i32 0 + +fail: + ret i32 1 +} + +; Test 2 - callbr terminates an unreachable block, function gets simplified +; to a trivial zero return. +define i32 @test2(i32 %a) { +; CHECK-LABEL: test2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xorl %eax, %eax +; CHECK-NEXT: retl +entry: + br label %normal + +unreachableasm: + %0 = add i32 %a, 4 + callbr void asm sideeffect "xorl $0, $0; jmp ${1:l}", "r,X,~{dirflag},~{fpsr},~{flags}"(i32 %0, i8* blockaddress(@test2, %fail)) to label %normal or jump [label %fail] + +normal: + ret i32 0 + +fail: + ret i32 1 +} + + +; Test 3 - asm-goto implements a loop. 
The loop gets recognized, but many loop +; transforms fail due to canonicalization having callbr exceptions. Trivial +; blocks at labels 1 and 3 also don't get simplified due to callbr. +define dso_local i32 @test3(i32 %a) { +; CHECK-LABEL: test3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: .Ltmp1: # Block address taken +; CHECK-NEXT: .LBB2_1: # %label01 +; CHECK-NEXT: # =>This Loop Header: Depth=1 +; CHECK-NEXT: # Child Loop BB2_2 Depth 2 +; CHECK-NEXT: # Child Loop BB2_3 Depth 3 +; CHECK-NEXT: # Child Loop BB2_4 Depth 4 +; CHECK-NEXT: .Ltmp2: # Block address taken +; CHECK-NEXT: .LBB2_2: # %label02 +; CHECK-NEXT: # Parent Loop BB2_1 Depth=1 +; CHECK-NEXT: # => This Loop Header: Depth=2 +; CHECK-NEXT: # Child Loop BB2_3 Depth 3 +; CHECK-NEXT: # Child Loop BB2_4 Depth 4 +; CHECK-NEXT: addl $4, {{[0-9]+}}(%esp) +; CHECK-NEXT: .Ltmp3: # Block address taken +; CHECK-NEXT: .LBB2_3: # %label03 +; CHECK-NEXT: # Parent Loop BB2_1 Depth=1 +; CHECK-NEXT: # Parent Loop BB2_2 Depth=2 +; CHECK-NEXT: # => This Loop Header: Depth=3 +; CHECK-NEXT: # Child Loop BB2_4 Depth 4 +; CHECK-NEXT: .p2align 4, 0x90 +; CHECK-NEXT: .Ltmp4: # Block address taken +; CHECK-NEXT: .LBB2_4: # %label04 +; CHECK-NEXT: # Parent Loop BB2_1 Depth=1 +; CHECK-NEXT: # Parent Loop BB2_2 Depth=2 +; CHECK-NEXT: # Parent Loop BB2_3 Depth=3 +; CHECK-NEXT: # => This Inner Loop Header: Depth=4 +; CHECK-NEXT: #APP +; CHECK-NEXT: jmp .LBB2_1 +; CHECK-NEXT: jmp .LBB2_2 +; CHECK-NEXT: jmp .LBB2_3 +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: # %bb.5: # %normal0 +; CHECK-NEXT: # in Loop: Header=BB2_4 Depth=4 +; CHECK-NEXT: #APP +; CHECK-NEXT: jmp .LBB2_1 +; CHECK-NEXT: jmp .LBB2_2 +; CHECK-NEXT: jmp .LBB2_3 +; CHECK-NEXT: jmp .LBB2_4 +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: # %bb.6: # %normal1 +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: retl +entry: + %a.addr = alloca i32, align 4 + store i32 %a, i32* %a.addr, align 4 + br label %label01 + +label01: ; preds = %normal0, %label04, %entry + br label %label02 + 
+label02: ; preds = %normal0, %label04, %label01 + %0 = load i32, i32* %a.addr, align 4 + %add = add nsw i32 %0, 4 + store i32 %add, i32* %a.addr, align 4 + br label %label03 + +label03: ; preds = %normal0, %label04, %label02 + br label %label04 + +label04: ; preds = %normal0, %label03 + callbr void asm sideeffect "jmp ${0:l}; jmp ${1:l}; jmp ${2:l}", "X,X,X,~{dirflag},~{fpsr},~{flags}"(i8* blockaddress(@test3, %label01), i8* blockaddress(@test3, %label02), i8* blockaddress(@test3, %label03)) + to label %normal0 or jump [label %label01, label %label02, label %label03] + +normal0: ; preds = %label04 + callbr void asm sideeffect "jmp ${0:l}; jmp ${1:l}; jmp ${2:l}; jmp ${3:l}", "X,X,X,X,~{dirflag},~{fpsr},~{flags}"(i8* blockaddress(@test3, %label01), i8* blockaddress(@test3, %label02), i8* blockaddress(@test3, %label03), i8* blockaddress(@test3, %label04)) + to label %normal1 or jump [label %label01, label %label02, label %label03, label %label04] + +normal1: ; preds = %normal0 + %1 = load i32, i32* %a.addr, align 4 + ret i32 %1 +} Index: test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll =================================================================== --- test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll +++ test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll @@ -63,14 +63,6 @@ resume { i8*, i32 } zeroinitializer } -define i8 @call_with_same_range() { -; CHECK-LABEL: @call_with_same_range -; CHECK: tail call i8 @call_with_range - bitcast i8 0 to i8 - %out = call i8 @dummy(), !range !0 - ret i8 %out -} - define i8 @invoke_with_same_range() personality i8* undef { ; CHECK-LABEL: @invoke_with_same_range() ; CHECK: tail call i8 @invoke_with_range() @@ -84,6 +76,13 @@ resume { i8*, i32 } zeroinitializer } +define i8 @call_with_same_range() { +; CHECK-LABEL: @call_with_same_range +; CHECK: tail call i8 @call_with_range + bitcast i8 0 to i8 + %out = call i8 @dummy(), !range !0 + ret i8 %out +} declare i8 @dummy(); Index: 
test/Transforms/MergeFunc/inline-asm.ll =================================================================== --- test/Transforms/MergeFunc/inline-asm.ll +++ test/Transforms/MergeFunc/inline-asm.ll @@ -3,13 +3,13 @@ ; CHECK-LABEL: @int_ptr_arg_different ; CHECK-NEXT: call void asm +; CHECK-LABEL: @int_ptr_null +; CHECK-NEXT: tail call void @float_ptr_null() + ; CHECK-LABEL: @int_ptr_arg_same ; CHECK-NEXT: %2 = bitcast i32* %0 to float* ; CHECK-NEXT: tail call void @float_ptr_arg_same(float* %2) -; CHECK-LABEL: @int_ptr_null -; CHECK-NEXT: tail call void @float_ptr_null() - ; Used to satisfy minimum size limit declare void @stuff()