Index: llvm/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetLowering.h
+++ llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1465,7 +1465,13 @@
   /// like i140, which are first promoted then expanded, it is the number of
   /// registers needed to hold all the bits of the original type. For an i140
   /// on a 32 bit machine this means 5 registers.
-  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
+  ///
+  /// RC is typically not needed, but it may be passed as a way to override
+  /// the default settings, for instance with i128 inline assembly operands
+  /// on SystemZ.
+  virtual unsigned
+  getNumRegisters(LLVMContext &Context, EVT VT,
+                  const TargetRegisterClass *RC = nullptr) const {
     if (VT.isSimple()) {
       assert((unsigned)VT.getSimpleVT().SimpleTy <
                 array_lengthof(NumRegistersForVT));
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -895,7 +895,8 @@
   /// the number of values added into it.
   void AddInlineAsmOperands(unsigned Code, bool HasMatching,
                             unsigned MatchingIdx, const SDLoc &dl,
-                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
+                            SelectionDAG &DAG, std::vector<SDValue> &Ops,
+                            const TargetRegisterClass *RC) const;
 
   /// Check if the total RegCount is greater than one.
   bool occupiesMultipleRegs() const {
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -940,22 +940,20 @@
 void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                         unsigned MatchingIdx, const SDLoc &dl,
                                         SelectionDAG &DAG,
-                                        std::vector<SDValue> &Ops) const {
+                                        std::vector<SDValue> &Ops,
+                                        const TargetRegisterClass *RC) const {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
   if (HasMatching)
     Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
-  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
+  else if (!Regs.empty())
     // Put the register class of the virtual registers in the flag word. That
     // way, later passes can recompute register class constraints for inline
     // assembly as well as normal instructions.
     // Don't do this for tied operands that can use the regclass information
     // from the def.
-    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
-    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
     Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
-  }
 
   SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
   Ops.push_back(Res);
@@ -979,7 +977,10 @@
   }
 
   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
-    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
+    // Pass RC since inline assembly operands may not follow the ordinary
+    // pattern of splitting.
+    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
+                                           RC);
     MVT RegisterVT = RegVTs[Value];
     for (unsigned i = 0; i != NumRegs; ++i) {
       assert(Reg < Regs.size() && "Mismatch in # registers expected");
@@ -8150,7 +8151,8 @@
 /// RefOpInfo describes the matching operand if any, the operand otherwise
 static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
                                  SDISelAsmOperandInfo &OpInfo,
-                                 SDISelAsmOperandInfo &RefOpInfo) {
+                                 SDISelAsmOperandInfo &RefOpInfo,
+                                 const TargetRegisterClass *&RC) {
   LLVMContext &Context = *DAG.getContext();
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
@@ -8165,7 +8167,6 @@
   // If this is a constraint for a single physreg, or a constraint for a
   // register class, find it.
   unsigned AssignedReg;
-  const TargetRegisterClass *RC;
   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
   // RC is unset only on failure. Return immediately.
@@ -8177,7 +8178,7 @@
   // remember that AX is actually i16 to get the right extension.
   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
 
-  if (OpInfo.ConstraintVT != MVT::Other) {
+  if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
     // If this is an FP operand in an integer register (or visa versa), or more
     // generally if the operand value disagrees with the register class we plan
     // to stick it in, fix the operand type.
@@ -8224,7 +8225,7 @@
   // Initialize NumRegs.
   unsigned NumRegs = 1;
   if (OpInfo.ConstraintVT != MVT::Other)
-    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
+    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RC);
 
   // If this is a constraint for a specific physical register, like {r17},
   // assign it now.
@@ -8472,7 +8473,8 @@
         OpInfo.isMatchingInputConstraint()
             ? ConstraintOperands[OpInfo.getMatchedOperand()]
             : OpInfo;
-    GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
+    const TargetRegisterClass *RC = nullptr;
+    GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo, RC);
 
     auto DetectWriteToReservedRegister = [&]() {
       const MachineFunction &MF = DAG.getMachineFunction();
@@ -8522,7 +8524,7 @@
       OpInfo.AssignedRegs.AddInlineAsmOperands(
           OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                 : InlineAsm::Kind_RegDef,
-          false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
+          false, 0, getCurSDLoc(), DAG, AsmNodeOperands, RC);
     }
     break;
 
@@ -8550,12 +8552,17 @@
         MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
         SmallVector<unsigned, 4> Regs;
 
-        if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
+        unsigned RCID;
+        bool Succ = InlineAsm::hasRegClassConstraint(OpFlag, RCID);
+        assert(Succ && "No RC for tied def?"); (void)Succ;
+        const TargetRegisterInfo *TRI =
+            DAG.getMachineFunction().getSubtarget().getRegisterInfo();
+        if (const TargetRegisterClass *TiedRC = TRI->getRegClass(RCID)) {
           unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
           MachineRegisterInfo &RegInfo =
               DAG.getMachineFunction().getRegInfo();
           for (unsigned i = 0; i != NumRegs; ++i)
-            Regs.push_back(RegInfo.createVirtualRegister(RC));
+            Regs.push_back(RegInfo.createVirtualRegister(TiedRC));
         } else {
           emitInlineAsmError(Call, "inline asm error: This value type register "
@@ -8570,7 +8577,7 @@
         MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag, &Call);
         MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, true,
                                          OpInfo.getMatchedOperand(), dl,
-                                         DAG, AsmNodeOperands);
+                                         DAG, AsmNodeOperands, RC);
         break;
       }
 
@@ -8672,7 +8679,7 @@
                                         &Call);
 
       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
-                                               dl, DAG, AsmNodeOperands);
+                                               dl, DAG, AsmNodeOperands, RC);
       break;
     }
     case InlineAsm::isClobber:
@@ -8681,7 +8688,7 @@
       if (!OpInfo.AssignedRegs.Regs.empty())
         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
                                                  false, 0, getCurSDLoc(), DAG,
-                                                 AsmNodeOperands);
+                                                 AsmNodeOperands, RC);
       break;
     }
     }
Index: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -4587,9 +4587,10 @@
       // If this register class has the requested value type, return it,
      // otherwise keep searching and return the first class found
       // if no other is found which explicitly has the requested type.
-      if (RI->isTypeLegalForClass(*RC, VT))
+      if (RI->isTypeLegalForClass(*RC, VT) && RC->isAllocatable())
         return S;
       if (!R.second)
+        // RC->isAllocatable() && !R.second->isAllocatable())
         R = S;
     }
   }
Index: llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -5451,6 +5451,12 @@
     if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
         || NumRegs != 2)
       continue;
+    if (HasRC && NumRegs) {
+      // Check if RC was encoded even though the reg(s) are already assigned.
+      Register Reg = cast<RegisterSDNode>(N->getOperand(i + 1))->getReg();
+      if (Register::isPhysicalRegister(Reg))
+        continue;
+    }
 
     assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
     SDValue V0 = N->getOperand(i+1);
Index: llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -217,6 +217,12 @@
     if ((!IsTiedToChangedOp && (!HasRC || RC != SP::IntRegsRegClassID))
         || NumRegs != 2)
      continue;
+    if (HasRC && NumRegs) {
+      // Check if RC was encoded even though the reg(s) are already assigned.
+      Register Reg = cast<RegisterSDNode>(N->getOperand(i + 1))->getReg();
+      if (Register::isPhysicalRegister(Reg))
+        continue;
+    }
 
     assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
     SDValue V0 = N->getOperand(i+1);
Index: llvm/lib/Target/SystemZ/SystemZISelLowering.h
===================================================================
--- llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -422,6 +422,19 @@
       return TypeWidenVector;
     return TargetLoweringBase::getPreferredVectorAction(VT);
   }
+  const TargetRegisterClass *
+  getRegClassFor(MVT VT, bool isDivergent = false) const override {
+    if (VT == MVT::Untyped) // Needed for inline asm phys regs.
+      return &SystemZ::GR128BitRegClass;
+    return TargetLowering::getRegClassFor(VT);
+  }
+  unsigned
+  getNumRegisters(LLVMContext &Context, EVT VT,
+                  const TargetRegisterClass *RC = nullptr) const override {
+    if (VT == MVT::i128 && RC == &SystemZ::GR128BitRegClass)
+      return 1;
+    return TargetLowering::getNumRegisters(Context, VT);
+  }
   bool isCheapToSpeculateCtlz() const override { return true; }
   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
                          EVT) const override;
@@ -517,6 +530,15 @@
   const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
   bool allowTruncateForTailCall(Type *, Type *) const override;
   bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
+  bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
+                                   SDValue Val, SDValue *Parts,
+                                   unsigned NumParts, MVT PartVT,
+                                   Optional<CallingConv::ID> CC) const override;
+  SDValue
+  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
+                             const SDValue *Parts, unsigned NumParts,
+                             MVT PartVT, EVT ValueVT,
+                             Optional<CallingConv::ID> CC) const override;
   SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                                bool isVarArg,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
Index: llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
===================================================================
--- llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1368,6 +1368,55 @@
   }
 }
 
+static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
+  SDLoc DL(In);
+  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
+                           DAG.getIntPtrConstant(0, DL));
+  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
+                           DAG.getIntPtrConstant(1, DL));
+  SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
+                                    MVT::Untyped, Hi, Lo);
+  return SDValue(Pair, 0);
+}
+
+static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
+  SDLoc DL(In);
+  SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
+                                          DL, MVT::i64, In);
+  SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
+                                          DL, MVT::i64, In);
+  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
+}
+
+bool SystemZTargetLowering::splitValueIntoRegisterParts(
+    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
+    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
+  EVT ValueVT = Val.getValueType();
+  assert((ValueVT != MVT::i128 ||
+          ((NumParts == 1 && PartVT == MVT::Untyped) ||
+           (NumParts == 2 && PartVT == MVT::i64))) &&
+         "Unknown handling of i128 value.");
+  if (ValueVT == MVT::i128 && NumParts == 1) {
+    // Inline assembly operand.
+    Parts[0] = lowerI128ToGR128(DAG, Val);
+    return true;
+  }
+  return false;
+}
+
+SDValue SystemZTargetLowering::joinRegisterPartsIntoValue(
+    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
+    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
+  assert((ValueVT != MVT::i128 ||
+          ((NumParts == 1 && PartVT == MVT::Untyped) ||
+           (NumParts == 2 && PartVT == MVT::i64))) &&
+         "Unknown handling of i128 value.");
+  if (ValueVT == MVT::i128 && NumParts == 1)
+    // Inline assembly operand.
+    return lowerGR128ToI128(DAG, Parts[0]);
+  return SDValue();
+}
+
 SDValue SystemZTargetLowering::LowerFormalArguments(
     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
@@ -5489,27 +5538,6 @@
 // Lower operations with invalid operand or result types (currently used
 // only for 128-bit integer types).
-
-static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
-  SDLoc DL(In);
-  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
-                           DAG.getIntPtrConstant(0, DL));
-  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
-                           DAG.getIntPtrConstant(1, DL));
-  SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
-                                    MVT::Untyped, Hi, Lo);
-  return SDValue(Pair, 0);
-}
-
-static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
-  SDLoc DL(In);
-  SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
-                                          DL, MVT::i64, In);
-  SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
-                                          DL, MVT::i64, In);
-  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
-}
-
 void
 SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
Index: llvm/lib/Target/X86/X86FloatingPoint.cpp
===================================================================
--- llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -1545,12 +1545,19 @@
       if (STReg >= 8)
         continue;
 
-      // If the flag has a register class constraint, this must be an operand
-      // with constraint "f". Record its index and continue.
-      if (InlineAsm::hasRegClassConstraint(Flags, RCID)) {
-        FRegIdx.insert(i + 1);
-        continue;
-      }
+      // If the flag has an FP stack register class constraint, record
+      // its index and continue. EXPERIMENTAL: This doesn't quite work since
+      // this is run after reg-alloc. Only virtual registers will have
+      // hasRegClassConstraint() currently, but there seems to be no good way
+      // to do the same check here while encoding the RC for all operands in
+      // SelectionDAGBuilder. Seems there would have to be a new bit added to
+      // Flags to indicate if the reg is originally virtual or physical...?
+      if (InlineAsm::hasRegClassConstraint(Flags, RCID) &&
+          (RCID == X86::RFP32RegClassID || RCID == X86::RFP64RegClassID ||
+           RCID == X86::RFP80RegClassID)) {
+        FRegIdx.insert(i + 1);
+        continue;
+      }
 
       switch (InlineAsm::getKind(Flags)) {
       case InlineAsm::Kind_RegUse:
Index: llvm/test/CodeGen/SystemZ/inline-asm-i128.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/SystemZ/inline-asm-i128.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=s390x-linux-gnu -no-integrated-as < %s | FileCheck %s
+;
+; Test i128 (tied) operands.
+
+define i32 @clcl(i8* %p1, i32 signext %l1, i8* %p2, i32 signext %l2, i8 zeroext %pad) {
+; CHECK-LABEL: clcl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgr %r0, %r5
+; CHECK-NEXT:    # kill: def $r4d killed $r4d def $r4q
+; CHECK-NEXT:    lgr %r1, %r3
+; CHECK-NEXT:    # kill: def $r2d killed $r2d def $r2q
+; CHECK-NEXT:    sllg %r5, %r6, 24
+; CHECK-NEXT:    rosbg %r5, %r0, 40, 63, 0
+; CHECK-NEXT:    risbg %r3, %r1, 40, 191, 0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    clcl %r2, %r4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ogr %r3, %r5
+; CHECK-NEXT:    risbg %r0, %r3, 40, 191, 0
+; CHECK-NEXT:    ipm %r2
+; CHECK-NEXT:    afi %r2, -268435456
+; CHECK-NEXT:    srl %r2, 31
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = ptrtoint i8* %p1 to i64
+  %1 = ptrtoint i8* %p2 to i64
+  %and5 = and i32 %l2, 16777215
+  %2 = zext i32 %and5 to i64
+  %conv7 = zext i8 %pad to i64
+  %shl = shl nuw nsw i64 %conv7, 24
+  %or = or i64 %shl, %2
+  %u1.sroa.0.0.insert.ext = zext i64 %0 to i128
+  %u1.sroa.0.0.insert.shift = shl nuw i128 %u1.sroa.0.0.insert.ext, 64
+  %3 = and i32 %l1, 16777215
+  %u1.sroa.0.0.insert.mask = zext i32 %3 to i128
+  %u1.sroa.0.0.insert.insert = or i128 %u1.sroa.0.0.insert.shift, %u1.sroa.0.0.insert.mask
+  %u2.sroa.5.0.insert.ext = zext i64 %or to i128
+  %u2.sroa.0.0.insert.ext = zext i64 %1 to i128
+  %u2.sroa.0.0.insert.shift = shl nuw i128 %u2.sroa.0.0.insert.ext, 64
+  %u2.sroa.0.0.insert.insert = or i128 %u2.sroa.0.0.insert.shift, %u2.sroa.5.0.insert.ext
+  %4 = tail call { i128, i128 } asm "clcl $0, $1", "=r,=r,0,1"(i128 %u1.sroa.0.0.insert.insert, i128 %u2.sroa.0.0.insert.insert)
+  %asmresult = extractvalue { i128, i128 } %4, 0
+  %asmresult11 = extractvalue { i128, i128 } %4, 1
+  %5 = or i128 %asmresult, %asmresult11
+  %6 = and i128 %5, 16777215
+  %7 = icmp eq i128 %6, 0
+  %land.ext = zext i1 %7 to i32
+  ret i32 %land.ext
+}
+
+; Test a tied phys-reg.
+define void @fun(i128* %Src, i128* %Dst) {
+entry:
+  %L = load i128, i128* %Src
+  %IAsm = call i128 asm "BLA $0, $1", "={r0},0"(i128 %L)
+  store volatile i128 %IAsm, i128* %Dst
+  ret void
+}
Index: llvm/test/CodeGen/X86/callbr-asm-bb-exports.ll
===================================================================
--- llvm/test/CodeGen/X86/callbr-asm-bb-exports.ll
+++ llvm/test/CodeGen/X86/callbr-asm-bb-exports.ll
@@ -16,7 +16,7 @@
 ; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %2
 ; CHECK-NEXT: t8: i32 = add t2, Constant:i32<4>
 ; CHECK-NEXT: t22: ch,glue = CopyToReg t17, Register:i32 %5, t8
-; CHECK-NEXT: t29: ch,glue = inlineasm_br t22, {{.*}}, t22:1
+; CHECK-NEXT: t31: ch,glue = inlineasm_br t22, {{.*}}, t22:1
 
 define i32 @test(i32 %a, i32 %b, i32 %c) {
 entry:
Index: llvm/test/CodeGen/X86/inline-asm-avx512f-x-constraint.ll
===================================================================
--- llvm/test/CodeGen/X86/inline-asm-avx512f-x-constraint.ll
+++ llvm/test/CodeGen/X86/inline-asm-avx512f-x-constraint.ll
@@ -2,7 +2,7 @@
 
 ; CHECK: %[[REG1:.*]]:vr512_0_15 = COPY %1
 ; CHECK: %[[REG2:.*]]:vr512_0_15 = COPY %2
-; CHECK: INLINEASM &"vpaddq\09$3, $2, $0 {$1}", 0 /* attdialect */, {{.*}}, def %{{.*}}, {{.*}}, %{{.*}}, {{.*}}, %[[REG1]], {{.*}}, %[[REG2]], 12 /* clobber */, implicit-def early-clobber $df, 12 /* clobber */, implicit-def early-clobber $fpsw, 12 /* clobber */, implicit-def early-clobber $eflags
+; CHECK: INLINEASM &"vpaddq\09$3, $2, $0 {$1}", 0 /* attdialect */, 7733258 /* regdef:VR512_0_15 */, def %3, 1179657 /* reguse:VK8WM */, %4, 7733257 /* reguse:VR512_0_15 */, %5, 7733257 /* reguse:VR512_0_15 */, %6, 3538956 /* clobber:DFCCR */, implicit-def early-clobber $df, 1376268 /* clobber:FPCCR */, implicit-def early-clobber $fpsw, 3473420 /* clobber:CCR */, implicit-def early-clobber $eflags
 
 define <8 x i64> @mask_Yk_i8(i8 signext %msk, <8 x i64> %x, <8 x i64> %y) {
 entry:
Index: llvm/test/CodeGen/X86/inline-asm-default-clobbers.ll
===================================================================
--- llvm/test/CodeGen/X86/inline-asm-default-clobbers.ll
+++ llvm/test/CodeGen/X86/inline-asm-default-clobbers.ll
@@ -1,6 +1,7 @@
 ; RUN: llc < %s -mtriple=i686 -stop-after=finalize-isel | FileCheck %s
 
-; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def early-clobber $df, 12 /* clobber */, implicit-def early-clobber $fpsw, 12 /* clobber */, implicit-def early-clobber $eflags
+; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */, 3538956 /* clobber:DFCCR */, implicit-def early-clobber $df, 1376268 /* clobber:FPCCR */, implicit-def early-clobber $fpsw, 3473420 /* clobber:CCR */, implicit-def early-clobber $eflags
+
 define void @foo() {
 entry:
   call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
Index: llvm/test/CodeGen/X86/tail-dup-asm-goto.ll
===================================================================
--- llvm/test/CodeGen/X86/tail-dup-asm-goto.ll
+++ llvm/test/CodeGen/X86/tail-dup-asm-goto.ll
@@ -28,7 +28,7 @@
   ; CHECK: bb.3.bb110:
   ; CHECK:   successors: %bb.5(0x80000000), %bb.4(0x00000000)
   ; CHECK:   [[PHI:%[0-9]+]]:gr64 = PHI [[COPY]], %bb.2, [[MOV64rm]], %bb.1
-  ; CHECK:   INLINEASM_BR &"#$0 $1 $2", 9 /* sideeffect mayload attdialect */, 13 /* imm */, 42, 13 /* imm */, 0, 13 /* imm */, blockaddress(@test1, %ir-block.bb17.i.i.i), 12 /* clobber */, implicit-def early-clobber $df, 12 /* clobber */, implicit-def early-clobber $fpsw, 12 /* clobber */, implicit-def early-clobber $eflags
+  ; CHECK:   INLINEASM_BR &"#$0 $1 $2", 9 /* sideeffect mayload attdialect */, 13 /* imm */, 42, 13 /* imm */, 0, 13 /* imm */, blockaddress(@test1, %ir-block.bb17.i.i.i), 3538956 /* clobber:DFCCR */, implicit-def early-clobber $df, 1376268 /* clobber:FPCCR */, implicit-def early-clobber $fpsw, 3473420 /* clobber:CCR */, implicit-def early-clobber $eflags
   ; CHECK:   JMP_1 %bb.5
   ; CHECK: bb.4.bb17.i.i.i (address-taken):
   ; CHECK:   successors: %bb.5(0x80000000)