diff --git a/llvm/lib/Target/BPF/BPFISelLowering.h b/llvm/lib/Target/BPF/BPFISelLowering.h
--- a/llvm/lib/Target/BPF/BPFISelLowering.h
+++ b/llvm/lib/Target/BPF/BPFISelLowering.h
@@ -84,7 +84,7 @@
                      SmallVectorImpl<SDValue> &InVals) const;
 
   // Maximum number of arguments to a call
-  static const unsigned MaxArgs;
+  static const size_t MaxArgs;
 
   // Lower a call into CALLSEQ_START - BPFISD:CALL - CALLSEQ_END chain
   SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -26,7 +26,9 @@
 #include "llvm/IR/DiagnosticPrinter.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
+#include <optional>
 
 using namespace llvm;
 
 #define DEBUG_TYPE "bpf-lower"
@@ -35,22 +37,17 @@
     cl::Hidden, cl::init(false),
     cl::desc("Expand memcpy into load/store pairs in order"));
 
-static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) {
-  MachineFunction &MF = DAG.getMachineFunction();
-  DAG.getContext()->diagnose(
-      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
-}
-
-static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
-                 SDValue Val) {
-  MachineFunction &MF = DAG.getMachineFunction();
+static void unsupported(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
+                        std::optional<SDValue> Val = std::nullopt) {
   std::string Str;
-  raw_string_ostream OS(Str);
-  OS << Msg;
-  Val->print(OS);
-  OS.flush();
-  DAG.getContext()->diagnose(
-      DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc()));
+  if (Val.has_value()) {
+    raw_string_ostream OS(Str);
+    OS << ' ';
+    (*Val)->print(OS);
+  }
+  MachineFunction &MF = DAG.getMachineFunction();
+  DAG.getContext()->diagnose(DiagnosticInfoUnsupported(
+      MF.getFunction(), Msg.concat(Str), DL.getDebugLoc()));
 }
 
 BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
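For readers outside the LLVM tree, the shape of the new unsupported() helper (one required message plus an optional offending value, replacing the former pair of fail() overloads) can be sketched in standalone C++. All names here are illustrative stand-ins, not LLVM API:

    #include <iostream>
    #include <optional>
    #include <sstream>
    #include <string>

    // Stand-in for DiagnosticInfoUnsupported: just print to stderr.
    static void diagnose(const std::string &Msg) { std::cerr << Msg << '\n'; }

    // One default argument covers both "message only" and "message + value",
    // so a single helper replaces two overloads.
    static void unsupported(const std::string &Msg,
                            std::optional<int> Val = std::nullopt) {
      std::string Str;
      if (Val.has_value()) {
        std::ostringstream OS;
        OS << ' ' << *Val; // keep a separator between message and value
        Str = OS.str();
      }
      diagnose(Msg + Str);
    }

    int main() {
      unsupported("stack arguments are not supported"); // message only
      unsupported("too many arguments", 6);             // message + value
    }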
@@ -59,8 +56,9 @@
   // Set up the register classes.
   addRegisterClass(MVT::i64, &BPF::GPRRegClass);
-  if (STI.getHasAlu32())
+  if (STI.getHasAlu32()) {
     addRegisterClass(MVT::i32, &BPF::GPR32RegClass);
+  }
 
   // Compute derived properties from the register classes
   computeRegisterProperties(STI.getRegisterInfo());
@@ -83,8 +81,9 @@
   // from selectiondag.
   for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
     if (VT == MVT::i32) {
-      if (STI.getHasAlu32())
+      if (STI.getHasAlu32()) {
         continue;
+      }
     } else {
       setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
     }
@@ -97,8 +96,9 @@
   }
 
   for (auto VT : { MVT::i32, MVT::i64 }) {
-    if (VT == MVT::i32 && !STI.getHasAlu32())
+    if (VT == MVT::i32 && !STI.getHasAlu32()) {
       continue;
+    }
 
     setOperationAction(ISD::SDIVREM, VT, Expand);
     setOperationAction(ISD::UDIVREM, VT, Expand);
@@ -190,32 +190,36 @@
 }
 
 bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
-  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) {
     return false;
+  }
   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
   return NumBits1 > NumBits2;
 }
 
 bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
-  if (!VT1.isInteger() || !VT2.isInteger())
+  if (!VT1.isInteger() || !VT2.isInteger()) {
     return false;
+  }
   unsigned NumBits1 = VT1.getSizeInBits();
   unsigned NumBits2 = VT2.getSizeInBits();
   return NumBits1 > NumBits2;
 }
 
 bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
-  if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+  if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy()) {
     return false;
+  }
   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
   return NumBits1 == 32 && NumBits2 == 64;
 }
 
 bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
-  if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
+  if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger()) {
     return false;
+  }
   unsigned NumBits1 = VT1.getSizeInBits();
   unsigned NumBits2 = VT2.getSizeInBits();
   return NumBits1 == 32 && NumBits2 == 64;
@@ -239,48 +243,57 @@
 BPFTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                 StringRef Constraint,
                                                 MVT VT) const {
-  if (Constraint.size() == 1)
+  if (Constraint.size() == 1) {
     // GCC Constraint Letters
     switch (Constraint[0]) {
     case 'r': // GENERAL_REGS
       return std::make_pair(0U, &BPF::GPRRegClass);
     case 'w':
-      if (HasAlu32)
+      if (HasAlu32) {
         return std::make_pair(0U, &BPF::GPR32RegClass);
+      }
       break;
     default:
       break;
     }
+  }
 
   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
 }
 
 void BPFTargetLowering::ReplaceNodeResults(
     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
-  const char *err_msg;
+  const char *Msg;
   uint32_t Opcode = N->getOpcode();
   switch (Opcode) {
-  default:
-    report_fatal_error("Unhandled custom legalization");
+  default: {
+    report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
+  }
   case ISD::ATOMIC_LOAD_ADD:
   case ISD::ATOMIC_LOAD_AND:
   case ISD::ATOMIC_LOAD_OR:
   case ISD::ATOMIC_LOAD_XOR:
   case ISD::ATOMIC_SWAP:
   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
-    if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
-      err_msg = "Unsupported atomic operations, please use 32/64 bit version";
-    else
-      err_msg = "Unsupported atomic operations, please use 64 bit version";
+    if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD) {
+      Msg = "unsupported atomic operation, please use 32/64 bit version";
+    } else {
+      Msg = "unsupported atomic operation, please use 64 bit version";
+    }
     break;
   }
 
   SDLoc DL(N);
-  fail(DL, DAG, err_msg);
+  // We'll still produce a fatal error downstream, but this diagnostic is more
+  // user-friendly.
+  unsupported(DL, DAG, Msg);
 }
 
 SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
+  default: {
+    report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
+  }
   case ISD::BR_CC:
     return LowerBR_CC(Op, DAG);
   case ISD::GlobalAddress:
@@ -288,9 +301,7 @@
   case ISD::SELECT_CC:
     return LowerSELECT_CC(Op, DAG);
   case ISD::DYNAMIC_STACKALLOC:
-    report_fatal_error("Unsupported dynamic stack allocation");
-  default:
-    llvm_unreachable("unimplemented operand");
+    report_fatal_error("unsupported dynamic stack allocation");
   }
 }
 
@@ -302,8 +313,9 @@
     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
   switch (CallConv) {
-  default:
-    report_fatal_error("Unsupported calling convention");
+  default: {
+    report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
+  }
   case CallingConv::C:
   case CallingConv::Fast:
     break;
@@ -317,16 +329,22 @@
   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
   CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);
 
-  for (auto &VA : ArgLocs) {
+  bool HasMemArgs = false;
+  for (size_t I = 0; I < ArgLocs.size(); ++I) {
+    auto &VA = ArgLocs[I];
+
     if (VA.isRegLoc()) {
       // Arguments passed in registers
       EVT RegVT = VA.getLocVT();
       MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
       switch (SimpleTy) {
       default: {
-        errs() << "LowerFormalArguments Unhandled argument type: "
-               << RegVT << '\n';
-        llvm_unreachable(nullptr);
+        std::string Str;
+        {
+          raw_string_ostream OS(Str);
+          RegVT.print(OS);
+        }
+        report_fatal_error("unhandled argument type: " + Twine(Str));
       }
       case MVT::i32:
       case MVT::i64:
@@ -337,34 +355,76 @@
 
       // If this is an value that has been promoted to wider types, insert an
       // assert[sz]ext to capture this, then truncate to the right size.
-      if (VA.getLocInfo() == CCValAssign::SExt)
+      if (VA.getLocInfo() == CCValAssign::SExt) {
         ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                                DAG.getValueType(VA.getValVT()));
-      else if (VA.getLocInfo() == CCValAssign::ZExt)
+      } else if (VA.getLocInfo() == CCValAssign::ZExt) {
         ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                                DAG.getValueType(VA.getValVT()));
+      }
 
-      if (VA.getLocInfo() != CCValAssign::Full)
+      if (VA.getLocInfo() != CCValAssign::Full) {
         ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
+      }
 
       InVals.push_back(ArgValue);
 
-      break;
+      break;
     }
-    } else {
-      fail(DL, DAG, "defined with too many args");
-      InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
+    } else if (VA.isMemLoc()) {
+      // NB: An earlier version of this code inserted constant zeros for "a
+      // chance that generated code is somewhat sane" (see comment in
+      // https://reviews.llvm.org/D20571?id=58249).
+      //
+      // Miscompiling and hoping that the result is "sane" is not a viable
+      // strategy. We should probably emit a fatal error here, but doing so
+      // has various downsides (e.g. subsequent errors will not reach the
+      // user, tests must be exit-on-error). The current approach (inserting
+      // UNDEF) produces rather obtuse verifier errors, roughly:
+      //
+      // clang-format off
+      //
+      // 320: (b7) r1 = 32                     ; R1_w=32
+      // 321: (b7) r2 = 1                      ; R2_w=1
+      // 322: (b7) r3 = 13                     ; R3_w=13
+      // 323: (b7) r4 = 184                    ; R4_w=184
+      // 324: (b7) r5 = 0                      ; R5_w=0
+      // 325: (85) call pc+665
+      // caller:
+      //  R6=17 R7=map_value(off=0,ks=4,vs=8192,imm=0) R8=0 R9=4 R10=fp0 fp-8=mmmm???? fp-16=ctx
+      // callee:
+      //  frame1: R1_w=32 R2_w=1 R3_w=13 R4_w=184 R5_w=0 R10=fp0
+      // 991: (b7) r0 = 1                      ; frame1: R0_w=1
+      // 992: (b7) r6 = 19                     ; frame1: R6_w=19
+      // 993: (2d) if r6 > r1 goto pc+22       ; frame1: R1=32 R6_w=19
+      // 994: (b7) r0 = 16                     ; frame1: R0_w=16
+      // 995: (73) *(u8 *)(r1 +1) = r0
+      // R1 invalid mem access 'scalar'
+      // verification time 522 usec
+      // stack depth 16+0+0+0
+      // processed 440 insns (limit 1000000) max_states_per_insn 1 total_states 15 peak_states 15 mark_read 8
+      //
+      // clang-format on
+      //
+      // It's not clear what the best thing to do here is. For now, err on the
+      // side of failing rather than running incorrectly.
+
+      HasMemArgs = true;
+      InVals.push_back(DAG.getUNDEF(VA.getValVT()));
     }
   }
-
-  if (IsVarArg || MF.getFunction().hasStructRetAttr()) {
-    fail(DL, DAG, "functions with VarArgs or StructRet are not supported");
+  if (HasMemArgs) {
+    unsupported(DL, DAG, "stack arguments are not supported");
+  }
+  if (IsVarArg) {
+    unsupported(DL, DAG, "variadic functions are not supported");
+  }
+  if (MF.getFunction().hasStructRetAttr()) {
+    unsupported(DL, DAG, "sret functions are not supported");
   }
 
   return Chain;
 }
 
-const unsigned BPFTargetLowering::MaxArgs = 5;
+const size_t BPFTargetLowering::MaxArgs = 5;
 
 SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                      SmallVectorImpl<SDValue> &InVals) const {
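For context: the BPF calling convention passes arguments in registers R1 through R5, which is where the MaxArgs limit of five defined above comes from. A hypothetical pair of C++ prototypes shows what does and does not fit (an example only, not from this patch):

    // Five integer arguments fit in the argument registers R1-R5.
    long fits(long a, long b, long c, long d, long e);

    // A sixth argument would have to be passed on the stack, which the
    // lowering above now reports via unsupported(...) instead of silently
    // miscompiling.
    long spills(long a, long b, long c, long d, long e, long f);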
@@ -383,8 +443,9 @@
   IsTailCall = false;
 
   switch (CallConv) {
-  default:
-    report_fatal_error("Unsupported calling convention");
+  default: {
+    report_fatal_error("unsupported calling convention: " + Twine(CallConv));
+  }
   case CallingConv::Fast:
   case CallingConv::C:
     break;
@@ -398,15 +459,20 @@
   unsigned NumBytes = CCInfo.getStackSize();
 
-  if (Outs.size() > MaxArgs)
-    fail(CLI.DL, DAG, "too many args to ", Callee);
+  if (Outs.size() > MaxArgs) {
+    unsupported(CLI.DL, DAG, "too many arguments", Callee);
+  }
 
+  bool HasByVal = false;
   for (auto &Arg : Outs) {
     ISD::ArgFlagsTy Flags = Arg.Flags;
-    if (!Flags.isByVal())
-      continue;
-
-    fail(CLI.DL, DAG, "pass by value not supported ", Callee);
+    if (Flags.isByVal()) {
+      HasByVal = true;
+      break;
+    }
+  }
+  if (HasByVal) {
+    unsupported(CLI.DL, DAG, "pass by value not supported", Callee);
   }
 
   auto PtrVT = getPointerTy(MF.getDataLayout());
@@ -415,16 +481,15 @@
   SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;
 
   // Walk arg assignments
-  for (unsigned i = 0,
-                e = std::min(static_cast<unsigned>(ArgLocs.size()), MaxArgs);
-       i != e; ++i) {
-    CCValAssign &VA = ArgLocs[i];
-    SDValue Arg = OutVals[i];
+  for (size_t I = 0; I < std::min(ArgLocs.size(), MaxArgs); ++I) {
+    CCValAssign &VA = ArgLocs[I];
+    SDValue &Arg = OutVals[I];
 
     // Promote the value if needed.
     switch (VA.getLocInfo()) {
-    default:
-      llvm_unreachable("Unknown loc info");
+    default: {
+      report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
+    }
     case CCValAssign::Full:
       break;
     case CCValAssign::SExt:
@@ -439,10 +504,11 @@
     }
 
     // Push arguments into RegsToPass vector
-    if (VA.isRegLoc())
+    if (VA.isRegLoc()) {
       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
-    else
-      llvm_unreachable("call arg pass bug");
+    } else {
+      report_fatal_error("stack arguments are not supported");
+    }
   }
 
   SDValue InGlue;
@@ -463,9 +529,6 @@
                                         G->getOffset(), 0);
   } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
     Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
-    fail(CLI.DL, DAG, Twine("A call to built-in function '"
-                            + StringRef(E->getSymbol())
-                            + "' is not supported."));
   }
 
   // Returns a chain & a flag for retval copy to use.
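The unsigned-to-size_t change for MaxArgs is what lets the loop bound above shed its cast: std::min deduces a single type for both operands, so mixing the size_t returned by ArgLocs.size() with an unsigned constant fails to compile on the usual 64-bit targets where the two types differ. A minimal illustration with hypothetical names:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    static const unsigned MaxArgsOld = 5;
    static const size_t MaxArgsNew = 5;

    size_t bound(const std::vector<int> &Locs) {
      // std::min(Locs.size(), MaxArgsOld);  // error: size_t vs. unsigned
      // Old workaround, mirroring the removed code:
      //   std::min(static_cast<unsigned>(Locs.size()), MaxArgsOld)
      return std::min(Locs.size(), MaxArgsNew); // types match; no cast needed
    }

    int main() { return static_cast<int>(bound({})); }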
@@ -479,8 +542,9 @@
   for (auto &Reg : RegsToPass)
     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
 
-  if (InGlue.getNode())
+  if (InGlue.getNode()) {
     Ops.push_back(InGlue);
+  }
 
   Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
   InGlue = Chain.getValue(1);
@@ -513,7 +577,7 @@
   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
 
   if (MF.getFunction().getReturnType()->isAggregateType()) {
-    fail(DL, DAG, "only integer returns supported");
+    unsupported(DL, DAG, "aggregate returns are not supported");
     return DAG.getNode(Opc, DL, MVT::Other, Chain);
   }
 
@@ -524,11 +588,14 @@
   SmallVector<SDValue, 4> RetOps(1, Chain);
 
   // Copy the result values into the output registers.
-  for (unsigned i = 0; i != RVLocs.size(); ++i) {
-    CCValAssign &VA = RVLocs[i];
-    assert(VA.isRegLoc() && "Can only return in registers!");
+  for (size_t I = 0; I != RVLocs.size(); ++I) {
+    CCValAssign &VA = RVLocs[I];
+    if (!VA.isRegLoc()) {
+      unsupported(DL, DAG, "stack returns are not supported");
+      return DAG.getNode(Opc, DL, MVT::Other, Chain);
+    }
 
-    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);
+    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[I], Glue);
 
     // Guarantee that all emitted copies are stuck together,
     // avoiding something bad.
@@ -539,8 +606,9 @@
   RetOps[0] = Chain; // Update chain.
 
   // Add the glue if we have it.
-  if (Glue.getNode())
+  if (Glue.getNode()) {
     RetOps.push_back(Glue);
+  }
 
   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
 }
@@ -555,10 +623,14 @@
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
 
-  if (Ins.size() >= 2) {
-    fail(DL, DAG, "only small returns supported");
-    for (unsigned i = 0, e = Ins.size(); i != e; ++i)
-      InVals.push_back(DAG.getConstant(0, DL, Ins[i].VT));
+  if (Ins.size() > 1) {
+    // NB: An earlier version of this code inserted constant zeros. See the
+    // comment in LowerFormalArguments for details on why that's not a viable
+    // strategy.
+ unsupported(DL, DAG, "only small returns supported"); + for (auto &In : Ins) { + InVals.push_back(DAG.getUNDEF(In.VT)); + } return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InGlue).getValue(1); } @@ -575,7 +647,7 @@ return Chain; } -static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) { +static void negateCondCode(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) { switch (CC) { default: break; @@ -597,8 +669,9 @@ SDValue Dest = Op.getOperand(4); SDLoc DL(Op); - if (!getHasJmpExt()) - NegateCC(LHS, RHS, CC); + if (!getHasJmpExt()) { + negateCondCode(LHS, RHS, CC); + } return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS, DAG.getConstant(CC, DL, LHS.getValueType()), Dest); @@ -612,8 +685,9 @@ ISD::CondCode CC = cast(Op.getOperand(4))->get(); SDLoc DL(Op); - if (!getHasJmpExt()) - NegateCC(LHS, RHS, CC); + if (!getHasJmpExt()) { + negateCondCode(LHS, RHS, CC); + } SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType()); SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); @@ -644,8 +718,11 @@ SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { - auto N = cast(Op); - assert(N->getOffset() == 0 && "Invalid offset for global address"); + auto *N = cast(Op); + if (N->getOffset()) { + report_fatal_error("invalid offset for global address: " + + Twine(N->getOffset())); + } SDLoc DL(Op); const GlobalValue *GV = N->getGlobal(); @@ -654,18 +731,18 @@ return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA); } -unsigned -BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB, - unsigned Reg, bool isSigned) const { +unsigned BPFTargetLowering::EmitSubregExt(MachineInstr &MI, + MachineBasicBlock *BB, unsigned Reg, + bool IsSigned) const { const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); const TargetRegisterClass *RC = getRegClassFor(MVT::i64); - int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri; + int RShiftOp = IsSigned ? 
@@ -644,8 +718,11 @@
 SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
-  auto N = cast<GlobalAddressSDNode>(Op);
-  assert(N->getOffset() == 0 && "Invalid offset for global address");
+  auto *N = cast<GlobalAddressSDNode>(Op);
+  if (N->getOffset()) {
+    report_fatal_error("invalid offset for global address: " +
+                       Twine(N->getOffset()));
+  }
 
   SDLoc DL(Op);
   const GlobalValue *GV = N->getGlobal();
@@ -654,18 +731,18 @@
   return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
 }
 
-unsigned
-BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
-                                 unsigned Reg, bool isSigned) const {
+unsigned BPFTargetLowering::EmitSubregExt(MachineInstr &MI,
+                                          MachineBasicBlock *BB, unsigned Reg,
+                                          bool IsSigned) const {
   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
   const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
-  int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
+  int RShiftOp = IsSigned ? BPF::SRA_ri : BPF::SRL_ri;
   MachineFunction *F = BB->getParent();
   DebugLoc DL = MI.getDebugLoc();
   MachineRegisterInfo &RegInfo = F->getRegInfo();
 
-  if (!isSigned) {
+  if (!IsSigned) {
     Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
     BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
     return PromotedReg0;
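A behavioral model of what EmitSubregExt arranges (a sketch in ordinary C++, not the backend code): the unsigned path is a plain 32-to-64-bit move (MOV_32_64), while the signed path, per the SRA_ri arithmetic right-shift selected above, shifts the value into the upper half and arithmetic-shifts it back down:

    #include <cassert>
    #include <cstdint>

    // Zero extension: what the !IsSigned path (MOV_32_64) produces.
    uint64_t zext32(uint32_t V) { return static_cast<uint64_t>(V); }

    // Sign extension via a shift pair: left-shift into the top 32 bits, then
    // arithmetic right-shift (SRA) back down. Well-defined since C++20;
    // two's complement in practice everywhere BPF runs.
    int64_t sext32(uint32_t V) {
      int64_t Wide = static_cast<int64_t>(static_cast<uint64_t>(V) << 32);
      return Wide >> 32; // arithmetic shift on a signed operand
    }

    int main() {
      assert(zext32(0xffffffffu) == 0xffffffffull);
      assert(sext32(0xffffffffu) == -1);
      assert(sext32(0x7fffffffu) == 0x7fffffff);
    }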
@@ -715,8 +792,8 @@
 
 MachineBasicBlock *
 BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
-                                               MachineBasicBlock *BB) const {
-  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
+                                               MachineBasicBlock *MBB) const {
+  const TargetInstrInfo &TII = *MBB->getParent()->getSubtarget().getInstrInfo();
   DebugLoc DL = MI.getDebugLoc();
   unsigned Opc = MI.getOpcode();
   bool isSelectRROp = (Opc == BPF::Select ||
@@ -724,21 +801,20 @@
                        Opc == BPF::Select_32 ||
                        Opc == BPF::Select_32_64);
 
-  bool isMemcpyOp = Opc == BPF::MEMCPY;
+  bool IsMemcpyOp = Opc == BPF::MEMCPY;
 
 #ifndef NDEBUG
-  bool isSelectRIOp = (Opc == BPF::Select_Ri ||
-                       Opc == BPF::Select_Ri_64_32 ||
-                       Opc == BPF::Select_Ri_32 ||
-                       Opc == BPF::Select_Ri_32_64);
+  bool IsSelectRiOp = (Opc == BPF::Select_Ri || Opc == BPF::Select_Ri_64_32 ||
+                       Opc == BPF::Select_Ri_32 || Opc == BPF::Select_Ri_32_64);
 
-
-  assert((isSelectRROp || isSelectRIOp || isMemcpyOp) &&
-         "Unexpected instr type to insert");
+  if (!(isSelectRROp || IsSelectRiOp || IsMemcpyOp)) {
+    report_fatal_error("unhandled instruction type: " + Twine(Opc));
+  }
 #endif
 
-  if (isMemcpyOp)
-    return EmitInstrWithCustomInserterMemcpy(MI, BB);
+  if (IsMemcpyOp) {
+    return EmitInstrWithCustomInserterMemcpy(MI, MBB);
+  }
 
   bool is32BitCmp = (Opc == BPF::Select_32 ||
                      Opc == BPF::Select_32_64 ||
@@ -749,29 +825,29 @@
   // control-flow pattern.  The incoming instruction knows the destination vreg
   // to set, the condition code register to branch on, the true/false values to
   // select between, and a branch opcode to use.
-  const BasicBlock *LLVM_BB = BB->getBasicBlock();
-  MachineFunction::iterator I = ++BB->getIterator();
+  const BasicBlock *BB = MBB->getBasicBlock();
+  MachineFunction::iterator I = ++MBB->getIterator();
 
   // ThisMBB:
   // ...
   //  TrueVal = ...
   //  jmp_XX r1, r2 goto Copy1MBB
   //  fallthrough --> Copy0MBB
-  MachineBasicBlock *ThisMBB = BB;
-  MachineFunction *F = BB->getParent();
-  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
-  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
+  MachineBasicBlock *ThisMBB = MBB;
+  MachineFunction *F = MBB->getParent();
+  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(BB);
+  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(BB);
 
   F->insert(I, Copy0MBB);
   F->insert(I, Copy1MBB);
   // Update machine-CFG edges by transferring all successors of the current
   // block to the new block which will contain the Phi node for the select.
-  Copy1MBB->splice(Copy1MBB->begin(), BB,
-                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
-  Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
+  Copy1MBB->splice(Copy1MBB->begin(), MBB,
+                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
+  Copy1MBB->transferSuccessorsAndUpdatePHIs(MBB);
   // Next, add the true and fallthrough blocks as its successors.
-  BB->addSuccessor(Copy0MBB);
-  BB->addSuccessor(Copy1MBB);
+  MBB->addSuccessor(Copy0MBB);
+  MBB->addSuccessor(Copy1MBB);
 
   // Insert Branch if Flag
   int CC = MI.getOperand(3).getImm();
@@ -799,10 +875,8 @@
   }
 
   Register LHS = MI.getOperand(1).getReg();
-  bool isSignedCmp = (CC == ISD::SETGT ||
-                      CC == ISD::SETGE ||
-                      CC == ISD::SETLT ||
-                      CC == ISD::SETLE);
+  bool IsSignedCmp = (CC == ISD::SETGT || CC == ISD::SETGE ||
+                      CC == ISD::SETLT || CC == ISD::SETLE);
 
   // eBPF at the moment only has 64-bit comparison. Any 32-bit comparison need
   // to be promoted, however if the 32-bit comparison operands are destination
@@ -811,44 +885,47 @@
   //
   // We simply do extension for all situations in this method, but we will
   // try to remove those unnecessary in BPFMIPeephole pass.
-  if (is32BitCmp && !HasJmp32)
-    LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);
+  if (is32BitCmp && !HasJmp32) {
+    LHS = EmitSubregExt(MI, MBB, LHS, IsSignedCmp);
+  }
 
   if (isSelectRROp) {
     Register RHS = MI.getOperand(2).getReg();
 
-    if (is32BitCmp && !HasJmp32)
-      RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);
+    if (is32BitCmp && !HasJmp32) {
+      RHS = EmitSubregExt(MI, MBB, RHS, IsSignedCmp);
+    }
 
-    BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
+    BuildMI(MBB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
   } else {
-    int64_t imm32 = MI.getOperand(2).getImm();
+    int64_t Imm32 = MI.getOperand(2).getImm();
     // Check before we build J*_ri instruction.
-    assert (isInt<32>(imm32));
-    BuildMI(BB, DL, TII.get(NewCC))
-        .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
+    if (!isInt<32>(Imm32)) {
+      report_fatal_error("immediate overflows 32 bits: " + Twine(Imm32));
+    }
+    BuildMI(MBB, DL, TII.get(NewCC)).addReg(LHS).addImm(Imm32).addMBB(Copy1MBB);
   }
 
   // Copy0MBB:
   //  %FalseValue = ...
   //  # fallthrough to Copy1MBB
-  BB = Copy0MBB;
+  MBB = Copy0MBB;
 
   // Update machine-CFG edges
-  BB->addSuccessor(Copy1MBB);
+  MBB->addSuccessor(Copy1MBB);
 
   // Copy1MBB:
   //  %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
   // ...
-  BB = Copy1MBB;
-  BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
+  MBB = Copy1MBB;
+  BuildMI(*MBB, MBB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
       .addReg(MI.getOperand(5).getReg())
      .addMBB(Copy0MBB)
      .addReg(MI.getOperand(4).getReg())
      .addMBB(ThisMBB);
 
   MI.eraseFromParent(); // The pseudo instruction is gone now.
-  return BB;
+  return MBB;
 }
 
 EVT BPFTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
@@ -866,8 +943,9 @@
                                           unsigned AS,
                                           Instruction *I) const {
   // No global is ever allowed as a base.
-  if (AM.BaseGV)
+  if (AM.BaseGV) {
     return false;
+  }
 
   switch (AM.Scale) {
   case 0: // "r+i" or just "i", depending on HasBaseReg.