diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h --- a/bolt/include/bolt/Core/MCPlusBuilder.h +++ b/bolt/include/bolt/Core/MCPlusBuilder.h @@ -854,6 +854,15 @@ return false; } + struct X86MemOperand { + unsigned BaseRegNum; + int64_t ScaleImm; + unsigned IndexRegNum; + int64_t DispImm; + unsigned SegRegNum; + const MCExpr *DispExpr = nullptr; + }; + /// Given an instruction with (compound) memory operand, evaluate and return /// the corresponding values. Note that the operand could be in any position, /// but there is an assumption there's only one compound memory operand. @@ -862,13 +871,10 @@ /// /// Since a Displacement field could be either an immediate or an expression, /// the function sets either \p DispImm or \p DispExpr value. - virtual bool - evaluateX86MemoryOperand(const MCInst &Inst, unsigned *BaseRegNum, - int64_t *ScaleImm, unsigned *IndexRegNum, - int64_t *DispImm, unsigned *SegmentRegNum, - const MCExpr **DispExpr = nullptr) const { + virtual std::optional<X86MemOperand> + evaluateX86MemoryOperand(const MCInst &Inst) const { llvm_unreachable("not implemented"); - return false; + return std::nullopt; } /// Given an instruction with memory addressing attempt to statically compute diff --git a/bolt/include/bolt/Passes/RetpolineInsertion.h b/bolt/include/bolt/Passes/RetpolineInsertion.h --- a/bolt/include/bolt/Passes/RetpolineInsertion.h +++ b/bolt/include/bolt/Passes/RetpolineInsertion.h @@ -30,14 +30,7 @@ bool isJump() const { return !IsCall; } bool isTailCall() const { return IsTailCall; } - struct MemOpInfo { - unsigned BaseRegNum; - int64_t ScaleValue; - unsigned IndexRegNum; - int64_t DispValue; - unsigned SegRegNum; - const MCExpr *DispExpr{nullptr}; - }; + using MemOpInfo = MCPlusBuilder::X86MemOperand; union { // Register branch information diff --git a/bolt/lib/Passes/RetpolineInsertion.cpp b/bolt/lib/Passes/RetpolineInsertion.cpp --- a/bolt/lib/Passes/RetpolineInsertion.cpp +++ 
b/bolt/lib/Passes/RetpolineInsertion.cpp @@ -134,8 +134,8 @@ MCInst LoadCalleeAddrs; const IndirectBranchInfo::MemOpInfo &MemRef = BrInfo.Memory; - MIB.createLoad(LoadCalleeAddrs, MemRef.BaseRegNum, MemRef.ScaleValue, - MemRef.IndexRegNum, MemRef.DispValue, MemRef.DispExpr, + MIB.createLoad(LoadCalleeAddrs, MemRef.BaseRegNum, MemRef.ScaleImm, + MemRef.IndexRegNum, MemRef.DispImm, MemRef.DispExpr, MemRef.SegRegNum, MIB.getX86R11(), 8); BB2.addInstruction(LoadCalleeAddrs); @@ -195,11 +195,10 @@ ? "r" + to_string(MemRef.BaseRegNum) : ""; - Tag += - MemRef.DispExpr ? "+" + DispExprStr : "+" + to_string(MemRef.DispValue); + Tag += MemRef.DispExpr ? "+" + DispExprStr : "+" + to_string(MemRef.DispImm); Tag += MemRef.IndexRegNum != BC.MIB->getNoRegister() ? "+" + to_string(MemRef.ScaleImm) + "*" + to_string(MemRef.IndexRegNum) : ""; @@ -232,8 +231,8 @@ if (BrInfo.isMem() && R11Available) { const IndirectBranchInfo::MemOpInfo &MemRef = BrInfo.Memory; MCInst LoadCalleeAddrs; - MIB.createLoad(LoadCalleeAddrs, MemRef.BaseRegNum, MemRef.ScaleValue, - MemRef.IndexRegNum, MemRef.DispValue, MemRef.DispExpr, + MIB.createLoad(LoadCalleeAddrs, MemRef.BaseRegNum, MemRef.ScaleImm, + MemRef.IndexRegNum, MemRef.DispImm, MemRef.DispExpr, MemRef.SegRegNum, MIB.getX86R11(), 8); Replacement.push_back(LoadCalleeAddrs); } @@ -252,11 +251,11 @@ if (MIB.isBranchOnMem(Inst)) { IsMem = true; - if (!MIB.evaluateX86MemoryOperand(Inst, &Memory.BaseRegNum, - &Memory.ScaleValue, - &Memory.IndexRegNum, &Memory.DispValue, - &Memory.SegRegNum, &Memory.DispExpr)) + std::optional<MCPlusBuilder::X86MemOperand> MO = + MIB.evaluateX86MemoryOperand(Inst); + if (!MO) llvm_unreachable("not expected"); + Memory = MO.value(); } else if (MIB.isBranchOnReg(Inst)) { assert(MCPlus::getNumPrimeOperands(Inst) == 1 && "expect 1 operand"); BranchReg = Inst.getOperand(0).getReg(); @@ -306,9 +305,9 @@ IndirectBranchInfo::MemOpInfo &MemRef = BrInfo.Memory; int Addend = (BrInfo.isJump() || 
BrInfo.isTailCall()) ? 8 : 16; if (MemRef.BaseRegNum == MIB.getStackPointer()) - MemRef.DispValue += Addend; + MemRef.DispImm += Addend; if (MemRef.IndexRegNum == MIB.getStackPointer()) - MemRef.DispValue += Addend * MemRef.ScaleValue; + MemRef.DispImm += Addend * MemRef.ScaleImm; } TargetRetpoline = getOrCreateRetpoline(BC, BrInfo, R11Available); diff --git a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp --- a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp +++ b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp @@ -672,20 +672,15 @@ return X86::isMacroFused(CmpKind, BranchKind); } - bool - evaluateX86MemoryOperand(const MCInst &Inst, unsigned *BaseRegNum, - int64_t *ScaleImm, unsigned *IndexRegNum, - int64_t *DispImm, unsigned *SegmentRegNum, - const MCExpr **DispExpr = nullptr) const override { - assert(BaseRegNum && ScaleImm && IndexRegNum && SegmentRegNum && - "one of the input pointers is null"); + std::optional<X86MemOperand> + evaluateX86MemoryOperand(const MCInst &Inst) const override { int MemOpNo = getMemoryOperandNo(Inst); if (MemOpNo < 0) - return false; + return std::nullopt; unsigned MemOpOffset = static_cast<unsigned>(MemOpNo); if (MemOpOffset + X86::AddrSegmentReg >= MCPlus::getNumPrimeOperands(Inst)) - return false; + return std::nullopt; const MCOperand &Base = Inst.getOperand(MemOpOffset + X86::AddrBaseReg); const MCOperand &Scale = Inst.getOperand(MemOpOffset + X86::AddrScaleAmt); @@ -697,47 +692,33 @@ // Make sure it is a well-formed memory operand. 
if (!Base.isReg() || !Scale.isImm() || !Index.isReg() || (!Disp.isImm() && !Disp.isExpr()) || !Segment.isReg()) - return false; + return std::nullopt; - *BaseRegNum = Base.getReg(); - *ScaleImm = Scale.getImm(); - *IndexRegNum = Index.getReg(); - if (Disp.isImm()) { - assert(DispImm && "DispImm needs to be set"); - *DispImm = Disp.getImm(); - if (DispExpr) - *DispExpr = nullptr; - } else { - assert(DispExpr && "DispExpr needs to be set"); - *DispExpr = Disp.getExpr(); - if (DispImm) - *DispImm = 0; - } - *SegmentRegNum = Segment.getReg(); - return true; + X86MemOperand MO; + MO.BaseRegNum = Base.getReg(); + MO.ScaleImm = Scale.getImm(); + MO.IndexRegNum = Index.getReg(); + MO.DispImm = Disp.isImm() ? Disp.getImm() : 0; + MO.DispExpr = Disp.isExpr() ? Disp.getExpr() : nullptr; + MO.SegRegNum = Segment.getReg(); + return MO; } bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target, uint64_t Address, uint64_t Size) const override { - unsigned BaseRegNum; - int64_t ScaleValue; - unsigned IndexRegNum; - int64_t DispValue; - unsigned SegRegNum; - const MCExpr *DispExpr = nullptr; - if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum, - &DispValue, &SegRegNum, &DispExpr)) + std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(Inst); + if (!MO) return false; // Make sure it's a well-formed addressing we can statically evaluate. 
- if ((BaseRegNum != X86::RIP && BaseRegNum != X86::NoRegister) || - IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister || - DispExpr) + if ((MO->BaseRegNum != X86::RIP && MO->BaseRegNum != X86::NoRegister) || + MO->IndexRegNum != X86::NoRegister || + MO->SegRegNum != X86::NoRegister || MO->DispExpr) return false; - Target = DispValue; - if (BaseRegNum == X86::RIP) { + Target = MO->DispImm; + if (MO->BaseRegNum == X86::RIP) { assert(Size != 0 && "instruction size required in order to statically " "evaluate RIP-relative address"); Target += Address + Size; @@ -1113,21 +1094,15 @@ case X86::MOV32mi: I = {4, false, true, false, true}; break; } // end switch (Inst.getOpcode()) - unsigned BaseRegNum; - int64_t ScaleValue; - unsigned IndexRegNum; - int64_t DispValue; - unsigned SegRegNum; - const MCExpr *DispExpr; - if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum, - &DispValue, &SegRegNum, &DispExpr)) { + std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(Inst); + if (!MO) { LLVM_DEBUG(dbgs() << "Evaluate failed on "); LLVM_DEBUG(Inst.dump()); return false; } // Make sure it's a stack access - if (BaseRegNum != X86::RBP && BaseRegNum != X86::RSP) + if (MO->BaseRegNum != X86::RBP && MO->BaseRegNum != X86::RSP) return false; IsLoad = I.IsLoad; @@ -1135,9 +1110,10 @@ IsStoreFromReg = I.StoreFromReg; Size = I.DataSize; IsSimple = I.Simple; - StackPtrReg = BaseRegNum; - StackOffset = DispValue; - IsIndexed = IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister; + StackPtrReg = MO->BaseRegNum; + StackOffset = MO->DispImm; + IsIndexed = + MO->IndexRegNum != X86::NoRegister || MO->SegRegNum != X86::NoRegister; if (!I.Simple) return true; @@ -1189,19 +1165,13 @@ case X86::MOV32mi: I = {4, false, false}; break; } // end switch (Inst.getOpcode()) - unsigned BaseRegNum; - int64_t ScaleValue; - unsigned IndexRegNum; - int64_t DispValue; - unsigned SegRegNum; - const MCExpr *DispExpr; - if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, 
&ScaleValue, &IndexRegNum, - &DispValue, &SegRegNum, &DispExpr)) { + std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(Inst); + if (!MO) { llvm_unreachable("Evaluate failed"); return; } // Make sure it's a stack access - if (BaseRegNum != X86::RBP && BaseRegNum != X86::RSP) { + if (MO->BaseRegNum != X86::RBP && MO->BaseRegNum != X86::RSP) { llvm_unreachable("Not a stack access"); return; } @@ -1321,23 +1291,17 @@ break; case X86::LEA64r: { - unsigned BaseRegNum; - int64_t ScaleValue; - unsigned IndexRegNum; - int64_t DispValue; - unsigned SegRegNum; - const MCExpr *DispExpr = nullptr; - if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, - &IndexRegNum, &DispValue, &SegRegNum, - &DispExpr)) + std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(Inst); + if (!MO) return false; - if (BaseRegNum == X86::NoRegister || IndexRegNum != X86::NoRegister || - SegRegNum != X86::NoRegister || DispExpr) + if (MO->BaseRegNum == X86::NoRegister || + MO->IndexRegNum != X86::NoRegister || + MO->SegRegNum != X86::NoRegister || MO->DispExpr) return false; - if (ErrorOr<uint64_t> InputVal = getOperandVal(BaseRegNum)) - Output = *InputVal + DispValue; + if (ErrorOr<uint64_t> InputVal = getOperandVal(MO->BaseRegNum)) + Output = *InputVal + MO->DispImm; else return false; @@ -2087,17 +2051,12 @@ } // Verify operands for MOV. - unsigned BaseRegNum; - int64_t ScaleValue; - unsigned IndexRegNum; - int64_t DispValue; - unsigned SegRegNum; - if (!evaluateX86MemoryOperand(Instr, &BaseRegNum, &ScaleValue, - &IndexRegNum, &DispValue, &SegRegNum)) + std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(Instr); + if (!MO) break; - if (BaseRegNum != R1 || ScaleValue != 4 || - IndexRegNum == X86::NoRegister || DispValue != 0 || - SegRegNum != X86::NoRegister) + if (MO->BaseRegNum != R1 || MO->ScaleImm != 4 || + MO->IndexRegNum == X86::NoRegister || MO->DispImm != 0 || + MO->SegRegNum != X86::NoRegister) break; MovInstr = &Instr; } else { @@ -2113,19 +2072,12 @@ } // Verify operands for LEA. 
- unsigned BaseRegNum; - int64_t ScaleValue; - unsigned IndexRegNum; - const MCExpr *DispExpr = nullptr; - int64_t DispValue; - unsigned SegRegNum; - if (!evaluateX86MemoryOperand(Instr, &BaseRegNum, &ScaleValue, - &IndexRegNum, &DispValue, &SegRegNum, - &DispExpr)) + std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(Instr); + if (!MO) break; - if (BaseRegNum != RegInfo->getProgramCounter() || - IndexRegNum != X86::NoRegister || SegRegNum != X86::NoRegister || - DispExpr == nullptr) + if (MO->BaseRegNum != RegInfo->getProgramCounter() || + MO->IndexRegNum != X86::NoRegister || + MO->SegRegNum != X86::NoRegister || MO->DispExpr == nullptr) break; MemLocInstr = &Instr; break; @@ -2223,36 +2175,31 @@ const MCRegister RIPRegister = RegInfo->getProgramCounter(); // Analyze the memory location. - unsigned BaseRegNum, IndexRegNum, SegRegNum; - int64_t ScaleValue, DispValue; - const MCExpr *DispExpr; - - if (!evaluateX86MemoryOperand(*MemLocInstr, &BaseRegNum, &ScaleValue, - &IndexRegNum, &DispValue, &SegRegNum, - &DispExpr)) + std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(*MemLocInstr); + if (!MO) return IndirectBranchType::UNKNOWN; - BaseRegNumOut = BaseRegNum; - IndexRegNumOut = IndexRegNum; - DispValueOut = DispValue; - DispExprOut = DispExpr; + BaseRegNumOut = MO->BaseRegNum; + IndexRegNumOut = MO->IndexRegNum; + DispValueOut = MO->DispImm; + DispExprOut = MO->DispExpr; - if ((BaseRegNum != X86::NoRegister && BaseRegNum != RIPRegister) || - SegRegNum != X86::NoRegister) + if ((MO->BaseRegNum != X86::NoRegister && MO->BaseRegNum != RIPRegister) || + MO->SegRegNum != X86::NoRegister) return IndirectBranchType::UNKNOWN; if (MemLocInstr == &Instruction && - (!ScaleValue || IndexRegNum == X86::NoRegister)) { + (!MO->ScaleImm || MO->IndexRegNum == X86::NoRegister)) { MemLocInstrOut = MemLocInstr; return IndirectBranchType::POSSIBLE_FIXED_BRANCH; } if (Type == IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE && - (ScaleValue != 1 || BaseRegNum != RIPRegister)) + (MO->ScaleImm != 1 || 
MO->BaseRegNum != RIPRegister)) return IndirectBranchType::UNKNOWN; if (Type != IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE && - ScaleValue != PtrSize) + MO->ScaleImm != PtrSize) return IndirectBranchType::UNKNOWN; MemLocInstrOut = MemLocInstr; @@ -2296,20 +2243,15 @@ MCInst &CallInst = *Itr++; assert(isIndirectBranch(CallInst) || isCall(CallInst)); - unsigned BaseReg, IndexReg, SegmentReg; - int64_t Scale, Disp; - const MCExpr *DispExpr; - // The call can just be jmp offset(reg) - if (evaluateX86MemoryOperand(CallInst, &BaseReg, &Scale, &IndexReg, &Disp, - &SegmentReg, &DispExpr)) { - if (!DispExpr && BaseReg != X86::RIP && BaseReg != X86::RBP && - BaseReg != X86::NoRegister) { - MethodRegNum = BaseReg; - if (Scale == 1 && IndexReg == X86::NoRegister && - SegmentReg == X86::NoRegister) { + if (std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(CallInst)) { + if (!MO->DispExpr && MO->BaseRegNum != X86::RIP && + MO->BaseRegNum != X86::RBP && MO->BaseRegNum != X86::NoRegister) { + MethodRegNum = MO->BaseRegNum; + if (MO->ScaleImm == 1 && MO->IndexRegNum == X86::NoRegister && + MO->SegRegNum == X86::NoRegister) { VtableRegNum = MethodRegNum; - MethodOffset = Disp; + MethodOffset = MO->DispImm; MethodFetchInsns.push_back(&CallInst); return true; } @@ -2332,15 +2274,17 @@ MCInst &CurInst = *Itr++; const MCInstrDesc &Desc = Info->get(CurInst.getOpcode()); if (Desc.hasDefOfPhysReg(CurInst, MethodRegNum, *RegInfo)) { - if (isLoad(CurInst) && - evaluateX86MemoryOperand(CurInst, &BaseReg, &Scale, &IndexReg, - &Disp, &SegmentReg, &DispExpr)) { - if (!DispExpr && Scale == 1 && BaseReg != X86::RIP && - BaseReg != X86::RBP && BaseReg != X86::NoRegister && - IndexReg == X86::NoRegister && SegmentReg == X86::NoRegister && - BaseReg != X86::RIP) { - VtableRegNum = BaseReg; - MethodOffset = Disp; + if (!isLoad(CurInst)) + return false; + if (std::optional<X86MemOperand> MO = + evaluateX86MemoryOperand(CurInst)) { + if (!MO->DispExpr && MO->ScaleImm == 1 && + MO->BaseRegNum != X86::RIP && MO->BaseRegNum 
!= X86::RBP && + MO->BaseRegNum != X86::NoRegister && + MO->IndexRegNum == X86::NoRegister && + MO->SegRegNum == X86::NoRegister && MO->BaseRegNum != X86::RIP) { + VtableRegNum = MO->BaseRegNum; + MethodOffset = MO->DispImm; MethodFetchInsns.push_back(&CurInst); if (MethodOffset != 0) return true;