Index: lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp =================================================================== --- lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp +++ lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp @@ -27,6 +27,8 @@ #include "llvm/MC/MCTargetOptions.h" #include "llvm/Support/CommandLine.h" #include +#include +#include namespace llvm { namespace { @@ -61,26 +63,52 @@ class X86AddressSanitizer : public X86AsmInstrumentation { public: struct RegisterContext { + public: RegisterContext(unsigned AddressReg, unsigned ShadowReg, - unsigned ScratchReg) - : AddressReg(AddressReg), ShadowReg(ShadowReg), ScratchReg(ScratchReg) { + unsigned ScratchReg) { + for (unsigned Reg : { AddressReg, ShadowReg, ScratchReg }) { + BusyRegs.push_back(convReg(Reg, MVT::i64)); + } } unsigned addressReg(MVT::SimpleValueType VT) const { - return getX86SubSuperRegister(AddressReg, VT); + return convReg(BusyRegs[0], VT); } unsigned shadowReg(MVT::SimpleValueType VT) const { - return getX86SubSuperRegister(ShadowReg, VT); + return convReg(BusyRegs[1], VT); } unsigned scratchReg(MVT::SimpleValueType VT) const { - return getX86SubSuperRegister(ScratchReg, VT); + return convReg(BusyRegs[2], VT); + } + + void addBusyReg(unsigned Reg) { + BusyRegs.push_back(convReg(Reg, MVT::i64)); + } + + void addBusyRegs(const X86Operand &Op) { + addBusyReg(Op.getMemBaseReg()); + addBusyReg(Op.getMemIndexReg()); + } + + unsigned chooseFrameReg(MVT::SimpleValueType VT) const { + static const unsigned Candidates[] = { X86::RBP, X86::RAX, X86::RBX, + X86::RCX, X86::RDX, X86::RDI, + X86::RSI }; + for (unsigned Reg : Candidates) { + if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg)) + return convReg(Reg, VT); + } + return X86::NoRegister; } - const unsigned AddressReg; - const unsigned ShadowReg; - const unsigned ScratchReg; + private: + unsigned convReg(unsigned Reg, MVT::SimpleValueType VT) const { + return Reg == X86::NoRegister ? 
Reg : getX86SubSuperRegister(Reg, VT); + } + + std::vector<unsigned> BusyRegs; }; X86AddressSanitizer(const MCSubtargetInfo &STI) @@ -191,6 +219,9 @@ IsSmallMemAccess(AccessSize) ? X86::RBX : X86::NoRegister /* ScratchReg */); + RegCtx.addBusyReg(DstReg); + RegCtx.addBusyReg(SrcReg); + RegCtx.addBusyReg(CntReg); InstrumentMemOperandPrologue(RegCtx, Ctx, Out); @@ -297,16 +328,17 @@ } const bool IsWrite = MII.get(Inst.getOpcode()).mayStore(); - RegisterContext RegCtx(X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */, - IsSmallMemAccess(AccessSize) - ? X86::RCX - : X86::NoRegister /* ScratchReg */); for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) { assert(Operands[Ix]); MCParsedAsmOperand &Op = *Operands[Ix]; if (Op.isMem()) { X86Operand &MemOp = static_cast<X86Operand &>(Op); + RegisterContext RegCtx( + X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */, + IsSmallMemAccess(AccessSize) ? X86::RCX + : X86::NoRegister /* ScratchReg */); + RegCtx.addBusyRegs(MemOp); InstrumentMemOperandPrologue(RegCtx, Ctx, Out); InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out); InstrumentMemOperandEpilogue(RegCtx, Ctx, Out); @@ -414,23 +446,29 @@ virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) override { + unsigned LocalFrameReg = RegCtx.chooseFrameReg(MVT::i32); + assert(LocalFrameReg != X86::NoRegister); + const MCRegisterInfo *MRI = Ctx.getRegisterInfo(); unsigned FrameReg = GetFrameReg(Ctx, Out); if (MRI && FrameReg != X86::NoRegister) { - SpillReg(Out, X86::EBP); + SpillReg(Out, LocalFrameReg); if (FrameReg == X86::ESP) { - Out.EmitCFIAdjustCfaOffset(4 /* byte size of the FrameReg */); - Out.EmitCFIRelOffset(MRI->getDwarfRegNum(X86::EBP, true /* IsEH */), 0); + Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */); + Out.EmitCFIRelOffset( + MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0); } EmitInstruction( - Out, MCInstBuilder(X86::MOV32rr).addReg(X86::EBP).addReg(FrameReg)); + Out, + 
MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg)); Out.EmitCFIRememberState(); - Out.EmitCFIDefCfaRegister(MRI->getDwarfRegNum(X86::EBP, true /* IsEH */)); + Out.EmitCFIDefCfaRegister( + MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */)); } SpillReg(Out, RegCtx.addressReg(MVT::i32)); SpillReg(Out, RegCtx.shadowReg(MVT::i32)); - if (RegCtx.ScratchReg != X86::NoRegister) + if (RegCtx.scratchReg(MVT::i32) != X86::NoRegister) SpillReg(Out, RegCtx.scratchReg(MVT::i32)); StoreFlags(Out); } @@ -438,18 +476,21 @@ virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) override { + unsigned LocalFrameReg = RegCtx.chooseFrameReg(MVT::i32); + assert(LocalFrameReg != X86::NoRegister); + RestoreFlags(Out); - if (RegCtx.ScratchReg != X86::NoRegister) + if (RegCtx.scratchReg(MVT::i32) != X86::NoRegister) RestoreReg(Out, RegCtx.scratchReg(MVT::i32)); RestoreReg(Out, RegCtx.shadowReg(MVT::i32)); RestoreReg(Out, RegCtx.addressReg(MVT::i32)); unsigned FrameReg = GetFrameReg(Ctx, Out); if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) { - RestoreReg(Out, X86::EBP); + RestoreReg(Out, LocalFrameReg); Out.EmitCFIRestoreState(); if (FrameReg == X86::ESP) - Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the FrameReg */); + Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */); } } @@ -494,7 +535,7 @@ unsigned ShadowRegI32 = RegCtx.shadowReg(MVT::i32); unsigned ShadowRegI8 = RegCtx.shadowReg(MVT::i8); - assert(RegCtx.ScratchReg != X86::NoRegister); + assert(RegCtx.scratchReg(MVT::i32) != X86::NoRegister); unsigned ScratchRegI32 = RegCtx.scratchReg(MVT::i32); ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out); @@ -663,24 +704,30 @@ virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) override { + unsigned LocalFrameReg = RegCtx.chooseFrameReg(MVT::i64); + assert(LocalFrameReg != X86::NoRegister); + const MCRegisterInfo *MRI = 
Ctx.getRegisterInfo(); unsigned FrameReg = GetFrameReg(Ctx, Out); if (MRI && FrameReg != X86::NoRegister) { - SpillReg(Out, X86::RBP); + SpillReg(Out, LocalFrameReg); if (FrameReg == X86::RSP) { - Out.EmitCFIAdjustCfaOffset(8 /* byte size of the FrameReg */); - Out.EmitCFIRelOffset(MRI->getDwarfRegNum(X86::RBP, true /* IsEH */), 0); + Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */); + Out.EmitCFIRelOffset( + MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0); } EmitInstruction( - Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RBP).addReg(FrameReg)); + Out, + MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg)); Out.EmitCFIRememberState(); - Out.EmitCFIDefCfaRegister(MRI->getDwarfRegNum(X86::RBP, true /* IsEH */)); + Out.EmitCFIDefCfaRegister( + MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */)); } EmitAdjustRSP(Ctx, Out, -128); SpillReg(Out, RegCtx.shadowReg(MVT::i64)); SpillReg(Out, RegCtx.addressReg(MVT::i64)); - if (RegCtx.ScratchReg != X86::NoRegister) + if (RegCtx.scratchReg(MVT::i64) != X86::NoRegister) SpillReg(Out, RegCtx.scratchReg(MVT::i64)); StoreFlags(Out); } @@ -688,8 +735,11 @@ virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) override { + unsigned LocalFrameReg = RegCtx.chooseFrameReg(MVT::i64); + assert(LocalFrameReg != X86::NoRegister); + RestoreFlags(Out); - if (RegCtx.ScratchReg != X86::NoRegister) + if (RegCtx.scratchReg(MVT::i64) != X86::NoRegister) RestoreReg(Out, RegCtx.scratchReg(MVT::i64)); RestoreReg(Out, RegCtx.addressReg(MVT::i64)); RestoreReg(Out, RegCtx.shadowReg(MVT::i64)); @@ -697,10 +747,10 @@ unsigned FrameReg = GetFrameReg(Ctx, Out); if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) { - RestoreReg(Out, X86::RBP); + RestoreReg(Out, LocalFrameReg); Out.EmitCFIRestoreState(); if (FrameReg == X86::RSP) - Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the FrameReg */); + Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */); } } @@ -736,7 +786,7 @@ 
.addReg(X86::RSP) .addImm(-16)); - if (RegCtx.AddressReg != X86::RDI) { + if (RegCtx.addressReg(MVT::i64) != X86::RDI) { EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg( RegCtx.addressReg(MVT::i64))); } @@ -757,7 +807,7 @@ unsigned ShadowRegI32 = RegCtx.shadowReg(MVT::i32); unsigned ShadowRegI8 = RegCtx.shadowReg(MVT::i8); - assert(RegCtx.ScratchReg != X86::NoRegister); + assert(RegCtx.scratchReg(MVT::i32) != X86::NoRegister); unsigned ScratchRegI32 = RegCtx.scratchReg(MVT::i32); ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out); Index: test/Instrumentation/AddressSanitizer/X86/asm_cfi.s =================================================================== --- test/Instrumentation/AddressSanitizer/X86/asm_cfi.s +++ test/Instrumentation/AddressSanitizer/X86/asm_cfi.s @@ -3,20 +3,20 @@ # RUN: llvm-mc %s -triple=i386-unknown-linux-gnu -asm-instrumentation=address -asan-instrument-assembly | FileCheck %s -# CHECK-LABEL: swap_cfa_rbp -# CHECK: pushl %ebp +# CHECK-LABEL: load4b_cfa_rbp +# CHECK: pushl %ebx # CHECK-NOT: .cfi_adjust_cfa_offset 8 -# CHECK: movl %ebp, %ebp +# CHECK: movl %ebp, %ebx # CHECK: .cfi_remember_state -# CHECK: .cfi_def_cfa_register %ebp -# CHECK: popl %ebp +# CHECK: .cfi_def_cfa_register %ebx +# CHECK: popl %ebx # CHECK: .cfi_restore_state # CHECK-NOT: .cfi_adjust_cfa_offset -8 # CHECK: retl .text - .globl swap_cfa_rbp - .type swap_cfa_rbp,@function + .globl load4b_cfa_rbp + .type load4b_cfa_rbp,@function swap_cfa_rbp: # @swap_cfa_rbp .cfi_startproc pushl %ebp @@ -25,34 +25,28 @@ movl %esp, %ebp .cfi_def_cfa_register %ebp movl 8(%ebp), %eax - movl 12(%ebp), %ecx - movl (%ecx), %ecx - movl %ecx, (%eax) popl %ebp retl .cfi_endproc -# CHECK-LABEL: swap_cfa_rsp -# CHECK: pushl %ebp +# CHECK-LABEL: load4b_cfa_rsp +# CHECK: pushl %ebx # CHECK: .cfi_adjust_cfa_offset 4 -# CHECK: movl %esp, %ebp +# CHECK: movl %esp, %ebx # CHECK: .cfi_remember_state -# CHECK: .cfi_def_cfa_register %ebp -# CHECK: popl %ebp +# CHECK: 
.cfi_def_cfa_register %ebx +# CHECK: popl %ebx # CHECK: .cfi_restore_state # CHECK: retl - .globl swap_cfa_rsp - .type swap_cfa_rsp,@function + .globl load4b_cfa_rsp + .type load4b_cfa_rsp,@function -swap_cfa_rsp: # @swap_cfa_rsp +load4b_cfa_rsp: # @load4b_cfa_rsp .cfi_startproc pushl %ebp .cfi_offset %ebp, 0 movl %esp, %ebp movl 8(%ebp), %eax - movl 12(%ebp), %ecx - movl (%ecx), %ecx - movl %ecx, (%eax) popl %ebp retl .cfi_endproc