Index: lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp =================================================================== --- lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp +++ lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp @@ -36,7 +36,7 @@ cl::init(false)); bool IsStackReg(unsigned Reg) { - return Reg == X86::RSP || Reg == X86::ESP || Reg == X86::SP; + return Reg == X86::RSP || Reg == X86::ESP; } bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; } @@ -72,7 +72,8 @@ }; X86AddressSanitizer(const MCSubtargetInfo &STI) - : X86AsmInstrumentation(STI), RepPrefix(false) {} + : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {} + virtual ~X86AddressSanitizer() {} // X86AsmInstrumentation implementation: @@ -92,11 +93,6 @@ EmitInstruction(Out, Inst); } - // Should be implemented differently in x86_32 and x86_64 subclasses. - virtual void StoreFlags(MCStreamer &Out) = 0; - - virtual void RestoreFlags(MCStreamer &Out) = 0; - // Adjusts up stack and saves all registers used in instrumentation. 
virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, MCContext &Ctx, @@ -130,11 +126,33 @@ void InstrumentMOV(const MCInst &Inst, OperandVector &Operands, MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out); + std::unique_ptr<X86Operand> AdjustMemOperand(X86Operand &Op, MCContext &Ctx) { + if (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant) + return std::unique_ptr<X86Operand>(new X86Operand(Op)); + + long Displacement = 0; + if (IsStackReg(Op.getMemBaseReg())) + Displacement += OrigSPOffset; + if (IsStackReg(Op.getMemIndexReg())) + Displacement += OrigSPOffset * Op.getMemScale(); + + const MCExpr *Disp = MCConstantExpr::Create( + static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue() - + Displacement, + Ctx); + return X86Operand::CreateMem(0, Disp, Op.getMemBaseReg(), + Op.getMemIndexReg(), Op.getMemScale(), SMLoc(), + SMLoc()); + } + protected: void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); } // True when previous instruction was actually REP prefix. bool RepPrefix; + + // Offset from the original SP register. + int64_t OrigSPOffset; }; void X86AddressSanitizer::InstrumentMemOperand( @@ -143,11 +161,15 @@ assert(Op.isMem() && "Op should be a memory operand."); assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 && "AccessSize should be a power of two, less or equal than 16."); + std::unique_ptr<X86Operand> AdjustedOp = AdjustMemOperand(Op, Ctx); // FIXME: take into account load/store alignment. 
- if (IsSmallMemAccess(AccessSize)) - InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out); - else - InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out); + if (IsSmallMemAccess(AccessSize)) { + InstrumentMemOperandSmall(*AdjustedOp, AccessSize, IsWrite, RegCtx, Ctx, + Out); + } else { + InstrumentMemOperandLarge(*AdjustedOp, AccessSize, IsWrite, RegCtx, Ctx, + Out); + } } void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, @@ -276,12 +298,6 @@ MCParsedAsmOperand &Op = *Operands[Ix]; if (Op.isMem()) { X86Operand &MemOp = static_cast<X86Operand &>(Op); - // FIXME: get rid of this limitation. - if (IsStackReg(MemOp.getMemBaseReg()) || - IsStackReg(MemOp.getMemIndexReg())) { - continue; - } - InstrumentMemOperandPrologue(RegCtx, Ctx, Out); InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out); InstrumentMemOperandEpilogue(RegCtx, Ctx, Out); @@ -305,12 +321,24 @@ return getX86SubSuperRegister(FrameReg, MVT::i32); } - virtual void StoreFlags(MCStreamer &Out) override { + virtual void StoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg)); + OrigSPOffset -= 4; + } + + void RestoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg)); + OrigSPOffset += 4; + } + + virtual void StoreFlags(MCStreamer &Out) { EmitInstruction(Out, MCInstBuilder(X86::PUSHF32)); + OrigSPOffset -= 4; } - virtual void RestoreFlags(MCStreamer &Out) override { + virtual void RestoreFlags(MCStreamer &Out) { EmitInstruction(Out, MCInstBuilder(X86::POPF32)); + OrigSPOffset += 4; } virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, @@ -319,8 +347,7 @@ const MCRegisterInfo *MRI = Ctx.getRegisterInfo(); unsigned FrameReg = GetFrameReg(Ctx, Out); if (MRI && FrameReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EBP)); + StoreReg(Out, X86::EBP); if (FrameReg == X86::ESP) { 
Out.EmitCFIAdjustCfaOffset(4 /* byte size of the FrameReg */); Out.EmitCFIRelOffset( @@ -333,14 +360,10 @@ MRI->getDwarfRegNum(X86::EBP, true /* IsEH */)); } - EmitInstruction( - Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.addressReg(MVT::i32))); - EmitInstruction( - Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.shadowReg(MVT::i32))); - if (RegCtx.ScratchReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.scratchReg(MVT::i32))); - } + StoreReg(Out, RegCtx.addressReg(MVT::i32)); + StoreReg(Out, RegCtx.shadowReg(MVT::i32)); + if (RegCtx.ScratchReg != X86::NoRegister) + StoreReg(Out, RegCtx.scratchReg(MVT::i32)); StoreFlags(Out); } @@ -348,19 +371,14 @@ MCContext &Ctx, MCStreamer &Out) override { RestoreFlags(Out); - if (RegCtx.ScratchReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.scratchReg(MVT::i32))); - } - EmitInstruction( - Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.shadowReg(MVT::i32))); - EmitInstruction( - Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.addressReg(MVT::i32))); + if (RegCtx.ScratchReg != X86::NoRegister) + RestoreReg(Out, RegCtx.scratchReg(MVT::i32)); + RestoreReg(Out, RegCtx.shadowReg(MVT::i32)); + RestoreReg(Out, RegCtx.addressReg(MVT::i32)); unsigned FrameReg = GetFrameReg(Ctx, Out); if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::POP32r).addReg(X86::EBP)); + RestoreReg(Out, X86::EBP); Out.EmitCFIRestoreState(); if (FrameReg == X86::ESP) Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the FrameReg */); @@ -571,12 +589,24 @@ return getX86SubSuperRegister(FrameReg, MVT::i64); } - virtual void StoreFlags(MCStreamer &Out) override { + void StoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg)); + OrigSPOffset -= 8; + } + + void RestoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg)); + OrigSPOffset 
+= 8; + } + + virtual void StoreFlags(MCStreamer &Out) { EmitInstruction(Out, MCInstBuilder(X86::PUSHF64)); + OrigSPOffset -= 8; } - virtual void RestoreFlags(MCStreamer &Out) override { + virtual void RestoreFlags(MCStreamer &Out) { EmitInstruction(Out, MCInstBuilder(X86::POPF64)); + OrigSPOffset += 8; } virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, @@ -585,7 +615,7 @@ const MCRegisterInfo *MRI = Ctx.getRegisterInfo(); unsigned FrameReg = GetFrameReg(Ctx, Out); if (MRI && FrameReg != X86::NoRegister) { - EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RBP)); + StoreReg(Out, X86::RBP); if (FrameReg == X86::RSP) { Out.EmitCFIAdjustCfaOffset(8 /* byte size of the FrameReg */); Out.EmitCFIRelOffset( @@ -599,14 +629,10 @@ } EmitAdjustRSP(Ctx, Out, -128); - EmitInstruction( - Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.shadowReg(MVT::i64))); - EmitInstruction( - Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.addressReg(MVT::i64))); - if (RegCtx.ScratchReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.scratchReg(MVT::i64))); - } + StoreReg(Out, RegCtx.shadowReg(MVT::i64)); + StoreReg(Out, RegCtx.addressReg(MVT::i64)); + if (RegCtx.ScratchReg != X86::NoRegister) + StoreReg(Out, RegCtx.scratchReg(MVT::i64)); StoreFlags(Out); } @@ -614,20 +640,15 @@ MCContext &Ctx, MCStreamer &Out) override { RestoreFlags(Out); - if (RegCtx.ScratchReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.scratchReg(MVT::i64))); - } - EmitInstruction( - Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.addressReg(MVT::i64))); - EmitInstruction( - Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.shadowReg(MVT::i64))); + if (RegCtx.ScratchReg != X86::NoRegister) + RestoreReg(Out, RegCtx.scratchReg(MVT::i64)); + RestoreReg(Out, RegCtx.addressReg(MVT::i64)); + RestoreReg(Out, RegCtx.shadowReg(MVT::i64)); EmitAdjustRSP(Ctx, Out, 128); unsigned FrameReg = GetFrameReg(Ctx, Out); if 
(Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::POP64r).addReg(X86::RBP)); + RestoreReg(Out, X86::RBP); Out.EmitCFIRestoreState(); if (FrameReg == X86::RSP) Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the FrameReg */); @@ -658,6 +679,8 @@ X86Operand::CreateMem(0, Disp, X86::RSP, 0, 1, SMLoc(), SMLoc())); Op->addMemOperands(Inst, 5); EmitInstruction(Out, Inst); + + OrigSPOffset += Offset; } void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx, @@ -701,6 +724,7 @@ Op.addMemOperands(Inst, 5); EmitInstruction(Out, Inst); } + EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg( AddressRegI64)); EmitInstruction(Out, MCInstBuilder(X86::SHR64ri) Index: test/Instrumentation/AddressSanitizer/X86/asm_rsp_mem_op.s =================================================================== --- /dev/null +++ test/Instrumentation/AddressSanitizer/X86/asm_rsp_mem_op.s @@ -0,0 +1,25 @@ +# The test verifies that memory references through %rsp are correctly +# adjusted after instrumentation. + +# RUN: llvm-mc %s -triple=x86_64-unknown-linux-gnu -asm-instrumentation=address -asan-instrument-assembly | FileCheck %s + +# CHECK-LABEL: rsp_access +# CHECK: leaq -128(%rsp), %rsp +# CHECK: pushq %rax +# CHECK: pushq %rdi +# CHECK: pushfq +# CHECK: leaq 160(%rsp), %rdi +# CHECK: callq __asan_report_load8@PLT +# CHECK: popfq +# CHECK: popq %rdi +# CHECK: popq %rax +# CHECK: leaq 128(%rsp), %rsp +# CHECK: movq 8(%rsp), %rax +# CHECK: retq + + .text + .globl rsp_access + .type rsp_access,@function +rsp_access: + movq 8(%rsp), %rax + retq