Index: lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp =================================================================== --- lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp +++ lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp @@ -26,6 +26,7 @@ #include "llvm/MC/MCTargetAsmParser.h" #include "llvm/MC/MCTargetOptions.h" #include "llvm/Support/CommandLine.h" +#include <limits> namespace llvm { namespace { @@ -36,7 +37,7 @@ cl::init(false)); bool IsStackReg(unsigned Reg) { - return Reg == X86::RSP || Reg == X86::ESP || Reg == X86::SP; + return Reg == X86::RSP || Reg == X86::ESP; } bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; } @@ -72,7 +73,8 @@ }; X86AddressSanitizer(const MCSubtargetInfo &STI) - : X86AsmInstrumentation(STI), RepPrefix(false) {} + : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {} + virtual ~X86AddressSanitizer() {} // X86AsmInstrumentation implementation: @@ -92,11 +94,6 @@ EmitInstruction(Out, Inst); } - // Should be implemented differently in x86_32 and x86_64 subclasses. - virtual void StoreFlags(MCStreamer &Out) = 0; - - virtual void RestoreFlags(MCStreamer &Out) = 0; - // Adjusts up stack and saves all registers used in instrumentation. virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, MCContext &Ctx, @@ -135,6 +132,9 @@ // True when previous instruction was actually REP prefix. bool RepPrefix; + + // Offset from the original SP register. + int64_t OrigSPOffset; }; void X86AddressSanitizer::InstrumentMemOperand( @@ -276,12 +276,6 @@ MCParsedAsmOperand &Op = *Operands[Ix]; if (Op.isMem()) { X86Operand &MemOp = static_cast<X86Operand &>(Op); - // FIXME: get rid of this limitation. 
- if (IsStackReg(MemOp.getMemBaseReg()) || - IsStackReg(MemOp.getMemIndexReg())) { - continue; - } - InstrumentMemOperandPrologue(RegCtx, Ctx, Out); InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out); InstrumentMemOperandEpilogue(RegCtx, Ctx, Out); @@ -305,12 +299,24 @@ return getX86SubSuperRegister(FrameReg, MVT::i32); } - virtual void StoreFlags(MCStreamer &Out) override { + virtual void StoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg)); + OrigSPOffset -= 4; + } + + void RestoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg)); + OrigSPOffset += 4; + } + + virtual void StoreFlags(MCStreamer &Out) { EmitInstruction(Out, MCInstBuilder(X86::PUSHF32)); + OrigSPOffset -= 4; } - virtual void RestoreFlags(MCStreamer &Out) override { + virtual void RestoreFlags(MCStreamer &Out) { EmitInstruction(Out, MCInstBuilder(X86::POPF32)); + OrigSPOffset += 4; } virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, @@ -319,8 +325,7 @@ const MCRegisterInfo *MRI = Ctx.getRegisterInfo(); unsigned FrameReg = GetFrameReg(Ctx, Out); if (MRI && FrameReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EBP)); + StoreReg(Out, X86::EBP); if (FrameReg == X86::ESP) { Out.EmitCFIAdjustCfaOffset(4 /* byte size of the FrameReg */); Out.EmitCFIRelOffset( @@ -333,14 +338,10 @@ MRI->getDwarfRegNum(X86::EBP, true /* IsEH */)); } - EmitInstruction( - Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.addressReg(MVT::i32))); - EmitInstruction( - Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.shadowReg(MVT::i32))); - if (RegCtx.ScratchReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.scratchReg(MVT::i32))); - } + StoreReg(Out, RegCtx.addressReg(MVT::i32)); + StoreReg(Out, RegCtx.shadowReg(MVT::i32)); + if (RegCtx.ScratchReg != X86::NoRegister) + StoreReg(Out, 
RegCtx.scratchReg(MVT::i32)); StoreFlags(Out); } @@ -348,19 +349,14 @@ MCContext &Ctx, MCStreamer &Out) override { RestoreFlags(Out); - if (RegCtx.ScratchReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.scratchReg(MVT::i32))); - } - EmitInstruction( - Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.shadowReg(MVT::i32))); - EmitInstruction( - Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.addressReg(MVT::i32))); + if (RegCtx.ScratchReg != X86::NoRegister) + RestoreReg(Out, RegCtx.scratchReg(MVT::i32)); + RestoreReg(Out, RegCtx.shadowReg(MVT::i32)); + RestoreReg(Out, RegCtx.addressReg(MVT::i32)); unsigned FrameReg = GetFrameReg(Ctx, Out); if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::POP32r).addReg(X86::EBP)); + RestoreReg(Out, X86::EBP); Out.EmitCFIRestoreState(); if (FrameReg == X86::ESP) Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the FrameReg */); @@ -399,6 +395,39 @@ MCSymbolRefExpr::Create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx); EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr)); } + + void ComputeMemOperandAddress(X86Operand &Op, unsigned Reg, MCContext &Ctx, + MCStreamer &Out) { + static const int64_t MaxAllowedDisplacement = + std::numeric_limits<int32_t>::max(); + + MCInst Inst; + Inst.setOpcode(X86::LEA32r); + Inst.addOperand(MCOperand::CreateReg(Reg)); + Op.addMemOperands(Inst, 5); + EmitInstruction(Out, Inst); + + int64_t Displacement = 0; + if (IsStackReg(Op.getMemBaseReg())) + Displacement -= OrigSPOffset; + if (IsStackReg(Op.getMemIndexReg())) + Displacement -= OrigSPOffset * Op.getMemScale(); + + while (Displacement >= 0) { + const MCConstantExpr *Disp = MCConstantExpr::Create( + std::min(MaxAllowedDisplacement, Displacement), Ctx); + std::unique_ptr<X86Operand> DispOp = + X86Operand::CreateMem(0, Disp, Reg, 0, 1, SMLoc(), SMLoc()); + + MCInst Inst; + Inst.setOpcode(X86::LEA32r); + Inst.addOperand(MCOperand::CreateReg(Reg)); + 
DispOp->addMemOperands(Inst, 5); + EmitInstruction(Out, Inst); + + Displacement -= Disp->getValue(); + } + } }; void X86AddressSanitizer32::InstrumentMemOperandSmall( @@ -411,13 +440,7 @@ assert(RegCtx.ScratchReg != X86::NoRegister); unsigned ScratchRegI32 = RegCtx.scratchReg(MVT::i32); - { - MCInst Inst; - Inst.setOpcode(X86::LEA32r); - Inst.addOperand(MCOperand::CreateReg(AddressRegI32)); - Op.addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); - } + ComputeMemOperandAddress(Op, AddressRegI32, Ctx, Out); EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg( AddressRegI32)); @@ -493,13 +516,7 @@ unsigned AddressRegI32 = RegCtx.addressReg(MVT::i32); unsigned ShadowRegI32 = RegCtx.shadowReg(MVT::i32); - { - MCInst Inst; - Inst.setOpcode(X86::LEA32r); - Inst.addOperand(MCOperand::CreateReg(AddressRegI32)); - Op.addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); - } + ComputeMemOperandAddress(Op, AddressRegI32, Ctx, Out); EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg( AddressRegI32)); @@ -571,12 +588,24 @@ return getX86SubSuperRegister(FrameReg, MVT::i64); } - virtual void StoreFlags(MCStreamer &Out) override { + void StoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg)); + OrigSPOffset -= 8; + } + + void RestoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg)); + OrigSPOffset += 8; + } + + virtual void StoreFlags(MCStreamer &Out) { EmitInstruction(Out, MCInstBuilder(X86::PUSHF64)); + OrigSPOffset -= 8; } - virtual void RestoreFlags(MCStreamer &Out) override { + virtual void RestoreFlags(MCStreamer &Out) { EmitInstruction(Out, MCInstBuilder(X86::POPF64)); + OrigSPOffset += 8; } virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, @@ -585,7 +614,7 @@ const MCRegisterInfo *MRI = Ctx.getRegisterInfo(); unsigned FrameReg = GetFrameReg(Ctx, Out); if (MRI && FrameReg != X86::NoRegister) { - 
EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RBP)); + StoreReg(Out, X86::RBP); if (FrameReg == X86::RSP) { Out.EmitCFIAdjustCfaOffset(8 /* byte size of the FrameReg */); Out.EmitCFIRelOffset( @@ -599,14 +628,10 @@ } EmitAdjustRSP(Ctx, Out, -128); - EmitInstruction( - Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.shadowReg(MVT::i64))); - EmitInstruction( - Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.addressReg(MVT::i64))); - if (RegCtx.ScratchReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.scratchReg(MVT::i64))); - } + StoreReg(Out, RegCtx.shadowReg(MVT::i64)); + StoreReg(Out, RegCtx.addressReg(MVT::i64)); + if (RegCtx.ScratchReg != X86::NoRegister) + StoreReg(Out, RegCtx.scratchReg(MVT::i64)); StoreFlags(Out); } @@ -614,20 +639,15 @@ MCContext &Ctx, MCStreamer &Out) override { RestoreFlags(Out); - if (RegCtx.ScratchReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.scratchReg(MVT::i64))); - } - EmitInstruction( - Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.addressReg(MVT::i64))); - EmitInstruction( - Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.shadowReg(MVT::i64))); + if (RegCtx.ScratchReg != X86::NoRegister) + RestoreReg(Out, RegCtx.scratchReg(MVT::i64)); + RestoreReg(Out, RegCtx.addressReg(MVT::i64)); + RestoreReg(Out, RegCtx.shadowReg(MVT::i64)); EmitAdjustRSP(Ctx, Out, 128); unsigned FrameReg = GetFrameReg(Ctx, Out); if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) { - EmitInstruction( - Out, MCInstBuilder(X86::POP64r).addReg(X86::RBP)); + RestoreReg(Out, X86::RBP); Out.EmitCFIRestoreState(); if (FrameReg == X86::RSP) Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the FrameReg */); @@ -658,6 +678,8 @@ X86Operand::CreateMem(0, Disp, X86::RSP, 0, 1, SMLoc(), SMLoc())); Op->addMemOperands(Inst, 5); EmitInstruction(Out, Inst); + + OrigSPOffset += Offset; } void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx, @@ -680,6 +702,39 
@@ MCSymbolRefExpr::Create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx); EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr)); } + + void ComputeMemOperandAddress(X86Operand &Op, unsigned Reg, MCContext &Ctx, + MCStreamer &Out) { + static const int64_t MaxAllowedDisplacement = + std::numeric_limits<int32_t>::max(); + + MCInst Inst; + Inst.setOpcode(X86::LEA64r); + Inst.addOperand(MCOperand::CreateReg(Reg)); + Op.addMemOperands(Inst, 5); + EmitInstruction(Out, Inst); + + int64_t Displacement = 0; + if (IsStackReg(Op.getMemBaseReg())) + Displacement -= OrigSPOffset; + if (IsStackReg(Op.getMemIndexReg())) + Displacement -= OrigSPOffset * Op.getMemScale(); + + while (Displacement >= 0) { + const MCConstantExpr *Disp = MCConstantExpr::Create( + std::min(MaxAllowedDisplacement, Displacement), Ctx); + std::unique_ptr<X86Operand> DispOp = + X86Operand::CreateMem(0, Disp, Reg, 0, 1, SMLoc(), SMLoc()); + + MCInst Inst; + Inst.setOpcode(X86::LEA64r); + Inst.addOperand(MCOperand::CreateReg(Reg)); + DispOp->addMemOperands(Inst, 5); + EmitInstruction(Out, Inst); + + Displacement -= Disp->getValue(); + } + } }; void X86AddressSanitizer64::InstrumentMemOperandSmall( @@ -694,13 +749,8 @@ assert(RegCtx.ScratchReg != X86::NoRegister); unsigned ScratchRegI32 = RegCtx.scratchReg(MVT::i32); - { - MCInst Inst; - Inst.setOpcode(X86::LEA64r); - Inst.addOperand(MCOperand::CreateReg(AddressRegI64)); - Op.addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); - } + ComputeMemOperandAddress(Op, AddressRegI64, Ctx, Out); + EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg( AddressRegI64)); EmitInstruction(Out, MCInstBuilder(X86::SHR64ri) @@ -774,13 +824,8 @@ unsigned AddressRegI64 = RegCtx.addressReg(MVT::i64); unsigned ShadowRegI64 = RegCtx.shadowReg(MVT::i64); - { - MCInst Inst; - Inst.setOpcode(X86::LEA64r); - Inst.addOperand(MCOperand::CreateReg(AddressRegI64)); - Op.addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); - } + ComputeMemOperandAddress(Op, AddressRegI64, 
Ctx, Out); + EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg( AddressRegI64)); EmitInstruction(Out, MCInstBuilder(X86::SHR64ri) Index: test/Instrumentation/AddressSanitizer/X86/asm_rsp_mem_op.s =================================================================== --- /dev/null +++ test/Instrumentation/AddressSanitizer/X86/asm_rsp_mem_op.s @@ -0,0 +1,26 @@ +# The test verifies that memory references through %rsp are correctly +# adjusted after instrumentation. + +# RUN: llvm-mc %s -triple=x86_64-unknown-linux-gnu -asm-instrumentation=address -asan-instrument-assembly | FileCheck %s + +# CHECK-LABEL: rsp_access +# CHECK: leaq -128(%rsp), %rsp +# CHECK: pushq %rax +# CHECK: pushq %rdi +# CHECK: pushfq +# CHECK: leaq 8(%rsp), %rdi +# CHECK-NEXT: leaq 152(%rdi), %rdi +# CHECK: callq __asan_report_load8@PLT +# CHECK: popfq +# CHECK: popq %rdi +# CHECK: popq %rax +# CHECK: leaq 128(%rsp), %rsp +# CHECK: movq 8(%rsp), %rax +# CHECK: retq + + .text + .globl rsp_access + .type rsp_access,@function +rsp_access: + movq 8(%rsp), %rax + retq