Index: llvm/trunk/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
+++ llvm/trunk/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -26,6 +26,7 @@
 #include "llvm/MC/MCTargetAsmParser.h"
 #include "llvm/MC/MCTargetOptions.h"
 #include "llvm/Support/CommandLine.h"
+#include <limits>
 
 namespace llvm {
 namespace {
@@ -35,10 +36,21 @@
     cl::desc("instrument assembly with AddressSanitizer checks"),
     cl::Hidden, cl::init(false));
 
-bool IsStackReg(unsigned Reg) {
-  return Reg == X86::RSP || Reg == X86::ESP || Reg == X86::SP;
+const int64_t MinAllowedDisplacement = std::numeric_limits<int32_t>::min();
+const int64_t MaxAllowedDisplacement = std::numeric_limits<int32_t>::max();
+
+int64_t ApplyBounds(int64_t Displacement) {
+  return std::max(std::min(MaxAllowedDisplacement, Displacement),
+                  MinAllowedDisplacement);
+}
+
+bool InBounds(int64_t Displacement) {
+  return Displacement >= MinAllowedDisplacement &&
+         Displacement <= MaxAllowedDisplacement;
 }
 
+bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; }
+
 bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
 
 std::string FuncName(unsigned AccessSize, bool IsWrite) {
@@ -72,7 +84,8 @@
   };
 
   X86AddressSanitizer(const MCSubtargetInfo &STI)
-      : X86AsmInstrumentation(STI), RepPrefix(false) {}
+      : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}
+
   virtual ~X86AddressSanitizer() {}
 
   // X86AsmInstrumentation implementation:
@@ -92,11 +105,6 @@
     EmitInstruction(Out, Inst);
   }
 
-  // Should be implemented differently in x86_32 and x86_64 subclasses.
-  virtual void StoreFlags(MCStreamer &Out) = 0;
-
-  virtual void RestoreFlags(MCStreamer &Out) = 0;
-
   // Adjusts up stack and saves all registers used in instrumentation.
   virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                             MCContext &Ctx,
@@ -133,8 +141,31 @@
 protected:
   void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }
 
+  void EmitLEA(X86Operand &Op, MVT::SimpleValueType VT, unsigned Reg,
+               MCStreamer &Out) {
+    assert(VT == MVT::i32 || VT == MVT::i64);
+    MCInst Inst;
+    Inst.setOpcode(VT == MVT::i32 ? X86::LEA32r : X86::LEA64r);
+    Inst.addOperand(MCOperand::CreateReg(getX86SubSuperRegister(Reg, VT)));
+    Op.addMemOperands(Inst, 5);
+    EmitInstruction(Out, Inst);
+  }
+
+  void ComputeMemOperandAddress(X86Operand &Op, MVT::SimpleValueType VT,
+                                unsigned Reg, MCContext &Ctx, MCStreamer &Out);
+
+  // Creates a new memory operand with Displacement added to the original
+  // displacement. Residue will contain the remainder, which can be non-zero
+  // when the total displacement exceeds the 32-bit limitation.
+  std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
+                                              int64_t Displacement,
+                                              MCContext &Ctx, int64_t *Residue);
+
   // True when previous instruction was actually REP prefix.
   bool RepPrefix;
+
+  // Offset from the original SP register.
+  int64_t OrigSPOffset;
 };
 
 void X86AddressSanitizer::InstrumentMemOperand(
@@ -276,12 +307,6 @@
     MCParsedAsmOperand &Op = *Operands[Ix];
     if (Op.isMem()) {
       X86Operand &MemOp = static_cast<X86Operand &>(Op);
-      // FIXME: get rid of this limitation.
-      if (IsStackReg(MemOp.getMemBaseReg()) ||
-          IsStackReg(MemOp.getMemIndexReg())) {
-        continue;
-      }
-
       InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
       InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
       InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
@@ -289,6 +314,67 @@
   }
 }
 
+void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
+                                                   MVT::SimpleValueType VT,
+                                                   unsigned Reg, MCContext &Ctx,
+                                                   MCStreamer &Out) {
+  int64_t Displacement = 0;
+  if (IsStackReg(Op.getMemBaseReg()))
+    Displacement -= OrigSPOffset;
+  if (IsStackReg(Op.getMemIndexReg()))
+    Displacement -= OrigSPOffset * Op.getMemScale();
+
+  assert(Displacement >= 0);
+
+  // Emit Op as is.
+  if (Displacement == 0) {
+    EmitLEA(Op, VT, Reg, Out);
+    return;
+  }
+
+  int64_t Residue;
+  std::unique_ptr<X86Operand> NewOp =
+      AddDisplacement(Op, Displacement, Ctx, &Residue);
+  EmitLEA(*NewOp, VT, Reg, Out);
+
+  while (Residue != 0) {
+    const MCConstantExpr *Disp =
+        MCConstantExpr::Create(ApplyBounds(Residue), Ctx);
+    std::unique_ptr<X86Operand> DispOp =
+        X86Operand::CreateMem(0, Disp, Reg, 0, 1, SMLoc(), SMLoc());
+    EmitLEA(*DispOp, VT, Reg, Out);
+    Residue -= Disp->getValue();
+  }
+}
+
+std::unique_ptr<X86Operand>
+X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
+                                     MCContext &Ctx, int64_t *Residue) {
+  assert(Displacement >= 0);
+
+  if (Displacement == 0 ||
+      (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
+    *Residue = Displacement;
+    return X86Operand::CreateMem(Op.getMemSegReg(), Op.getMemDisp(),
+                                 Op.getMemBaseReg(), Op.getMemIndexReg(),
+                                 Op.getMemScale(), SMLoc(), SMLoc());
+  }
+
+  int64_t OrigDisplacement =
+      static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
+  assert(InBounds(OrigDisplacement));
+  Displacement += OrigDisplacement;
+
+  int64_t NewDisplacement = ApplyBounds(Displacement);
+  assert(InBounds(NewDisplacement));
+
+  *Residue = Displacement - NewDisplacement;
+  const MCExpr *Disp = MCConstantExpr::Create(NewDisplacement, Ctx);
+  return X86Operand::CreateMem(Op.getMemSegReg(), Disp, Op.getMemBaseReg(),
+                               Op.getMemIndexReg(), Op.getMemScale(), SMLoc(),
+                               SMLoc());
+}
+
 class X86AddressSanitizer32 : public X86AddressSanitizer {
 public:
   static const long kShadowOffset = 0x20000000;
@@ -305,12 +391,24 @@
     return getX86SubSuperRegister(FrameReg, MVT::i32);
   }
 
-  virtual void StoreFlags(MCStreamer &Out) override {
+  void SpillReg(MCStreamer &Out, unsigned Reg) {
+    EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));
+    OrigSPOffset -= 4;
+  }
+
+  void RestoreReg(MCStreamer &Out, unsigned Reg) {
+    EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));
+    OrigSPOffset += 4;
+  }
+
+  void StoreFlags(MCStreamer &Out) {
     EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
+    OrigSPOffset -= 4;
   }
 
-  virtual void RestoreFlags(MCStreamer &Out) override {
+  void RestoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::POPF32));
+    OrigSPOffset += 4;
   }
 
   virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
@@ -319,28 +417,21 @@
     const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
     unsigned FrameReg = GetFrameReg(Ctx, Out);
     if (MRI && FrameReg != X86::NoRegister) {
-      EmitInstruction(
-          Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EBP));
+      SpillReg(Out, X86::EBP);
       if (FrameReg == X86::ESP) {
         Out.EmitCFIAdjustCfaOffset(4 /* byte size of the FrameReg */);
-        Out.EmitCFIRelOffset(
-            MRI->getDwarfRegNum(X86::EBP, true /* IsEH */), 0);
+        Out.EmitCFIRelOffset(MRI->getDwarfRegNum(X86::EBP, true /* IsEH */), 0);
       }
       EmitInstruction(
          Out, MCInstBuilder(X86::MOV32rr).addReg(X86::EBP).addReg(FrameReg));
       Out.EmitCFIRememberState();
-      Out.EmitCFIDefCfaRegister(
-          MRI->getDwarfRegNum(X86::EBP, true /* IsEH */));
+      Out.EmitCFIDefCfaRegister(MRI->getDwarfRegNum(X86::EBP, true /* IsEH */));
     }
 
-    EmitInstruction(
-        Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.addressReg(MVT::i32)));
-    EmitInstruction(
-        Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.shadowReg(MVT::i32)));
-    if (RegCtx.ScratchReg != X86::NoRegister) {
-      EmitInstruction(
-          Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.scratchReg(MVT::i32)));
-    }
+    SpillReg(Out, RegCtx.addressReg(MVT::i32));
+    SpillReg(Out, RegCtx.shadowReg(MVT::i32));
+    if (RegCtx.ScratchReg != X86::NoRegister)
+      SpillReg(Out, RegCtx.scratchReg(MVT::i32));
 
     StoreFlags(Out);
   }
@@ -348,19 +439,14 @@
                                             MCContext &Ctx,
                                             MCStreamer &Out) override {
     RestoreFlags(Out);
-    if (RegCtx.ScratchReg != X86::NoRegister) {
-      EmitInstruction(
-          Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.scratchReg(MVT::i32)));
-    }
-    EmitInstruction(
-        Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.shadowReg(MVT::i32)));
-    EmitInstruction(
-        Out, MCInstBuilder(X86::POP32r).addReg(RegCtx.addressReg(MVT::i32)));
+    if (RegCtx.ScratchReg != X86::NoRegister)
+      RestoreReg(Out, RegCtx.scratchReg(MVT::i32));
+    RestoreReg(Out, RegCtx.shadowReg(MVT::i32));
+    RestoreReg(Out, RegCtx.addressReg(MVT::i32));
 
     unsigned FrameReg = GetFrameReg(Ctx, Out);
     if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
-      EmitInstruction(
-          Out, MCInstBuilder(X86::POP32r).addReg(X86::EBP));
+      RestoreReg(Out, X86::EBP);
       Out.EmitCFIRestoreState();
       if (FrameReg == X86::ESP)
         Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the FrameReg */);
@@ -411,13 +497,7 @@
     assert(RegCtx.ScratchReg != X86::NoRegister);
     unsigned ScratchRegI32 = RegCtx.scratchReg(MVT::i32);
 
-    {
-      MCInst Inst;
-      Inst.setOpcode(X86::LEA32r);
-      Inst.addOperand(MCOperand::CreateReg(AddressRegI32));
-      Op.addMemOperands(Inst, 5);
-      EmitInstruction(Out, Inst);
-    }
+    ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);
 
     EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
                              AddressRegI32));
@@ -454,15 +534,10 @@
     case 1:
       break;
     case 2: {
-      MCInst Inst;
-      Inst.setOpcode(X86::LEA32r);
-      Inst.addOperand(MCOperand::CreateReg(ScratchRegI32));
-
       const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
       std::unique_ptr<X86Operand> Op(
          X86Operand::CreateMem(0, Disp, ScratchRegI32, 0, 1, SMLoc(), SMLoc()));
-      Op->addMemOperands(Inst, 5);
-      EmitInstruction(Out, Inst);
+      EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
       break;
     }
     case 4:
@@ -493,13 +568,7 @@
     unsigned AddressRegI32 = RegCtx.addressReg(MVT::i32);
     unsigned ShadowRegI32 = RegCtx.shadowReg(MVT::i32);
 
-    {
-      MCInst Inst;
-      Inst.setOpcode(X86::LEA32r);
-      Inst.addOperand(MCOperand::CreateReg(AddressRegI32));
-      Op.addMemOperands(Inst, 5);
-      EmitInstruction(Out, Inst);
-    }
+    ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);
 
     EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
                              AddressRegI32));
@@ -571,12 +640,24 @@
     return getX86SubSuperRegister(FrameReg, MVT::i64);
   }
 
-  virtual void StoreFlags(MCStreamer &Out) override {
+  void SpillReg(MCStreamer &Out, unsigned Reg) {
+    EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));
+    OrigSPOffset -= 8;
+  }
+
+  void RestoreReg(MCStreamer &Out, unsigned Reg) {
+    EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));
+    OrigSPOffset += 8;
+  }
+
+  void StoreFlags(MCStreamer &Out) {
     EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
+    OrigSPOffset -= 8;
   }
 
-  virtual void RestoreFlags(MCStreamer &Out) override {
+  void RestoreFlags(MCStreamer &Out) {
     EmitInstruction(Out, MCInstBuilder(X86::POPF64));
+    OrigSPOffset += 8;
   }
 
   virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
@@ -585,28 +666,22 @@
     const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
     unsigned FrameReg = GetFrameReg(Ctx, Out);
     if (MRI && FrameReg != X86::NoRegister) {
-      EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RBP));
+      SpillReg(Out, X86::RBP);
      if (FrameReg == X86::RSP) {
         Out.EmitCFIAdjustCfaOffset(8 /* byte size of the FrameReg */);
-        Out.EmitCFIRelOffset(
-            MRI->getDwarfRegNum(X86::RBP, true /* IsEH */), 0);
+        Out.EmitCFIRelOffset(MRI->getDwarfRegNum(X86::RBP, true /* IsEH */), 0);
       }
       EmitInstruction(
           Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RBP).addReg(FrameReg));
       Out.EmitCFIRememberState();
-      Out.EmitCFIDefCfaRegister(
-          MRI->getDwarfRegNum(X86::RBP, true /* IsEH */));
+      Out.EmitCFIDefCfaRegister(MRI->getDwarfRegNum(X86::RBP, true /* IsEH */));
     }
 
     EmitAdjustRSP(Ctx, Out, -128);
-    EmitInstruction(
-        Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.shadowReg(MVT::i64)));
-    EmitInstruction(
-        Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.addressReg(MVT::i64)));
-    if (RegCtx.ScratchReg != X86::NoRegister) {
-      EmitInstruction(
-          Out, MCInstBuilder(X86::PUSH64r).addReg(RegCtx.scratchReg(MVT::i64)));
-    }
+    SpillReg(Out, RegCtx.shadowReg(MVT::i64));
+    SpillReg(Out, RegCtx.addressReg(MVT::i64));
+    if (RegCtx.ScratchReg != X86::NoRegister)
+      SpillReg(Out, RegCtx.scratchReg(MVT::i64));
 
     StoreFlags(Out);
   }
@@ -614,20 +689,15 @@
                                             MCContext &Ctx,
                                             MCStreamer &Out) override {
     RestoreFlags(Out);
-    if (RegCtx.ScratchReg != X86::NoRegister) {
-      EmitInstruction(
-          Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.scratchReg(MVT::i64)));
-    }
-    EmitInstruction(
-        Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.addressReg(MVT::i64)));
-    EmitInstruction(
-        Out, MCInstBuilder(X86::POP64r).addReg(RegCtx.shadowReg(MVT::i64)));
+    if (RegCtx.ScratchReg != X86::NoRegister)
+      RestoreReg(Out, RegCtx.scratchReg(MVT::i64));
+    RestoreReg(Out, RegCtx.addressReg(MVT::i64));
+    RestoreReg(Out, RegCtx.shadowReg(MVT::i64));
     EmitAdjustRSP(Ctx, Out, 128);
 
     unsigned FrameReg = GetFrameReg(Ctx, Out);
     if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
-      EmitInstruction(
-          Out, MCInstBuilder(X86::POP64r).addReg(X86::RBP));
+      RestoreReg(Out, X86::RBP);
       Out.EmitCFIRestoreState();
       if (FrameReg == X86::RSP)
         Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the FrameReg */);
@@ -649,15 +719,11 @@
 
 private:
   void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
-    MCInst Inst;
-    Inst.setOpcode(X86::LEA64r);
-    Inst.addOperand(MCOperand::CreateReg(X86::RSP));
-
     const MCExpr *Disp = MCConstantExpr::Create(Offset, Ctx);
     std::unique_ptr<X86Operand> Op(
         X86Operand::CreateMem(0, Disp, X86::RSP, 0, 1, SMLoc(), SMLoc()));
-    Op->addMemOperands(Inst, 5);
-    EmitInstruction(Out, Inst);
+    EmitLEA(*Op, MVT::i64, X86::RSP, Out);
+    OrigSPOffset += Offset;
   }
 
   void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
@@ -694,13 +760,8 @@
     assert(RegCtx.ScratchReg != X86::NoRegister);
     unsigned ScratchRegI32 = RegCtx.scratchReg(MVT::i32);
 
-    {
-      MCInst Inst;
-      Inst.setOpcode(X86::LEA64r);
-      Inst.addOperand(MCOperand::CreateReg(AddressRegI64));
-      Op.addMemOperands(Inst, 5);
-      EmitInstruction(Out, Inst);
-    }
+    ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
+
     EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
                              AddressRegI64));
     EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
@@ -735,15 +796,10 @@
     case 1:
      break;
     case 2: {
-      MCInst Inst;
-      Inst.setOpcode(X86::LEA32r);
-      Inst.addOperand(MCOperand::CreateReg(ScratchRegI32));
-
       const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
       std::unique_ptr<X86Operand> Op(
          X86Operand::CreateMem(0, Disp, ScratchRegI32, 0, 1, SMLoc(), SMLoc()));
-      Op->addMemOperands(Inst, 5);
-      EmitInstruction(Out, Inst);
+      EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
      break;
     }
     case 4:
@@ -774,13 +830,8 @@
     unsigned AddressRegI64 = RegCtx.addressReg(MVT::i64);
     unsigned ShadowRegI64 = RegCtx.shadowReg(MVT::i64);
 
-    {
-      MCInst Inst;
-      Inst.setOpcode(X86::LEA64r);
-      Inst.addOperand(MCOperand::CreateReg(AddressRegI64));
-      Op.addMemOperands(Inst, 5);
-      EmitInstruction(Out, Inst);
-    }
+    ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
+
     EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
                              AddressRegI64));
     EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
Index: llvm/trunk/test/Instrumentation/AddressSanitizer/X86/asm_rsp_mem_op.s
===================================================================
--- llvm/trunk/test/Instrumentation/AddressSanitizer/X86/asm_rsp_mem_op.s
+++ llvm/trunk/test/Instrumentation/AddressSanitizer/X86/asm_rsp_mem_op.s
@@ -0,0 +1,45 @@
+# The test verifies that memory references through %rsp are correctly
+# adjusted after instrumentation.
+
+# RUN: llvm-mc %s -triple=x86_64-unknown-linux-gnu -asm-instrumentation=address -asan-instrument-assembly | FileCheck %s
+
+# CHECK-LABEL: rsp_access
+# CHECK: leaq -128(%rsp), %rsp
+# CHECK: pushq %rax
+# CHECK: pushq %rdi
+# CHECK: pushfq
+# CHECK: leaq 160(%rsp), %rdi
+# CHECK: callq __asan_report_load8@PLT
+# CHECK: popfq
+# CHECK: popq %rdi
+# CHECK: popq %rax
+# CHECK: leaq 128(%rsp), %rsp
+# CHECK: movq 8(%rsp), %rax
+# CHECK: retq
+
+ .text
+ .globl rsp_access
+ .type rsp_access,@function
+rsp_access:
+ movq 8(%rsp), %rax
+ retq
+
+# CHECK-LABEL: rsp_32bit_access
+# CHECK: leaq -128(%rsp), %rsp
+# CHECK: pushq %rax
+# CHECK: pushq %rdi
+# CHECK: pushfq
+# CHECK: leaq 2147483647(%rsp), %rdi
+# CHECK: leaq 145(%rdi), %rdi
+# CHECK: callq __asan_report_load8@PLT
+# CHECK: popfq
+# CHECK: popq %rdi
+# CHECK: popq %rax
+# CHECK: leaq 128(%rsp), %rsp
+# CHECK: movq 2147483640(%rsp), %rax
+# CHECK: retq
+ .globl rsp_32bit_access
+ .type rsp_32bit_access,@function
+rsp_32bit_access:
+ movq 2147483640(%rsp), %rax
+ retq
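
A minimal standalone sketch of the displacement arithmetic exercised by the rsp_32bit_access test above. It is not part of the patch: SplitDisplacement, MinAllowed, MaxAllowed and the main() driver are illustration-only names, but the clamping mirrors ApplyBounds/AddDisplacement, assuming the same int32_t bounds.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Largest/smallest displacement encodable by a single LEA, as in the patch.
static const int64_t MinAllowed = std::numeric_limits<int32_t>::min();
static const int64_t MaxAllowed = std::numeric_limits<int32_t>::max();

// Clamp a 64-bit displacement into the 32-bit encodable range.
static int64_t ApplyBounds(int64_t Disp) {
  return std::max(std::min(MaxAllowed, Disp), MinAllowed);
}

// Split a total displacement (the operand's original displacement plus the
// bytes by which the instrumentation prologue moved the stack pointer) into
// one clamped chunk per emitted LEA.
static std::vector<int64_t> SplitDisplacement(int64_t Total) {
  std::vector<int64_t> Chunks;
  int64_t Residue = Total;
  do {
    int64_t Chunk = ApplyBounds(Residue);
    Chunks.push_back(Chunk);
    Residue -= Chunk;
  } while (Residue != 0);
  return Chunks;
}

int main() {
  // Mirrors rsp_32bit_access: displacement 2147483640 plus the 152 bytes by
  // which the prologue adjusted %rsp (128-byte lea plus three 8-byte pushes).
  for (int64_t Chunk : SplitDisplacement(2147483640LL + 152))
    std::cout << Chunk << "\n"; // prints 2147483647, then 145
}

The two printed chunks correspond to the two CHECK'd instructions "leaq 2147483647(%rsp), %rdi" and "leaq 145(%rdi), %rdi".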