diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -1637,6 +1637,10 @@ def int_load_relative: DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty], [IntrReadMem, IntrArgMemOnly]>; +def int_asan_check_memaccess : + Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i32_ty], + [IntrInaccessibleMemOnly, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>; + def int_hwasan_check_memaccess : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>; diff --git a/llvm/lib/Target/X86/X86AsmPrinter.h b/llvm/lib/Target/X86/X86AsmPrinter.h --- a/llvm/lib/Target/X86/X86AsmPrinter.h +++ b/llvm/lib/Target/X86/X86AsmPrinter.h @@ -98,6 +98,25 @@ void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL); + // Address sanitizer specific lowering for X86. + void LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI); + void emitAsanMemaccessSymbols(Module &M); + void emitAsanMemaccessPartial(Module &M, unsigned Reg, uint64_t ShadowBase, + bool IsWrite, size_t AccessSizeIndex, + size_t MappingScale, bool OrShadowOffset, + MCSubtargetInfo &STI); + void emitAsanMemaccessFull(Module &M, unsigned Reg, uint64_t ShadowBase, + bool IsWrite, size_t AccessSizeIndex, + size_t MappingScale, bool OrShadowOffset, + MCSubtargetInfo &STI); + void emitAsanReportError(Module &M, unsigned Reg, bool IsWrite, + size_t AccessSizeIndex, MCSubtargetInfo &STI); + + typedef std::tuple<unsigned /*Reg*/, uint64_t /*ShadowBase*/, uint32_t /*AccessInfo*/> + AsanMemaccessTuple; + std::map<AsanMemaccessTuple, MCSymbol *> AsanMemaccessSymbols; + // Choose between emitting .seh_ directives and .cv_fpo_ directives. 
void EmitSEHInstruction(const MachineInstr *MI); diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp --- a/llvm/lib/Target/X86/X86AsmPrinter.cpp +++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp @@ -753,6 +753,8 @@ void X86AsmPrinter::emitEndOfAsmFile(Module &M) { const Triple &TT = TM.getTargetTriple(); + emitAsanMemaccessSymbols(M); + if (TT.isOSBinFormatMachO()) { // Mach-O uses non-lazy symbol stubs to encode per-TU information into // global table for symbol lookup. diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td --- a/llvm/lib/Target/X86/X86InstrCompiler.td +++ b/llvm/lib/Target/X86/X86InstrCompiler.td @@ -260,6 +260,18 @@ "#SEH_Epilogue", []>; } +//===----------------------------------------------------------------------===// +// Pseudo instructions used by address sanitizer. +//===----------------------------------------------------------------------===// +let + Defs = [R8, EFLAGS] in { +def ASAN_CHECK_MEMACCESS : PseudoI< + (outs), (ins GR64NoR8:$addr, i64imm:$shadowbase, i32imm:$accessinfo), + [(int_asan_check_memaccess GR64NoR8:$addr, (i64 timm:$shadowbase), + (i32 timm:$accessinfo))]>, + Sched<[]>; +} + //===----------------------------------------------------------------------===// // Pseudo instructions used by segmented stacks. 
// diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -43,8 +43,10 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbolELF.h" +#include "llvm/Support/TargetRegistry.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" +#include "llvm/Transforms/Instrumentation/AddressSanitizer.h" using namespace llvm; @@ -1323,6 +1325,247 @@ .addExpr(Op)); } +void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) { + // FIXME: Make this work on non-ELF. + if (!TM.getTargetTriple().isOSBinFormatELF()) { + report_fatal_error("llvm.asan.check.memaccess only supported on ELF"); + return; + } + + unsigned Reg = MI.getOperand(0).getReg().id(); + uint64_t ShadowBase = MI.getOperand(1).getImm(); + uint32_t AccessInfo = MI.getOperand(2).getImm(); + bool IsWrite = (AccessInfo >> AsanAccessInfo::IsWriteShift) & 1; + size_t AccessSizeIndex = + (AccessInfo >> AsanAccessInfo::AccessSizeIndexShift) & 0xf; + + MCSymbol *&Sym = + AsanMemaccessSymbols[AsanMemaccessTuple(Reg, ShadowBase, AccessInfo)]; + if (!Sym) { + std::string Name = IsWrite ? 
"store" : "load"; + std::string SymName = "__asan_check_" + Name + + utostr(1 << AccessSizeIndex) + "_rn" + utostr(Reg); + Sym = OutContext.getOrCreateSymbol(SymName); + } + + EmitAndCountInstruction( + MCInstBuilder(X86::CALL64pcrel32) + .addExpr(MCSymbolRefExpr::create(Sym, OutContext))); +} + +void X86AsmPrinter::emitAsanMemaccessPartial(Module &M, unsigned Reg, + uint64_t ShadowBase, bool IsWrite, + size_t AccessSizeIndex, + size_t MappingScale, + bool OrShadowOffset, + MCSubtargetInfo &STI) { + assert(AccessSizeIndex == 0 || AccessSizeIndex == 1 || AccessSizeIndex == 2); + assert(Reg != X86::RAX); + + OutStreamer->emitInstruction( + MCInstBuilder(X86::MOV64rr).addReg(X86::R8).addReg(X86::NoRegister + Reg), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri) + .addReg(X86::R8) + .addReg(X86::R8) + .addImm(MappingScale), + STI); + if (OrShadowOffset) { + OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32) + .addReg(X86::R8) + .addReg(X86::R8) + .addImm(ShadowBase), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::MOV8rm) + .addReg(X86::R8B) + .addReg(X86::R8) + .addImm(1) + .addReg(X86::NoRegister) + .addImm(0) + .addReg(X86::NoRegister), + STI); + OutStreamer->emitInstruction( + MCInstBuilder(X86::TEST8rr).addReg(X86::R8B).addReg(X86::R8B), STI); + } else { + OutStreamer->emitInstruction(MCInstBuilder(X86::MOVSX32rm8) + .addReg(X86::R8D) + .addReg(X86::R8) + .addImm(1) + .addReg(X86::NoRegister) + .addImm(ShadowBase) + .addReg(X86::NoRegister), + STI); + OutStreamer->emitInstruction( + MCInstBuilder(X86::TEST32rr).addReg(X86::R8D).addReg(X86::R8D), STI); + } + MCSymbol *AdditionalCheck = OutContext.createTempSymbol(); + OutStreamer->emitInstruction( + MCInstBuilder(X86::JCC_1) + .addExpr(MCSymbolRefExpr::create(AdditionalCheck, OutContext)) + .addImm(X86::COND_NE), + STI); + MCSymbol *ReturnSym = OutContext.createTempSymbol(); + OutStreamer->emitLabel(ReturnSym); + 
OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI); + + // Shadow byte is non-zero so we need to perform additional checks. + OutStreamer->emitLabel(AdditionalCheck); + OutStreamer->emitInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RCX), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr) + .addReg(X86::RCX) + .addReg(X86::NoRegister + Reg), + STI); + const size_t Granularity = 1ULL << MappingScale; + OutStreamer->emitInstruction(MCInstBuilder(X86::AND32ri8) + .addReg(X86::ECX) + .addReg(X86::ECX) + .addImm(Granularity - 1), + STI); + if (AccessSizeIndex == 1) { + OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8) + .addReg(X86::ECX) + .addReg(X86::ECX) + .addImm(1), + STI); + } else if (AccessSizeIndex == 2) { + OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8) + .addReg(X86::ECX) + .addReg(X86::ECX) + .addImm(3), + STI); + } + + OutStreamer->emitInstruction( + MCInstBuilder(X86::CMP32rr).addReg(X86::ECX).addReg(X86::R8D).addImm(1), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RCX), + STI); + OutStreamer->emitInstruction( + MCInstBuilder(X86::JCC_1) + .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)) + .addImm(X86::COND_L), + STI); + + emitAsanReportError(M, Reg, IsWrite, AccessSizeIndex, STI); +} + +void X86AsmPrinter::emitAsanMemaccessFull(Module &M, unsigned Reg, + uint64_t ShadowBase, bool IsWrite, + size_t AccessSizeIndex, + size_t MappingScale, + bool OrShadowOffset, + MCSubtargetInfo &STI) { + assert(AccessSizeIndex == 3 || AccessSizeIndex == 4); + assert(Reg != X86::R8); + OutStreamer->emitInstruction( + MCInstBuilder(X86::MOV64rr).addReg(X86::R8).addReg(X86::NoRegister + Reg), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri) + .addReg(X86::R8) + .addReg(X86::R8) + .addImm(MappingScale), + STI); + if (OrShadowOffset) { + OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32) + .addReg(X86::R8) + .addReg(X86::R8) + .addImm(ShadowBase), + 
STI); + auto OpCode = AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8; + OutStreamer->emitInstruction(MCInstBuilder(OpCode) + .addReg(X86::R8) + .addImm(1) + .addReg(X86::NoRegister) + .addImm(0) + .addReg(X86::NoRegister) + .addImm(0), + STI); + } else { + auto OpCode = AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8; + OutStreamer->emitInstruction(MCInstBuilder(OpCode) + .addReg(X86::R8) + .addImm(1) + .addReg(X86::NoRegister) + .addImm(ShadowBase) + .addReg(X86::NoRegister) + .addImm(0), + STI); + } + MCSymbol *ReportCode = OutContext.createTempSymbol(); + OutStreamer->emitInstruction( + MCInstBuilder(X86::JCC_1) + .addExpr(MCSymbolRefExpr::create(ReportCode, OutContext)) + .addImm(X86::COND_NE), + STI); + MCSymbol *ReturnSym = OutContext.createTempSymbol(); + OutStreamer->emitLabel(ReturnSym); + OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI); + + OutStreamer->emitLabel(ReportCode); + emitAsanReportError(M, Reg, IsWrite, AccessSizeIndex, STI); +} + +void X86AsmPrinter::emitAsanReportError(Module &M, unsigned Reg, bool IsWrite, + size_t AccessSizeIndex, + MCSubtargetInfo &STI) { + std::string Name = IsWrite ? 
"store" : "load"; + MCSymbol *ReportError = OutContext.getOrCreateSymbol( + "__asan_report_" + Name + utostr(1 << AccessSizeIndex)); + OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr) + .addReg(X86::RDI) + .addReg(X86::NoRegister + Reg), + STI); + OutStreamer->emitInstruction( + MCInstBuilder(X86::JMP_1) + .addExpr(MCSymbolRefExpr::create(ReportError, OutContext)), + STI); +} + +void X86AsmPrinter::emitAsanMemaccessSymbols(Module &M) { + if (AsanMemaccessSymbols.empty()) + return; + + const Triple &TT = TM.getTargetTriple(); + assert(TT.isOSBinFormatELF()); + std::unique_ptr STI( + TM.getTarget().createMCSubtargetInfo(TT.str(), "", "")); + assert(STI && "Unable to create subtarget info"); + + for (auto &P : AsanMemaccessSymbols) { + MCSymbol *Sym = P.second; + OutStreamer->SwitchSection(OutContext.getELFSection( + ".text.hot", ELF::SHT_PROGBITS, + ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(), + /*IsComdat=*/true)); + + OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction); + OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak); + OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden); + OutStreamer->emitLabel(Sym); + + unsigned Reg = std::get<0>(P.first); + uint64_t ShadowBase = std::get<1>(P.first); + uint32_t AccessInfo = std::get<2>(P.first); + + bool IsWrite = (AccessInfo >> AsanAccessInfo::IsWriteShift) & 1; + size_t AccessSizeIndex = + (AccessInfo >> AsanAccessInfo::AccessSizeIndexShift) & 0xf; + size_t MappingScale = + (AccessInfo >> AsanAccessInfo::MappingScaleShift) & 0xf; + bool OrShadowOffset = + (AccessInfo >> AsanAccessInfo::OrShadowOffsetShift) & 1; + + if (AccessSizeIndex < 3) { + emitAsanMemaccessPartial(M, Reg, ShadowBase, IsWrite, AccessSizeIndex, + MappingScale, OrShadowOffset, *STI); + } else { + emitAsanMemaccessFull(M, Reg, ShadowBase, IsWrite, AccessSizeIndex, + MappingScale, OrShadowOffset, *STI); + } + } +} + void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI, X86MCInstLower &MCIL) { // 
PATCHABLE_OP minsize, opcode, operands @@ -2563,6 +2806,9 @@ EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); return; + case X86::ASAN_CHECK_MEMACCESS: + return LowerASAN_CHECK_MEMACCESS(*MI); + case X86::MORESTACK_RET_RESTORE_R10: // Return, then restore R10. EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); diff --git a/llvm/lib/Target/X86/X86RegisterInfo.td b/llvm/lib/Target/X86/X86RegisterInfo.td --- a/llvm/lib/Target/X86/X86RegisterInfo.td +++ b/llvm/lib/Target/X86/X86RegisterInfo.td @@ -436,6 +436,12 @@ (add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11, RBX, R14, R15, R12, R13, RBP, RSP, RIP)>; +// GR64 - 64-bit GPRs without R8 and RIP. Could be used when emitting code for +// intrinsics, which use implict input registers. +def GR64NoR8 : RegisterClass<"X86", [i64], 64, + (add RAX, RCX, RDX, RSI, RDI, R9, R10, R11, + RBX, R14, R15, R12, R13, RBP, RSP)>; + // Segment registers for use by MOV instructions (and others) that have a // segment register as one operand. Always contain a 16-bit segment // descriptor. 
diff --git a/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll b/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll @@ -0,0 +1,213 @@ +; RUN: llc < %s | FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" + +define void @load1(i8* nocapture readonly %x) { +; CHECK: callq __asan_check_load1_rn[[RN1:.*]] +; CHECK: callq __asan_check_store1_rn[[RN1]] +; CHECK-NEXT: retq + call void @llvm.asan.check.memaccess(i8* %x, i64 2147450880, i32 96) + call void @llvm.asan.check.memaccess(i8* %x, i64 2147450880, i32 112) + ret void +} + +define void @load2(i16* nocapture readonly %x) { +; CHECK: callq __asan_check_load2_rn[[RN2:.*]] +; CHECK: callq __asan_check_store2_rn[[RN2]] +; CHECK-NEXT: retq + %1 = ptrtoint i16* %x to i64 + %2 = bitcast i16* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 97) + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 113) + ret void +} + +define void @load4(i32* nocapture readonly %x) { +; CHECK: callq __asan_check_load4_rn[[RN4:.*]] +; CHECK: callq __asan_check_store4_rn[[RN4]] +; CHECK-NEXT: retq + %1 = ptrtoint i32* %x to i64 + %2 = bitcast i32* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 98) + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 114) + ret void +} +define void @load8(i64* nocapture readonly %x) { +; CHECK: callq __asan_check_load8_rn[[RN8:.*]] +; CHECK: callq __asan_check_store8_rn[[RN8]] +; CHECK-NEXT: retq + %1 = ptrtoint i64* %x to i64 + %2 = bitcast i64* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 99) + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 115) + ret void +} + +define void @load16(i128* nocapture readonly %x) { +; CHECK: callq __asan_check_load16_rn[[RN16:.*]] +; CHECK: callq __asan_check_store16_rn[[RN16]] +; CHECK-NEXT: retq + %1 = ptrtoint i128* %x to i64 + %2 = bitcast i128* 
%x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 100) + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 116) + ret void +} + +; CHECK: __asan_check_load1_rn[[RN1]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load1 + +; CHECK: __asan_check_load2_rn[[RN2]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $1, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load2 + +; CHECK: __asan_check_load4_rn[[RN4]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $3, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load4 + +; CHECK: __asan_check_load8_rn[[RN8]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: cmpb $0, 2147450880(%r8) +; CHECK-NEXT: jne 
[[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load8 + +; CHECK: __asan_check_load16_rn[[RN16]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: cmpw $0, 2147450880(%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load16 + +; CHECK: __asan_check_store1_rn[[RN1]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store1 + +; CHECK: __asan_check_store2_rn[[RN2]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $1, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store2 + +; CHECK: __asan_check_store4_rn[[RN4]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $3, %ecx +; 
CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store4 + +; CHECK: __asan_check_store8_rn[[RN8]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: cmpb $0, 2147450880(%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store8 + +; CHECK: __asan_check_store16_rn[[RN16]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: cmpw $0, 2147450880(%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store16 + +declare void @llvm.asan.check.memaccess(i8*, i64 immarg, i32 immarg) diff --git a/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll b/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll @@ -0,0 +1,223 @@ +; RUN: llc < %s | FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" + +define void @load1(i8* nocapture readonly %x) { +; CHECK: callq __asan_check_load1_rn[[RN1:.*]] +; CHECK: callq __asan_check_store1_rn[[RN1]] +; CHECK-NEXT: retq + call void @llvm.asan.check.memaccess(i8* %x, i64 2147450880, i32 608) + call void @llvm.asan.check.memaccess(i8* %x, i64 2147450880, i32 624) + ret void +} + +define void @load2(i16* nocapture readonly %x) { +; CHECK: callq __asan_check_load2_rn[[RN2:.*]] +; CHECK: callq __asan_check_store2_rn[[RN2]] +; CHECK-NEXT: retq + %1 = ptrtoint i16* %x to i64 + %2 = bitcast i16* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 609) + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 625) + ret void +} + +define void @load4(i32* nocapture readonly %x) { +; CHECK: callq __asan_check_load4_rn[[RN4:.*]] +; 
CHECK: callq __asan_check_store4_rn[[RN4]] +; CHECK-NEXT: retq + %1 = ptrtoint i32* %x to i64 + %2 = bitcast i32* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 610) + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 626) + ret void +} +define void @load8(i64* nocapture readonly %x) { +; CHECK: callq __asan_check_load8_rn[[RN8:.*]] +; CHECK: callq __asan_check_store8_rn[[RN8]] +; CHECK-NEXT: retq + %1 = ptrtoint i64* %x to i64 + %2 = bitcast i64* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 611) + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 627) + ret void +} + +define void @load16(i128* nocapture readonly %x) { +; CHECK: callq __asan_check_load16_rn[[RN16:.*]] +; CHECK: callq __asan_check_store16_rn[[RN16]] +; CHECK-NEXT: retq + %1 = ptrtoint i128* %x to i64 + %2 = bitcast i128* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 612) + call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 628) + ret void +} + +; CHECK: __asan_check_load1_rn[[RN1]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load1 + +; CHECK: __asan_check_load2_rn[[RN2]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; 
CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $1, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load2 + +; CHECK: __asan_check_load4_rn[[RN4]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $3, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load4 + +; CHECK: __asan_check_load8_rn[[RN8]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8{{.*}} +; CHECK-NEXT: cmpb $0, (%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load8 + +; CHECK: __asan_check_load16_rn[[RN16]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8{{.*}} +; CHECK-NEXT: cmpw $0, (%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load16 + +; CHECK: __asan_check_store1_rn[[RN1]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] 
+; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store1 + +; CHECK: __asan_check_store2_rn[[RN2]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $1, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store2 + +; CHECK: __asan_check_store4_rn[[RN4]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $3, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store4 + +; CHECK: __asan_check_store8_rn[[RN8]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8{{.*}} +; CHECK-NEXT: cmpb $0, (%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store8 + +; CHECK: __asan_check_store16_rn[[RN16]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $2147450880, %r8{{.*}} +; CHECK-NEXT: cmpw $0, (%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store16 + +declare void 
@llvm.asan.check.memaccess(i8*, i64 immarg, i32 immarg)