diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1339,8 +1339,8 @@
 def int_sideeffect : DefaultAttrsIntrinsic<[], [], [IntrInaccessibleMemOnly,
                                                     IntrWillReturn]>;
 // The pseudoprobe intrinsic works as a place holder to the block it probes.
-// Like the sideeffect intrinsic defined above, this intrinsic is treated by the
-// optimizer as having opaque side effects so that it won't be get rid of or moved
+// Like the sideeffect intrinsic defined above, this intrinsic is treated by the
+// optimizer as having opaque side effects so that it won't be gotten rid of or moved
 // out of the block it probes.
 def int_pseudoprobe : Intrinsic<[], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
                                 [IntrInaccessibleMemOnly, IntrWillReturn]>;
@@ -1637,6 +1637,9 @@
 def int_load_relative: DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
                                              [IntrReadMem, IntrArgMemOnly]>;
 
+def int_asan_check_memaccess :
+  Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<1>>]>;
+
 def int_hwasan_check_memaccess :
   Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>;
 
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
@@ -155,6 +155,17 @@
                                bool UseOdrIndicator = true,
                                AsanDtorKind DestructorKind = AsanDtorKind::Global);
 
+namespace ASanAccessInfo {
+
+// Bit field positions for accessinfo parameter to llvm.asan.check.memaccess.
+enum {
+  CompileKernelShift = 0, // 1 bit
+  AccessSizeShift = 1,    // 4 bits
+  IsWriteShift = 5,       // 1 bit
+};
+
+} // namespace ASanAccessInfo
+
 } // namespace llvm
 
 #endif
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.h b/llvm/lib/Target/X86/X86AsmPrinter.h
--- a/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -98,6 +98,22 @@
   void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
 
+  // Address sanitizer specific lowering for X86.
+  void LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI);
+  void emitAsanMemaccessSymbols(Module &M);
+  void emitAsanMemaccessPartial(Module &M, unsigned Reg, bool IsWrite,
+                                size_t AccessSizeIndex, bool CompileKernel,
+                                MCSubtargetInfo &STI);
+  void emitAsanMemaccessFull(Module &M, unsigned Reg, bool IsWrite,
+                             size_t AccessSizeIndex, bool CompileKernel,
+                             MCSubtargetInfo &STI);
+  void emitAsanReportError(Module &M, unsigned Reg, bool IsWrite,
+                           size_t AccessSizeIndex, MCSubtargetInfo &STI);
+
+  typedef std::tuple<unsigned /*Reg*/, uint32_t /*AccessInfo*/>
+      AsanMemaccessTuple;
+  std::map<AsanMemaccessTuple, MCSymbol *> AsanMemaccessSymbols;
+
   // Choose between emitting .seh_ directives and .cv_fpo_ directives.
   void EmitSEHInstruction(const MachineInstr *MI);
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -753,6 +753,8 @@
 void X86AsmPrinter::emitEndOfAsmFile(Module &M) {
   const Triple &TT = TM.getTargetTriple();
 
+  emitAsanMemaccessSymbols(M);
+
   if (TT.isOSBinFormatMachO()) {
     // Mach-O uses non-lazy symbol stubs to encode per-TU information into
     // global table for symbol lookup.
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td --- a/llvm/lib/Target/X86/X86InstrCompiler.td +++ b/llvm/lib/Target/X86/X86InstrCompiler.td @@ -260,6 +260,17 @@ "#SEH_Epilogue", []>; } +//===----------------------------------------------------------------------===// +// Pseudo instructions used by address sanitizer. +//===----------------------------------------------------------------------===// +let + Defs = [R8, EFLAGS] in { +def ASAN_CHECK_MEMACCESS : PseudoI< + (outs), (ins GR64NoR8:$addr, i32imm:$accessinfo), + [(int_asan_check_memaccess GR64NoR8:$addr, (i32 timm:$accessinfo))]>, + Sched<[]>; +} + //===----------------------------------------------------------------------===// // Pseudo instructions used by segmented stacks. // @@ -960,7 +971,7 @@ !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"), [(set GR32:$dst, - (!cast(frag # "_32") addr:$ptr, GR32:$val))]>, + (!cast(frag # "_32") addr:$ptr, GR32:$val))]>, OpSize32; def NAME#64 : RI> ASanAccessInfo::AccessSizeShift) & 0xf; + bool IsWrite = (AccessInfo >> ASanAccessInfo::IsWriteShift) & 1; + + MCSymbol *&Sym = AsanMemaccessSymbols[{Reg, AccessInfo}]; + if (!Sym) { + std::string Name = IsWrite ? 
"store" : "load"; + std::string SymName = "__asan_check_" + Name + + utostr(1 << AccessSizeIndex) + "_rn" + utostr(Reg); + Sym = OutContext.getOrCreateSymbol(SymName); + } + + EmitAndCountInstruction( + MCInstBuilder(X86::CALL64pcrel32) + .addExpr(MCSymbolRefExpr::create(Sym, OutContext))); +} + +void X86AsmPrinter::emitAsanMemaccessPartial(Module &M, unsigned Reg, + bool IsWrite, + size_t AccessSizeIndex, + bool CompileKernel, + MCSubtargetInfo &STI) { + assert(AccessSizeIndex == 0 || AccessSizeIndex == 1 || AccessSizeIndex == 2); + assert(Reg != X86::R8); + + uint64_t ShadowBase; + int MappingScale; + bool OrShadowOffset; + getAddressSanitizerParams( + Triple(M.getTargetTriple()), M.getDataLayout().getPointerSizeInBits(), + CompileKernel, &ShadowBase, &MappingScale, &OrShadowOffset); + + OutStreamer->emitInstruction( + MCInstBuilder(X86::MOV64rr).addReg(X86::R8).addReg(X86::NoRegister + Reg), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri) + .addReg(X86::R8) + .addReg(X86::R8) + .addImm(MappingScale), + STI); + if (OrShadowOffset) { + OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32) + .addReg(X86::R8) + .addReg(X86::R8) + .addImm(ShadowBase), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::MOV8rm) + .addReg(X86::R8B) + .addReg(X86::R8) + .addImm(1) + .addReg(X86::NoRegister) + .addImm(0) + .addReg(X86::NoRegister), + STI); + OutStreamer->emitInstruction( + MCInstBuilder(X86::TEST8rr).addReg(X86::R8B).addReg(X86::R8B), STI); + } else { + OutStreamer->emitInstruction(MCInstBuilder(X86::MOVSX32rm8) + .addReg(X86::R8D) + .addReg(X86::R8) + .addImm(1) + .addReg(X86::NoRegister) + .addImm(ShadowBase) + .addReg(X86::NoRegister), + STI); + OutStreamer->emitInstruction( + MCInstBuilder(X86::TEST32rr).addReg(X86::R8D).addReg(X86::R8D), STI); + } + MCSymbol *AdditionalCheck = OutContext.createTempSymbol(); + OutStreamer->emitInstruction( + MCInstBuilder(X86::JCC_1) + .addExpr(MCSymbolRefExpr::create(AdditionalCheck, OutContext)) + 
.addImm(X86::COND_NE), + STI); + MCSymbol *ReturnSym = OutContext.createTempSymbol(); + OutStreamer->emitLabel(ReturnSym); + OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI); + + // Shadow byte is non-zero so we need to perform additional checks. + OutStreamer->emitLabel(AdditionalCheck); + OutStreamer->emitInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RCX), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr) + .addReg(X86::RCX) + .addReg(X86::NoRegister + Reg), + STI); + const size_t Granularity = 1ULL << MappingScale; + OutStreamer->emitInstruction(MCInstBuilder(X86::AND32ri8) + .addReg(X86::ECX) + .addReg(X86::ECX) + .addImm(Granularity - 1), + STI); + if (AccessSizeIndex == 1) { + OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8) + .addReg(X86::ECX) + .addReg(X86::ECX) + .addImm(1), + STI); + } else if (AccessSizeIndex == 2) { + OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8) + .addReg(X86::ECX) + .addReg(X86::ECX) + .addImm(3), + STI); + } + + OutStreamer->emitInstruction( + MCInstBuilder(X86::CMP32rr).addReg(X86::ECX).addReg(X86::R8D).addImm(1), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RCX), + STI); + OutStreamer->emitInstruction( + MCInstBuilder(X86::JCC_1) + .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)) + .addImm(X86::COND_L), + STI); + + emitAsanReportError(M, Reg, IsWrite, AccessSizeIndex, STI); +} + +void X86AsmPrinter::emitAsanMemaccessFull(Module &M, unsigned Reg, bool IsWrite, + size_t AccessSizeIndex, + bool CompileKernel, + MCSubtargetInfo &STI) { + assert(AccessSizeIndex == 3 || AccessSizeIndex == 4); + assert(Reg != X86::R8); + + uint64_t ShadowBase; + int MappingScale; + bool OrShadowOffset; + getAddressSanitizerParams( + Triple(M.getTargetTriple()), M.getDataLayout().getPointerSizeInBits(), + CompileKernel, &ShadowBase, &MappingScale, &OrShadowOffset); + + OutStreamer->emitInstruction( + 
MCInstBuilder(X86::MOV64rr).addReg(X86::R8).addReg(X86::NoRegister + Reg), + STI); + OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri) + .addReg(X86::R8) + .addReg(X86::R8) + .addImm(MappingScale), + STI); + if (OrShadowOffset) { + OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32) + .addReg(X86::R8) + .addReg(X86::R8) + .addImm(ShadowBase), + STI); + auto OpCode = AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8; + OutStreamer->emitInstruction(MCInstBuilder(OpCode) + .addReg(X86::R8) + .addImm(1) + .addReg(X86::NoRegister) + .addImm(0) + .addReg(X86::NoRegister) + .addImm(0), + STI); + } else { + auto OpCode = AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8; + OutStreamer->emitInstruction(MCInstBuilder(OpCode) + .addReg(X86::R8) + .addImm(1) + .addReg(X86::NoRegister) + .addImm(ShadowBase) + .addReg(X86::NoRegister) + .addImm(0), + STI); + } + MCSymbol *ReportCode = OutContext.createTempSymbol(); + OutStreamer->emitInstruction( + MCInstBuilder(X86::JCC_1) + .addExpr(MCSymbolRefExpr::create(ReportCode, OutContext)) + .addImm(X86::COND_NE), + STI); + MCSymbol *ReturnSym = OutContext.createTempSymbol(); + OutStreamer->emitLabel(ReturnSym); + OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI); + + OutStreamer->emitLabel(ReportCode); + emitAsanReportError(M, Reg, IsWrite, AccessSizeIndex, STI); +} + +void X86AsmPrinter::emitAsanReportError(Module &M, unsigned Reg, bool IsWrite, + size_t AccessSizeIndex, + MCSubtargetInfo &STI) { + std::string Name = IsWrite ? 
"store" : "load"; + MCSymbol *ReportError = OutContext.getOrCreateSymbol( + "__asan_report_" + Name + utostr(1 << AccessSizeIndex)); + OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr) + .addReg(X86::RDI) + .addReg(X86::NoRegister + Reg), + STI); + OutStreamer->emitInstruction( + MCInstBuilder(X86::JMP_1) + .addExpr(MCSymbolRefExpr::create(ReportError, OutContext)), + STI); +} + +void X86AsmPrinter::emitAsanMemaccessSymbols(Module &M) { + if (AsanMemaccessSymbols.empty()) + return; + + const Triple &TT = TM.getTargetTriple(); + assert(TT.isOSBinFormatELF()); + std::unique_ptr STI( + TM.getTarget().createMCSubtargetInfo(TT.str(), "", "")); + assert(STI && "Unable to create subtarget info"); + + for (auto &P : AsanMemaccessSymbols) { + MCSymbol *Sym = P.second; + OutStreamer->SwitchSection(OutContext.getELFSection( + ".text.hot", ELF::SHT_PROGBITS, + ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(), + /*IsComdat=*/true)); + + OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction); + OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak); + OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden); + OutStreamer->emitLabel(Sym); + + unsigned Reg = std::get<0>(P.first); + int32_t AccessInfo = std::get<1>(P.first); + + bool CompileKernel = (AccessInfo >> ASanAccessInfo::CompileKernelShift) & 1; + int32_t AccessSizeIndex = + (AccessInfo >> ASanAccessInfo::AccessSizeShift) & 0xf; + bool IsWrite = (AccessInfo >> ASanAccessInfo::IsWriteShift) & 1; + + if (AccessSizeIndex < 3) { + emitAsanMemaccessPartial(M, Reg, IsWrite, AccessSizeIndex, CompileKernel, + *STI); + } else { + emitAsanMemaccessFull(M, Reg, IsWrite, AccessSizeIndex, CompileKernel, + *STI); + } + } +} + void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI, X86MCInstLower &MCIL) { // PATCHABLE_OP minsize, opcode, operands @@ -2563,6 +2813,9 @@ EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); return; + case X86::ASAN_CHECK_MEMACCESS: + return 
LowerASAN_CHECK_MEMACCESS(*MI);
+
   case X86::MORESTACK_RET_RESTORE_R10:
     // Return, then restore R10.
     EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.td b/llvm/lib/Target/X86/X86RegisterInfo.td
--- a/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -436,6 +436,12 @@
                           (add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
                                RBX, R14, R15, R12, R13, RBP, RSP, RIP)>;
 
+// GR64 - 64-bit GPRs without R8 and RIP. Could be used when emitting code for
+// intrinsics, which use implicit input registers.
+def GR64NoR8 : RegisterClass<"X86", [i64], 64,
+                             (add RAX, RCX, RDX, RSI, RDI, R9, R10, R11,
+                                  RBX, R14, R15, R12, R13, RBP, RSP)>;
+
 // Segment registers for use by MOV instructions (and others) that have a
 // segment register as one operand. Always contain a 16-bit segment
 // descriptor.
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1751,14 +1751,26 @@
   IRBuilder<> IRB(InsertBefore);
   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
+  const int32_t AccessInfo =
+      (CompileKernel << ASanAccessInfo::CompileKernelShift) +
+      (IsWrite << ASanAccessInfo::IsWriteShift) +
+      (AccessSizeIndex << ASanAccessInfo::AccessSizeShift);
   if (UseCalls) {
-    if (Exp == 0)
-      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
-                     AddrLong);
-    else
-      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
-                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+    if (ClOptimizeCallbacks) {
+      Value *Ptr8 = IRB.CreatePointerCast(Addr, Int8PtrTy);
+      Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+      IRB.CreateCall(
+          Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
+          {Ptr8, 
ConstantInt::get(IRB.getInt32Ty(), AccessInfo)}); + } else { + if (Exp == 0) + IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], + AddrLong); + else + IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], + {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)}); + } return; } diff --git a/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll b/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll @@ -0,0 +1,213 @@ +; RUN: llc < %s | FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" + +define void @load1(i8* nocapture readonly %x) { +; CHECK: callq __asan_check_load1_rn[[RN1:.*]] +; CHECK: callq __asan_check_store1_rn[[RN1]] +; CHECK-NEXT: retq + call void @llvm.asan.check.memaccess(i8* %x, i32 0) + call void @llvm.asan.check.memaccess(i8* %x, i32 32) + ret void +} + +define void @load2(i16* nocapture readonly %x) { +; CHECK: callq __asan_check_load2_rn[[RN2:.*]] +; CHECK: callq __asan_check_store2_rn[[RN2]] +; CHECK-NEXT: retq + %1 = ptrtoint i16* %x to i64 + %2 = bitcast i16* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i32 2) + call void @llvm.asan.check.memaccess(i8* %2, i32 34) + ret void +} + +define void @load4(i32* nocapture readonly %x) { +; CHECK: callq __asan_check_load4_rn[[RN4:.*]] +; CHECK: callq __asan_check_store4_rn[[RN4]] +; CHECK-NEXT: retq + %1 = ptrtoint i32* %x to i64 + %2 = bitcast i32* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i32 4) + call void @llvm.asan.check.memaccess(i8* %2, i32 36) + ret void +} +define void @load8(i64* nocapture readonly %x) { +; CHECK: callq __asan_check_load8_rn[[RN8:.*]] +; CHECK: callq __asan_check_store8_rn[[RN8]] +; CHECK-NEXT: retq + %1 = ptrtoint i64* %x to i64 + %2 = bitcast i64* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i32 6) + call void @llvm.asan.check.memaccess(i8* %2, i32 38) + ret void +} + +define void @load16(i128* nocapture readonly 
%x) { +; CHECK: callq __asan_check_load16_rn[[RN16:.*]] +; CHECK: callq __asan_check_store16_rn[[RN16]] +; CHECK-NEXT: retq + %1 = ptrtoint i128* %x to i64 + %2 = bitcast i128* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i32 8) + call void @llvm.asan.check.memaccess(i8* %2, i32 40) + ret void +} + +; CHECK: __asan_check_load1_rn[[RN1]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load1 + +; CHECK: __asan_check_load2_rn[[RN2]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $1, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load2 + +; CHECK: __asan_check_load4_rn[[RN4]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $3, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load4 + +; CHECK: 
__asan_check_load8_rn[[RN8]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: cmpb $0, 2147450880(%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load8 + +; CHECK: __asan_check_load16_rn[[RN16]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: cmpw $0, 2147450880(%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load16 + +; CHECK: __asan_check_store1_rn[[RN1]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store1 + +; CHECK: __asan_check_store2_rn[[RN2]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $1, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store2 + +; CHECK: __asan_check_store4_rn[[RN4]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: movsbl 2147450880(%r8), %r8d +; CHECK-NEXT: testl %r8d, %r8d +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: 
retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $3, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store4 + +; CHECK: __asan_check_store8_rn[[RN8]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: cmpb $0, 2147450880(%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store8 + +; CHECK: __asan_check_store16_rn[[RN16]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: cmpw $0, 2147450880(%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store16 + +declare void @llvm.asan.check.memaccess(i8*, i32 immarg) diff --git a/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll b/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll @@ -0,0 +1,223 @@ +; RUN: llc < %s | FileCheck %s + +target triple = "x86_64-pc-win" + +define void @load1(i8* nocapture readonly %x) { +; CHECK: callq __asan_check_load1_rn[[RN1:.*]] +; CHECK: callq __asan_check_store1_rn[[RN1]] +; CHECK-NEXT: retq + call void @llvm.asan.check.memaccess(i8* %x, i32 0) + call void @llvm.asan.check.memaccess(i8* %x, i32 32) + ret void +} + +define void @load2(i16* nocapture readonly %x) { +; CHECK: callq __asan_check_load2_rn[[RN2:.*]] +; CHECK: callq __asan_check_store2_rn[[RN2]] +; CHECK-NEXT: retq + %1 = ptrtoint i16* %x to i64 + %2 = bitcast i16* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i32 2) + call void @llvm.asan.check.memaccess(i8* %2, i32 34) + ret void +} + +define void @load4(i32* nocapture readonly 
%x) { +; CHECK: callq __asan_check_load4_rn[[RN4:.*]] +; CHECK: callq __asan_check_store4_rn[[RN4]] +; CHECK-NEXT: retq + %1 = ptrtoint i32* %x to i64 + %2 = bitcast i32* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i32 4) + call void @llvm.asan.check.memaccess(i8* %2, i32 36) + ret void +} +define void @load8(i64* nocapture readonly %x) { +; CHECK: callq __asan_check_load8_rn[[RN8:.*]] +; CHECK: callq __asan_check_store8_rn[[RN8]] +; CHECK-NEXT: retq + %1 = ptrtoint i64* %x to i64 + %2 = bitcast i64* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i32 6) + call void @llvm.asan.check.memaccess(i8* %2, i32 38) + ret void +} + +define void @load16(i128* nocapture readonly %x) { +; CHECK: callq __asan_check_load16_rn[[RN16:.*]] +; CHECK: callq __asan_check_store16_rn[[RN16]] +; CHECK-NEXT: retq + %1 = ptrtoint i128* %x to i64 + %2 = bitcast i128* %x to i8* + call void @llvm.asan.check.memaccess(i8* %2, i32 8) + call void @llvm.asan.check.memaccess(i8* %2, i32 40) + ret void +} + +; CHECK: __asan_check_load1_rn[[RN1]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8{{.*}} +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load1 + +; CHECK: __asan_check_load2_rn[[RN2]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8{{.*}} +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; 
CHECK-NEXT: addl $1, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load2 + +; CHECK: __asan_check_load4_rn[[RN4]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8{{.*}} +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $3, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load4 + +; CHECK: __asan_check_load8_rn[[RN8]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8{{.*}} +; CHECK-NEXT: cmpb $0, (%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load8 + +; CHECK: __asan_check_load16_rn[[RN16]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8{{.*}} +; CHECK-NEXT: cmpw $0, (%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_load16 + +; CHECK: __asan_check_store1_rn[[RN1]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; 
CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store1 + +; CHECK: __asan_check_store2_rn[[RN2]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $1, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store2 + +; CHECK: __asan_check_store4_rn[[RN4]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8 +; CHECK-NEXT: movb (%r8), %r8b +; CHECK-NEXT: testb %r8b, %r8b +; CHECK-NEXT: jne [[EXTRA:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[EXTRA]]: +; CHECK-NEXT: pushq %rcx +; CHECK-NEXT: movq [[REG]], %rcx +; CHECK-NEXT: andl $7, %ecx +; CHECK-NEXT: addl $3, %ecx +; CHECK-NEXT: cmpl %r8d, %ecx +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: jl [[RET]] +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store4 + +; CHECK: __asan_check_store8_rn[[RN8]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8{{.*}} +; CHECK-NEXT: cmpb $0, (%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store8 + +; CHECK: __asan_check_store16_rn[[RN16]]: +; CHECK-NEXT: movq [[REG:.*]], %r8 +; CHECK-NEXT: shrq $3, %r8 +; CHECK-NEXT: orq $17592186044416, %r8{{.*}} +; CHECK-NEXT: cmpw $0, (%r8) +; CHECK-NEXT: jne [[FAIL:.*]] +; CHECK-NEXT: [[RET:.*]]: +; CHECK-NEXT: retq +; CHECK-NEXT: [[FAIL]]: +; CHECK-NEXT: movq [[REG:.*]], %rdi +; CHECK-NEXT: jmp __asan_report_store16 + +declare 
void @llvm.asan.check.memaccess(i8*, i32 immarg) diff --git a/llvm/tools/llvm-exegesis/CMakeLists.txt b/llvm/tools/llvm-exegesis/CMakeLists.txt --- a/llvm/tools/llvm-exegesis/CMakeLists.txt +++ b/llvm/tools/llvm-exegesis/CMakeLists.txt @@ -1,4 +1,5 @@ set(LLVM_LINK_COMPONENTS + Instrumentation MC MCParser Support