diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1635,9 +1635,6 @@
 def int_load_relative: DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
                                              [IntrReadMem, IntrArgMemOnly]>;
 
-def int_asan_check_memaccess :
-    Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<1>>]>;
-
 def int_hwasan_check_memaccess :
     Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>;
diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td
--- a/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -62,6 +62,13 @@
               Intrinsic<[llvm_i32_ty], [], []>;
 }
 
+//===----------------------------------------------------------------------===//
+// Sanitizer support.
+let TargetPrefix = "x86" in {
+def int_x86_asan_check_memaccess :
+    Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<1>>]>;
+}
+
 //===----------------------------------------------------------------------===//
 // CET SS
 let TargetPrefix = "x86" in {
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -267,7 +267,7 @@
     Defs = [R8, EFLAGS] in {
 def ASAN_CHECK_MEMACCESS : PseudoI<
   (outs), (ins GR64NoR8:$addr, i32imm:$accessinfo),
-  [(int_asan_check_memaccess GR64NoR8:$addr, (i32 timm:$accessinfo))]>,
+  [(int_x86_asan_check_memaccess GR64NoR8:$addr, (i32 timm:$accessinfo))]>,
   Sched<[]>;
 }
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -53,6 +53,7 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsX86.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/Metadata.h"
@@ -1798,10 +1799,12 @@ const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
   if (UseCalls && ClOptimizeCallbacks) {
+    if (!TargetTriple.isX86())
+      llvm_unreachable("Flag -asan-optimize-callbacks is X86 only!");
     const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
     Module *M = IRB.GetInsertBlock()->getParent()->getParent();
     IRB.CreateCall(
-        Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
+        Intrinsic::getDeclaration(M, llvm::Intrinsic::x86_asan_check_memaccess),
         {IRB.CreatePointerCast(Addr, Int8PtrTy),
          ConstantInt::get(Int32Ty, AccessInfo.Packed)});
     return;
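For reference, the i32 immediate passed to the check intrinsic is ASanAccessInfo::Packed. A minimal sketch of that packing, with the bit positions inferred from the constants in the tests below (CompileKernel in bit 0, the access-size index in bits 1-4, IsWrite in bit 5) rather than taken from the LLVM headers:

#include <cassert>
#include <cstdint>

// Sketch of the packed access-info encoding; shifts are inferred from the
// test constants (load1 -> 0, store1 -> 32, kernel load2 -> 3), not copied
// from the authoritative ASanAccessInfo definition.
constexpr uint32_t packAccessInfo(bool IsWrite, bool CompileKernel,
                                  uint32_t AccessSizeIndex) {
  return (CompileKernel ? 1u : 0u)   // bit 0: kernel vs. userspace runtime
         | (AccessSizeIndex << 1)    // bits 1-4: log2 of the access size
         | (IsWrite ? 1u << 5 : 0u); // bit 5: store vs. load
}

int main() {
  assert(packAccessInfo(false, false, 0) == 0);  // load1
  assert(packAccessInfo(true, false, 0) == 32);  // store1
  assert(packAccessInfo(false, false, 3) == 6);  // load8
  assert(packAccessInfo(true, true, 4) == 41);   // kernel store16
}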
diff --git a/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll b/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll
--- a/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll
+++ b/llvm/test/CodeGen/X86/asan-check-memaccess-add.ll
@@ -4,53 +4,48 @@
 define void @load1(i8* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load1_rn[[RN1:.*]]
-; CHECK: callq __asan_check_store1_rn[[RN1]]
-; CHECK-NEXT: retq
-  call void @llvm.asan.check.memaccess(i8* %x, i32 0)
-  call void @llvm.asan.check.memaccess(i8* %x, i32 32)
+; CHECK-NEXT: callq __asan_check_store1_rn[[RN1]]
+  call void @llvm.x86.asan.check.memaccess(i8* %x, i32 0)
+  call void @llvm.x86.asan.check.memaccess(i8* %x, i32 32)
   ret void
 }
 
 define void @load2(i16* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load2_rn[[RN2:.*]]
-; CHECK: callq __asan_check_store2_rn[[RN2]]
-; CHECK-NEXT: retq
+; CHECK-NEXT: callq __asan_check_store2_rn[[RN2]]
   %1 = ptrtoint i16* %x to i64
   %2 = bitcast i16* %x to i8*
-  call void @llvm.asan.check.memaccess(i8* %2, i32 2)
-  call void @llvm.asan.check.memaccess(i8* %2, i32 34)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 2)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 34)
   ret void
 }
 
 define void @load4(i32* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load4_rn[[RN4:.*]]
-; CHECK: callq __asan_check_store4_rn[[RN4]]
-; CHECK-NEXT: retq
+; CHECK-NEXT: callq __asan_check_store4_rn[[RN4]]
   %1 = ptrtoint i32* %x to i64
   %2 = bitcast i32* %x to i8*
-  call void @llvm.asan.check.memaccess(i8* %2, i32 4)
-  call void @llvm.asan.check.memaccess(i8* %2, i32 36)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 4)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 36)
   ret void
 }
 
 define void @load8(i64* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load8_rn[[RN8:.*]]
-; CHECK: callq __asan_check_store8_rn[[RN8]]
-; CHECK-NEXT: retq
+; CHECK-NEXT: callq __asan_check_store8_rn[[RN8]]
   %1 = ptrtoint i64* %x to i64
   %2 = bitcast i64* %x to i8*
-  call void @llvm.asan.check.memaccess(i8* %2, i32 6)
-  call void @llvm.asan.check.memaccess(i8* %2, i32 38)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 6)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 38)
   ret void
 }
 
 define void @load16(i128* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load16_rn[[RN16:.*]]
-; CHECK: callq __asan_check_store16_rn[[RN16]]
-; CHECK-NEXT: retq
+; CHECK-NEXT: callq __asan_check_store16_rn[[RN16]]
   %1 = ptrtoint i128* %x to i64
   %2 = bitcast i128* %x to i8*
-  call void @llvm.asan.check.memaccess(i8* %2, i32 8)
-  call void @llvm.asan.check.memaccess(i8* %2, i32 40)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 8)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 40)
   ret void
 }
@@ -240,4 +235,4 @@
 ; CHECK-NEXT: movq [[REG:.*]], %rdi
 ; CHECK-NEXT: jmp __asan_report_store16@PLT
 
-declare void @llvm.asan.check.memaccess(i8*, i32 immarg)
+declare void @llvm.x86.asan.check.memaccess(i8*, i32 immarg)
diff --git a/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll b/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll
--- a/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll
+++ b/llvm/test/CodeGen/X86/asan-check-memaccess-or.ll
@@ -4,56 +4,50 @@
 define void @load1(i8* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load1_rn[[RN1:.*]]
-; CHECK: callq __asan_check_store1_rn[[RN1]]
-; CHECK-NEXT: retq
-  call void @llvm.asan.check.memaccess(i8* %x, i32 0)
-  call void @llvm.asan.check.memaccess(i8* %x, i32 32)
+; CHECK-NEXT: callq __asan_check_store1_rn[[RN1]]
+  call void @llvm.x86.asan.check.memaccess(i8* %x, i32 0)
+  call void @llvm.x86.asan.check.memaccess(i8* %x, i32 32)
   ret void
 }
 
 define void @load2(i16* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load2_rn[[RN2:.*]]
-; CHECK: callq __asan_check_store2_rn[[RN2]]
-; CHECK-NEXT: retq
+; CHECK-NEXT: callq __asan_check_store2_rn[[RN2]]
   %1 = ptrtoint i16* %x to i64
   %2 = bitcast i16* %x to i8*
-  call void @llvm.asan.check.memaccess(i8* %2, i32 2)
-  call void @llvm.asan.check.memaccess(i8* %2, i32 34)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 2)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 34)
   ret void
 }
 
 define void @load4(i32* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load4_rn[[RN4:.*]]
-; CHECK: callq __asan_check_store4_rn[[RN4]]
-; CHECK-NEXT: retq
+; CHECK-NEXT: callq __asan_check_store4_rn[[RN4]]
   %1 = ptrtoint i32* %x to i64
   %2 = bitcast i32* %x to i8*
-  call void @llvm.asan.check.memaccess(i8* %2, i32 4)
-  call void @llvm.asan.check.memaccess(i8* %2, i32 36)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 4)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 36)
   ret void
 }
 
 define void @load8(i64* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load8_rn[[RN8:.*]]
-; CHECK: callq __asan_check_store8_rn[[RN8]]
-; CHECK-NEXT: retq
+; CHECK-NEXT: callq __asan_check_store8_rn[[RN8]]
   %1 = ptrtoint i64* %x to i64
   %2 = bitcast i64* %x to i8*
-  call void @llvm.asan.check.memaccess(i8* %2, i32 6)
-  call void @llvm.asan.check.memaccess(i8* %2, i32 38)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 6)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 38)
   ret void
 }
 
 define void @load16(i128* nocapture readonly %x) {
 ; CHECK: callq __asan_check_load16_rn[[RN16:.*]]
-; CHECK: callq __asan_check_store16_rn[[RN16]]
-; CHECK-NEXT: retq
+; CHECK-NEXT: callq __asan_check_store16_rn[[RN16]]
   %1 = ptrtoint i128* %x to i64
   %2 = bitcast i128* %x to i8*
-  call void @llvm.asan.check.memaccess(i8* %2, i32 8)
-  call void @llvm.asan.check.memaccess(i8* %2, i32 40)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 8)
+  call void @llvm.x86.asan.check.memaccess(i8* %2, i32 40)
   ret void
 }
-
 ; CHECK: .type __asan_check_load1_rn[[RN1]],@function
 ; CHECK-NEXT: .weak __asan_check_load1_rn[[RN1]]
 ; CHECK-NEXT: .hidden __asan_check_load1_rn[[RN1]]
@@ -250,4 +244,4 @@
 ; CHECK-NEXT: movq [[REG:.*]], %rdi
 ; CHECK-NEXT: jmp __asan_report_store16@PLT
 
-declare void @llvm.asan.check.memaccess(i8*, i32 immarg)
+declare void @llvm.x86.asan.check.memaccess(i8*, i32 immarg)
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll
--- a/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll
@@ -13,34 +13,34 @@
   %n4 = load i32, i32* %p4, align 4
   %n8 = load i64, i64* %p8, align 8
   %n16 = load i128, i128* %p16, align 16
-; LOAD: call void @llvm.asan.check.memaccess(i8* %p1, i32 0)
+; LOAD: call void @llvm.x86.asan.check.memaccess(i8* %p1, i32 0)
 ; LOAD-NEXT: %n1 = load i8, i8* %p1, align 1
 ; LOAD-NEXT: %1 = bitcast i16* %p2 to i8*
-; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 2)
+; LOAD-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %1, i32 2)
 ; LOAD-NEXT: %n2 = load i16, i16* %p2, align 2
 ; LOAD-NEXT: %2 = bitcast i32* %p4 to i8*
-; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 4)
+; LOAD-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %2, i32 4)
 ; LOAD-NEXT: %n4 = load i32, i32* %p4, align 4
 ; LOAD-NEXT: %3 = bitcast i64* %p8 to i8*
-; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 6)
+; LOAD-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %3, i32 6)
 ; LOAD-NEXT: %n8 = load i64, i64* %p8, align 8
 ; LOAD-NEXT: %4 = bitcast i128* %p16 to i8*
-; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 8)
+; LOAD-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %4, i32 8)
 ; LOAD-NEXT: %n16 = load i128, i128* %p16, align 16
-; LOAD-KERNEL: call void @llvm.asan.check.memaccess(i8* %p1, i32 1)
+; LOAD-KERNEL: call void @llvm.x86.asan.check.memaccess(i8* %p1, i32 1)
 ; LOAD-KERNEL-NEXT: %n1 = load i8, i8* %p1, align 1
 ; LOAD-KERNEL-NEXT: %1 = bitcast i16* %p2 to i8*
-; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 3)
+; LOAD-KERNEL-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %1, i32 3)
 ; LOAD-KERNEL-NEXT: %n2 = load i16, i16* %p2, align 2
 ; LOAD-KERNEL-NEXT: %2 = bitcast i32* %p4 to i8*
-; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 5)
+; LOAD-KERNEL-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %2, i32 5)
 ; LOAD-KERNEL-NEXT: %n4 = load i32, i32* %p4, align 4
 ; LOAD-KERNEL-NEXT: %3 = bitcast i64* %p8 to i8*
-; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 7)
+; LOAD-KERNEL-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %3, i32 7)
 ; LOAD-KERNEL-NEXT: %n8 = load i64, i64* %p8, align 8
 ; LOAD-KERNEL-NEXT: %4 = bitcast i128* %p16 to i8*
-; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 9)
+; LOAD-KERNEL-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %4, i32 9)
 ; LOAD-KERNEL-NEXT: %n16 = load i128, i128* %p16, align 16
   ret void
 }
@@ -52,34 +52,34 @@
   store i32 0, i32* %p4, align 4
   store i64 0, i64* %p8, align 8
   store i128 0, i128* %p16, align 16
-; STORE: call void @llvm.asan.check.memaccess(i8* %p1, i32 32)
+; STORE: call void @llvm.x86.asan.check.memaccess(i8* %p1, i32 32)
 ; STORE-NEXT: store i8 0, i8* %p1, align 1
 ; STORE-NEXT: %1 = bitcast i16* %p2 to i8*
-; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 34)
+; STORE-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %1, i32 34)
 ; STORE-NEXT: store i16 0, i16* %p2, align 2
 ; STORE-NEXT: %2 = bitcast i32* %p4 to i8*
-; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 36)
+; STORE-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %2, i32 36)
 ; STORE-NEXT: store i32 0, i32* %p4, align 4
 ; STORE-NEXT: %3 = bitcast i64* %p8 to i8*
-; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 38)
+; STORE-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %3, i32 38)
 ; STORE-NEXT: store i64 0, i64* %p8, align 8
 ; STORE-NEXT: %4 = bitcast i128* %p16 to i8*
-; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 40)
+; STORE-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %4, i32 40)
 ; STORE-NEXT: store i128 0, i128* %p16, align 16
-; STORE-KERNEL: call void @llvm.asan.check.memaccess(i8* %p1, i32 33)
+; STORE-KERNEL: call void @llvm.x86.asan.check.memaccess(i8* %p1, i32 33)
 ; STORE-KERNEL-NEXT: store i8 0, i8* %p1, align 1
 ; STORE-KERNEL-NEXT: %1 = bitcast i16* %p2 to i8*
-; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 35)
+; STORE-KERNEL-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %1, i32 35)
 ; STORE-KERNEL-NEXT: store i16 0, i16* %p2, align 2
 ; STORE-KERNEL-NEXT: %2 = bitcast i32* %p4 to i8*
-; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 37)
+; STORE-KERNEL-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %2, i32 37)
 ; STORE-KERNEL-NEXT: store i32 0, i32* %p4, align 4
 ; STORE-KERNEL-NEXT: %3 = bitcast i64* %p8 to i8*
-; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 39)
+; STORE-KERNEL-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %3, i32 39)
 ; STORE-KERNEL-NEXT: store i64 0, i64* %p8, align 8
 ; STORE-KERNEL-NEXT: %4 = bitcast i128* %p16 to i8*
-; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 41)
+; STORE-KERNEL-NEXT: call void @llvm.x86.asan.check.memaccess(i8* %4, i32 41)
 ; STORE-KERNEL-NEXT: store i128 0, i128* %p16, align 16
 ; STORE-KERNEL-NEXT: ret void
   ret void
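With the intrinsic now target-prefixed, C++ code that emits it has to pull in the generated per-target header (hence the new IntrinsicsX86.h include above) and name the x86_ enum value. A minimal sketch of the caller side, where emitAsanCheck is a hypothetical helper, not part of the patch:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h" // x86_* IDs; no longer in the generic enum

// Hypothetical helper: emit the X86 ASan check for Addr with a packed
// access-info immediate (see the encoding sketch earlier in this patch).
static void emitAsanCheck(llvm::IRBuilder<> &IRB, llvm::Module *M,
                          llvm::Value *Addr, uint32_t Packed) {
  llvm::Function *Check = llvm::Intrinsic::getDeclaration(
      M, llvm::Intrinsic::x86_asan_check_memaccess);
  IRB.CreateCall(Check, {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                         IRB.getInt32(Packed)});
}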