diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -356,6 +356,10 @@
 static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                            cl::Hidden, cl::init(true));
 
+static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
+                                         cl::desc("Optimize callbacks"),
+                                         cl::Hidden, cl::init(false));
+
 static cl::opt<bool> ClOptSameTemp(
     "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
     cl::Hidden, cl::init(true));
@@ -657,6 +661,8 @@
   C = &(M.getContext());
   LongSize = M.getDataLayout().getPointerSizeInBits();
   IntptrTy = Type::getIntNTy(*C, LongSize);
+  Int8PtrTy = Type::getInt8PtrTy(*C);
+  Int32Ty = Type::getInt32Ty(*C);
   TargetTriple = Triple(M.getTargetTriple());
 
   Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
@@ -747,6 +753,8 @@
   bool UseAfterScope;
   AsanDetectStackUseAfterReturnMode UseAfterReturn;
   Type *IntptrTy;
+  Type *Int8PtrTy;
+  Type *Int32Ty;
   ShadowMapping Mapping;
   FunctionCallee AsanHandleNoReturnFunc;
   FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
@@ -1766,9 +1774,20 @@
   }
 
   IRBuilder<> IRB(InsertBefore);
-  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
+  const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
+
+  if (UseCalls && ClOptimizeCallbacks) {
+    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+    IRB.CreateCall(
+        Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
+        {IRB.CreatePointerCast(Addr, Int8PtrTy),
+         ConstantInt::get(Int32Ty, AccessInfo.Packed)});
+    return;
+  }
+
+  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
 
   if (UseCalls) {
     if (Exp == 0)
       IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll
@@ -0,0 +1,86 @@
+; RUN: opt < %s -asan -enable-new-pm=0 -asan-instrumentation-with-call-threshold=0 \
+; RUN:   -asan-optimize-callbacks -S | FileCheck %s --check-prefixes=LOAD,STORE
+; RUN: opt < %s -asan -enable-new-pm=0 -asan-instrumentation-with-call-threshold=0 \
+; RUN:   -asan-optimize-callbacks --asan-kernel -S | \
+; RUN:   FileCheck %s --check-prefixes=LOAD-KERNEL,STORE-KERNEL
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @load(i8* %p1, i16* %p2, i32* %p4, i64* %p8, i128* %p16)
+sanitize_address {
+  %n1 = load i8, i8* %p1, align 1
+  %n2 = load i16, i16* %p2, align 2
+  %n4 = load i32, i32* %p4, align 4
+  %n8 = load i64, i64* %p8, align 8
+  %n16 = load i128, i128* %p16, align 16
+; LOAD: call void @llvm.asan.check.memaccess(i8* %p1, i32 0)
+; LOAD-NEXT: %n1 = load i8, i8* %p1, align 1
+; LOAD-NEXT: %1 = bitcast i16* %p2 to i8*
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 2)
+; LOAD-NEXT: %n2 = load i16, i16* %p2, align 2
+; LOAD-NEXT: %2 = bitcast i32* %p4 to i8*
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 4)
+; LOAD-NEXT: %n4 = load i32, i32* %p4, align 4
+; LOAD-NEXT: %3 = bitcast i64* %p8 to i8*
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 6)
+; LOAD-NEXT: %n8 = load i64, i64* %p8, align 8
+; LOAD-NEXT: %4 = bitcast i128* %p16 to i8*
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 8)
+; LOAD-NEXT: %n16 = load i128, i128* %p16, align 16
+
+; LOAD-KERNEL: call void @llvm.asan.check.memaccess(i8* %p1, i32 1)
+; LOAD-KERNEL-NEXT: %n1 = load i8, i8* %p1, align 1
+; LOAD-KERNEL-NEXT: %1 = bitcast i16* %p2 to i8*
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 3)
+; LOAD-KERNEL-NEXT: %n2 = load i16, i16* %p2, align 2
+; LOAD-KERNEL-NEXT: %2 = bitcast i32* %p4 to i8*
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 5)
+; LOAD-KERNEL-NEXT: %n4 = load i32, i32* %p4, align 4
+; LOAD-KERNEL-NEXT: %3 = bitcast i64* %p8 to i8*
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 7)
+; LOAD-KERNEL-NEXT: %n8 = load i64, i64* %p8, align 8
+; LOAD-KERNEL-NEXT: %4 = bitcast i128* %p16 to i8*
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 9)
+; LOAD-KERNEL-NEXT: %n16 = load i128, i128* %p16, align 16
+  ret void
+}
+
+define void @store(i8* %p1, i16* %p2, i32* %p4, i64* %p8, i128* %p16)
+sanitize_address {
+  store i8 0, i8* %p1, align 1
+  store i16 0, i16* %p2, align 2
+  store i32 0, i32* %p4, align 4
+  store i64 0, i64* %p8, align 8
+  store i128 0, i128* %p16, align 16
+; STORE: call void @llvm.asan.check.memaccess(i8* %p1, i32 32)
+; STORE-NEXT: store i8 0, i8* %p1, align 1
+; STORE-NEXT: %1 = bitcast i16* %p2 to i8*
+; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 34)
+; STORE-NEXT: store i16 0, i16* %p2, align 2
+; STORE-NEXT: %2 = bitcast i32* %p4 to i8*
+; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 36)
+; STORE-NEXT: store i32 0, i32* %p4, align 4
+; STORE-NEXT: %3 = bitcast i64* %p8 to i8*
+; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 38)
+; STORE-NEXT: store i64 0, i64* %p8, align 8
+; STORE-NEXT: %4 = bitcast i128* %p16 to i8*
+; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 40)
+; STORE-NEXT: store i128 0, i128* %p16, align 16
+
+; STORE-KERNEL: call void @llvm.asan.check.memaccess(i8* %p1, i32 33)
+; STORE-KERNEL-NEXT: store i8 0, i8* %p1, align 1
+; STORE-KERNEL-NEXT: %1 = bitcast i16* %p2 to i8*
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 35)
+; STORE-KERNEL-NEXT: store i16 0, i16* %p2, align 2
+; STORE-KERNEL-NEXT: %2 = bitcast i32* %p4 to i8*
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 37)
+; STORE-KERNEL-NEXT: store i32 0, i32* %p4, align 4
+; STORE-KERNEL-NEXT: %3 = bitcast i64* %p8 to i8*
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 39)
+; STORE-KERNEL-NEXT: store i64 0, i64* %p8, align 8
+; STORE-KERNEL-NEXT: %4 = bitcast i128* %p16 to i8*
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 41)
+; STORE-KERNEL-NEXT: store i128 0, i128* %p16, align 16
+; STORE-KERNEL-NEXT: ret void
+  ret void
+}
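For readers decoding the i32 immediates in the CHECK lines: the second operand of @llvm.asan.check.memaccess is ASanAccessInfo.Packed, built by the constructor called in instrumentAddress above. Below is a minimal standalone sketch of that packing; the bit layout (bit 0: CompileKernel, bits 1-4: AccessSizeIndex, bit 5: IsWrite) is inferred from the constants this test expects, and packAccessInfo is a hypothetical helper, not an LLVM API.

    // Sketch of the assumed ASanAccessInfo.Packed encoding. For example, a
    // user-space i64 load is AccessSizeIndex 3 -> 3<<1 = 6, and a kernel i128
    // store is (4<<1) | (1<<5) | 1 = 41, matching the CHECK lines above.
    #include <cstdio>

    static constexpr unsigned kCompileKernelShift = 0;   // bit 0
    static constexpr unsigned kAccessSizeIndexShift = 1; // bits 1..4
    static constexpr unsigned kIsWriteShift = 5;         // bit 5

    // AccessSizeIndex is log2 of the access size in bytes:
    // i8 -> 0, i16 -> 1, i32 -> 2, i64 -> 3, i128 -> 4.
    static int packAccessInfo(bool IsWrite, bool CompileKernel,
                              unsigned AccessSizeIndex) {
      return (IsWrite << kIsWriteShift) |
             (CompileKernel << kCompileKernelShift) |
             (AccessSizeIndex << kAccessSizeIndexShift);
    }

    int main() {
      // Reproduces the STORE-KERNEL immediates: 33, 35, 37, 39, 41.
      for (unsigned SizeIndex = 0; SizeIndex <= 4; ++SizeIndex)
        printf("%d\n", packAccessInfo(/*IsWrite=*/true,
                                      /*CompileKernel=*/true, SizeIndex));
    }

Packing all three fields into one immediate keeps the instrumentation to a single call with two operands, which the backend lowering of the intrinsic can then decode into the appropriate shadow-memory check.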