diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -667,7 +667,7 @@
     assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
   }
 
-  uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
+  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
     uint64_t ArraySize = 1;
     if (AI.isArrayAllocation()) {
       const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
@@ -675,7 +675,7 @@
       ArraySize = CI->getZExtValue();
     }
     Type *Ty = AI.getAllocatedType();
-    uint64_t SizeInBytes =
+    TypeSize SizeInBytes =
         AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
     return SizeInBytes * ArraySize;
   }
@@ -1040,7 +1040,9 @@
 
   /// Collect Alloca instructions we want (and can) handle.
   void visitAllocaInst(AllocaInst &AI) {
-    if (!ASan.isInterestingAlloca(AI)) {
+    // FIXME: Handle scalable vectors instead of ignoring them.
+    if (!ASan.isInterestingAlloca(AI) ||
+        isa<ScalableVectorType>(AI.getAllocatedType())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
@@ -1254,7 +1256,7 @@
   bool IsInteresting =
       (AI.getAllocatedType()->isSized() &&
        // alloca() may be called with 0 size, ignore it.
-       ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
+       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
        // We are only interested in allocas not promotable to registers.
        // Promotable allocas are common under -O0.
        (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
@@ -3492,6 +3494,10 @@
 // constant inbounds index.
 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     Value *Addr, TypeSize TypeStoreSize) const {
+  if (TypeStoreSize.isScalable())
+    // TODO: We can use vscale_range to convert a scalable value to an
+    // upper bound on the access size.
+    return false;
   SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
   if (!ObjSizeVis.bothKnown(SizeOffset))
     return false;
   uint64_t Size = SizeOffset.first.getZExtValue();
diff --git a/llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll b/llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll
--- a/llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll
@@ -947,3 +947,67 @@
   ret void
 }
 
+declare void @clobber(ptr)
+
+define <vscale x 2 x i32> @local_alloca() sanitize_address {
+; CHECK-LABEL: @local_alloca(
+; CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 2 x i32>, align 8
+; CHECK-NEXT:    call void @clobber(ptr [[A]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 64
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
+; CHECK-NEXT:    [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
+; CHECK-NEXT:    br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    [[TMP15:%.*]] = and i64 [[TMP8]], 7
+; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
+; CHECK-NEXT:    br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
+; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
+; CHECK-NEXT:    [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
+; CHECK-NEXT:    [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
+; CHECK-NEXT:    br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
+; CHECK:       26:
+; CHECK-NEXT:    [[TMP27:%.*]] = and i64 [[TMP20]], 7
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
+; CHECK-NEXT:    br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
+; CHECK:       30:
+; CHECK-NEXT:    call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       31:
+; CHECK-NEXT:    [[RES:%.*]] = load <vscale x 2 x i32>, ptr [[A]], align 8
+; CHECK-NEXT:    ret <vscale x 2 x i32> [[RES]]
+;
+; CALLS-LABEL: @local_alloca(
+; CALLS-NEXT:    [[A:%.*]] = alloca <vscale x 2 x i32>, align 8
+; CALLS-NEXT:    call void @clobber(ptr [[A]])
+; CALLS-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CALLS-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 64
+; CALLS-NEXT:    [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
+; CALLS-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
+; CALLS-NEXT:    call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
+; CALLS-NEXT:    [[RES:%.*]] = load <vscale x 2 x i32>, ptr [[A]], align 8
+; CALLS-NEXT:    ret <vscale x 2 x i32> [[RES]]
+;
+  %a = alloca <vscale x 2 x i32>
+  call void @clobber(ptr %a)
+  %res = load <vscale x 2 x i32>, ptr %a
+  ret <vscale x 2 x i32> %res
+}
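
Note (not part of the patch): a minimal standalone sketch of how TypeSize behaves for the fixed and scalable sizes this change has to distinguish, assuming only LLVM's llvm/Support/TypeSize.h; the concrete byte counts (16 fixed, 8 x vscale) are illustrative, not taken from the patch.

#include "llvm/Support/TypeSize.h"
#include <cassert>

using llvm::TypeSize;

int main() {
  // A fixed-size alloca, e.g. <4 x i32>: 16 bytes, fully known at compile time.
  TypeSize Fixed = TypeSize::getFixed(16);
  // A scalable alloca, e.g. <vscale x 2 x i32>: 8 bytes times vscale; only the
  // minimum is known statically.
  TypeSize Scalable = TypeSize::getScalable(8);

  assert(!Fixed.isScalable() && Scalable.isScalable());
  // isZero() is valid for both kinds of quantity, which is why the patch
  // replaces the `> 0` comparison with !isZero(): an ordered comparison would
  // need the exact byte count, which a scalable size cannot provide.
  assert(!Fixed.isZero() && !Scalable.isZero());
  // Multiplying by an array count keeps the scalable flag, matching the
  // `SizeInBytes * ArraySize` computation in getAllocaSizeInBytes().
  assert((Scalable * 4).getKnownMinValue() == 32);
  return 0;
}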