diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp --- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp +++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp @@ -408,6 +408,23 @@ assert(V == UI.get()); + auto RecordWrite = [&](unsigned ValOp) { + if (V == I->getOperand(ValOp)) { + // Stored the pointer - conservatively assume it may be unsafe. + US.addRange(I, UnknownRange, /*IsSafe=*/false); + return; + } + if (AI && !SL.isAliveAfter(AI, I)) { + US.addRange(I, UnknownRange, /*IsSafe=*/false); + return; + } + auto TypeSize = DL.getTypeStoreSize(I->getOperand(ValOp)->getType()); + auto AccessRange = getAccessRange(UI, Ptr, TypeSize); + bool Safe = isSafeAccess(UI, AI, TypeSize); + US.addRange(I, AccessRange, Safe); + return; + }; + switch (I->getOpcode()) { case Instruction::Load: { if (AI && !SL.isAliveAfter(AI, I)) { @@ -424,22 +441,15 @@ case Instruction::VAArg: // "va-arg" from a pointer is safe. break; - case Instruction::Store: { - if (V == I->getOperand(0)) { - // Stored the pointer - conservatively assume it may be unsafe. - US.addRange(I, UnknownRange, /*IsSafe=*/false); - break; - } - if (AI && !SL.isAliveAfter(AI, I)) { - US.addRange(I, UnknownRange, /*IsSafe=*/false); - break; - } - auto TypeSize = DL.getTypeStoreSize(I->getOperand(0)->getType()); - auto AccessRange = getAccessRange(UI, Ptr, TypeSize); - bool Safe = isSafeAccess(UI, AI, TypeSize); - US.addRange(I, AccessRange, Safe); + case Instruction::Store: + RecordWrite(/*ValOp=*/0); + break; + case Instruction::AtomicCmpXchg: + RecordWrite(/*ValOp=*/2); + break; + case Instruction::AtomicRMW: + RecordWrite(/*ValOp=*/1); break; - } case Instruction::Ret: // Information leak. 
diff --git a/llvm/test/Analysis/StackSafetyAnalysis/Inputs/ipa.ll b/llvm/test/Analysis/StackSafetyAnalysis/Inputs/ipa.ll --- a/llvm/test/Analysis/StackSafetyAnalysis/Inputs/ipa.ll +++ b/llvm/test/Analysis/StackSafetyAnalysis/Inputs/ipa.ll @@ -15,6 +15,18 @@ ret void } +define dso_local void @Cmpxchg4(ptr %p) #0 { +entry: + cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic, align 1 + ret void +} + +define dso_local void @AtomicRMW4(ptr %p) #0 { +entry: + atomicrmw add ptr %p, i32 1 monotonic, align 1 + ret void +} + define dso_local void @Write4_2(ptr %p, ptr %q) #0 { entry: store i32 0, ptr %p, align 1 diff --git a/llvm/test/Analysis/StackSafetyAnalysis/ipa.ll b/llvm/test/Analysis/StackSafetyAnalysis/ipa.ll --- a/llvm/test/Analysis/StackSafetyAnalysis/ipa.ll +++ b/llvm/test/Analysis/StackSafetyAnalysis/ipa.ll @@ -22,6 +22,8 @@ ; RUN: -r %t.summ0.bc,f1,px \ ; RUN: -r %t.summ0.bc,f2,px \ ; RUN: -r %t.summ0.bc,f3,px \ +; RUN: -r %t.summ0.bc,f3_cmpxchg,px \ +; RUN: -r %t.summ0.bc,f3_atomicrmw,px \ ; RUN: -r %t.summ0.bc,f4,px \ ; RUN: -r %t.summ0.bc,f5,px \ ; RUN: -r %t.summ0.bc,f6,px \ @@ -59,6 +61,8 @@ ; RUN: -r %t.summ0.bc,Write1Weak,x \ ; RUN: -r %t.summ0.bc,Write4_2, \ ; RUN: -r %t.summ0.bc,Write4, \ +; RUN: -r %t.summ0.bc,Cmpxchg4, \ +; RUN: -r %t.summ0.bc,AtomicRMW4, \ ; RUN: -r %t.summ0.bc,Write8, \ ; RUN: -r %t.summ0.bc,WriteAndReturn8, \ ; RUN: -r %t.summ1.bc,ExternalCall,px \ @@ -80,6 +84,8 @@ ; RUN: -r %t.summ1.bc,Write1Weak,px \ ; RUN: -r %t.summ1.bc,Write4_2,px \ ; RUN: -r %t.summ1.bc,Write4,px \ +; RUN: -r %t.summ1.bc,Cmpxchg4,px \ +; RUN: -r %t.summ1.bc,AtomicRMW4,px \ ; RUN: -r %t.summ1.bc,Write8,px \ ; RUN: -r %t.summ1.bc,WriteAndReturn8,px @@ -97,6 +103,8 @@ declare void @Write1(ptr %p) declare void @Write4(ptr %p) +declare void @Cmpxchg4(ptr %p) +declare void @AtomicRMW4(ptr %p) declare void @Write4_2(ptr %p, ptr %q) declare void @Write8(ptr %p) declare dso_local ptr @WriteAndReturn8(ptr %p) @@ -157,6 +165,36 @@ ret void } +; Another basic in-bounds. 
+define void @f3_cmpxchg() #0 { +; CHECK-LABEL: @f3_cmpxchg dso_preemptable{{$}} +; CHECK-NEXT: args uses: +; CHECK-NEXT: allocas uses: +; LOCAL-NEXT: x[4]: empty-set, @Cmpxchg4(arg0, [0,1)){{$}} +; GLOBAL-NEXT: x[4]: [0,4), @Cmpxchg4(arg0, [0,1)){{$}} +; GLOBAL-NEXT: safe accesses: +; CHECK-EMPTY: +entry: + %x = alloca i32, align 4 + call void @Cmpxchg4(ptr %x) + ret void +} + +; Another basic in-bounds. +define void @f3_atomicrmw() #0 { +; CHECK-LABEL: @f3_atomicrmw dso_preemptable{{$}} +; CHECK-NEXT: args uses: +; CHECK-NEXT: allocas uses: +; LOCAL-NEXT: x[4]: empty-set, @AtomicRMW4(arg0, [0,1)){{$}} +; GLOBAL-NEXT: x[4]: [0,4), @AtomicRMW4(arg0, [0,1)){{$}} +; GLOBAL-NEXT: safe accesses: +; CHECK-EMPTY: +entry: + %x = alloca i32, align 4 + call void @AtomicRMW4(ptr %x) + ret void +} + ; In-bounds with offset. define void @f4() #0 { ; CHECK-LABEL: @f4 dso_preemptable{{$}} @@ -660,11 +698,15 @@ ; INDEX-DAG: name: "TestRecursiveNoOffset"{{.*}} guid = [[TestRecursiveNoOffset:[-0-9]+]] ; INDEX-DAG: name: "f8left"{{.*}} guid = [[f8left:[-0-9]+]] ; INDEX-DAG: name: "Write4"{{.*}} guid = [[Write4:[-0-9]+]] +; INDEX-DAG: name: "Cmpxchg4"{{.*}} guid = [[Cmpxchg4:[-0-9]+]] +; INDEX-DAG: name: "AtomicRMW4"{{.*}} guid = [[AtomicRMW4:[-0-9]+]] ; INDEX-DAG: name: "f7"{{.*}} guid = [[f7:[-0-9]+]] ; INDEX-DAG: name: "Write1SameModule"{{.*}} guid = [[Write1SameModule:[-0-9]+]] ; INDEX-DAG: name: "Write8"{{.*}} guid = [[Write8:[-0-9]+]] ; INDEX-DAG: name: "TwoArgumentsOOBOne"{{.*}} guid = [[TwoArgumentsOOBOne:[-0-9]+]] ; INDEX-DAG: name: "f3"{{.*}} guid = [[f3:[-0-9]+]] +; INDEX-DAG: name: "f3_cmpxchg"{{.*}} guid = [[f3_cmpxchg:[-0-9]+]] +; INDEX-DAG: name: "f3_atomicrmw"{{.*}} guid = [[f3_atomicrmw:[-0-9]+]] ; INDEX-DAG: name: "f8right"{{.*}} guid = [[f8right:[-0-9]+]] ; INDEX-DAG: name: "Write4_2"{{.*}} guid = [[Write4_2:[-0-9]+]] ; INDEX-DAG: name: "RecursiveWithOffset"{{.*}} guid = [[RecursiveWithOffset:[-0-9]+]] diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll
b/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll --- a/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll @@ -23,6 +23,23 @@ ret i32 0 } +; Check a safe alloca to ensure it does not get a tag. +define i32 @test_cmpxchg(ptr %a) sanitize_hwaddress { +entry: + ; CHECK-LABEL: @test_cmpxchg + ; NOSAFETY: call {{.*}}__hwasan_generate_tag + ; NOSAFETY: call {{.*}}__hwasan_store + ; SAFETY-NOT: call {{.*}}__hwasan_generate_tag + ; SAFETY-NOT: call {{.*}}__hwasan_store + ; NOSTACK-NOT: call {{.*}}__hwasan_generate_tag + ; NOSTACK-NOT: call {{.*}}__hwasan_store + %buf.sroa.0 = alloca i8, align 4 + call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0) + %0 = cmpxchg ptr %buf.sroa.0, i8 1, i8 2 monotonic monotonic + call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0) + ret i32 0 +} + ; Check a non-safe alloca to ensure it gets a tag. define i32 @test_use(ptr %a) sanitize_hwaddress { entry: