Index: llvm/include/llvm/CodeGen/TargetFrameLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -335,7 +335,7 @@
   /// Check if given function is safe for not having callee saved registers.
   /// This is used when interprocedural register allocation is enabled.
   static bool isSafeForNoCSROpt(const Function &F) {
-    if (!F.hasLocalLinkage() || F.hasAddressTaken() ||
+    if (!F.isDefinitionExact() || F.hasAddressTaken() ||
         !F.hasFnAttribute(Attribute::NoRecurse))
       return false;
     // Function should not be optimized as tail call.
Index: llvm/lib/CodeGen/RegUsageInfoCollector.cpp
===================================================================
--- llvm/lib/CodeGen/RegUsageInfoCollector.cpp
+++ llvm/lib/CodeGen/RegUsageInfoCollector.cpp
@@ -101,6 +101,9 @@
 
   const Function &F = MF.getFunction();
 
+  if (!F.isDefinitionExact())
+    return false;
+
   PhysicalRegisterUsageInfo *PRUI = &getAnalysis<PhysicalRegisterUsageInfo>();
   PRUI->setTargetMachine(&TM);
 
Index: llvm/test/CodeGen/PowerPC/ipra-odr.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/ipra-odr.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -enable-ipra -print-regusage < %s 2>&1 | FileCheck %s
+;
+; Check that IPRA does not run and change register allocation in functions that
+; have callees that could be redefined at link time.
+
+@x = local_unnamed_addr global i64 0, align 4
+
+; Verify that IPRA is not called on bar
+; CHECK-NOT: bar Clobbered Registers:
+define weak_odr void @bar(i64 signext %a, i64 signext %b) local_unnamed_addr {
+entry:
+  %add = add nsw i64 %b, %a
+  store i64 %add, i64* @x, align 4
+  ret void
+}
+
+; Verify that IPRA is called on foo, and that we are restoring registers between calls to bar.
+
+; CHECK: foo Clobbered Registers: $bp $carry $ctr $fp $lr $rm $vrsave $xer $zero $bp8 $cr0 $cr1 $cr5 $cr6 $cr7 $ctr8 $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $fp8 $lr8 $qf0 $qf1 $qf2 $qf3 $qf4 $qf5 $qf6 $qf7 $qf8 $qf9 $qf10 $qf11 $qf12 $qf13 $qf14 $qf15 $qf16 $qf17 $qf18 $qf19 $qf20 $qf21 $qf22 $qf23 $qf24 $qf25 $qf26 $qf27 $qf28 $qf29 $qf30 $qf31 $r0 $r1 $r2 $r3 $r4 $r5 $r6 $r7 $r8 $r9 $r10 $r11 $r12 $r13 $v0 $v1 $v2 $v3 $v4 $v5 $v6 $v7 $v8 $v9 $v10 $v11 $v12 $v13 $v14 $v15 $v16 $v17 $v18 $v19 $vf0 $vf1 $vf2 $vf3 $vf4 $vf5 $vf6 $vf7 $vf8 $vf9 $vf10 $vf11 $vf12 $vf13 $vf14 $vf15 $vf16 $vf17 $vf18 $vf19 $vsl0 $vsl1 $vsl2 $vsl3 $vsl4 $vsl5 $vsl6 $vsl7 $vsl8 $vsl9 $vsl10 $vsl11 $vsl12 $vsl13 $vsl14 $vsl15 $vsl16 $vsl17 $vsl18 $vsl19 $vsl20 $vsl21 $vsl22 $vsl23 $vsl24 $vsl25 $vsl26 $vsl27 $vsl28 $vsl29 $vsl30 $vsl31 $vsx32 $vsx33 $vsx34 $vsx35 $vsx36 $vsx37 $vsx38 $vsx39 $vsx40 $vsx41 $vsx42 $vsx43 $vsx44 $vsx45 $vsx46 $vsx47 $vsx48 $vsx49 $vsx50 $vsx51 $vsx52 $vsx53 $vsx54 $vsx55 $vsx56 $vsx57 $vsx58 $vsx59 $vsx60 $vsx61 $vsx62 $vsx63 $x0 $x1 $x2 $x3 $x4 $x5 $x6 $x7 $x8 $x9 $x10 $x11 $x12 $x13 $zero8 $cr0eq $cr1eq $cr5eq $cr6eq $cr7eq $cr0gt $cr1gt $cr5gt $cr6gt $cr7gt $cr0lt $cr1lt $cr5lt $cr6lt $cr7lt $cr0un $cr1un $cr5un $cr6un $cr7un
+; CHECK-LABEL: @foo
+; CHECK: bl bar
+; CHECK: nop
+; CHECK: mr 3, {{[0-9]+}}
+; CHECK: mr 4, {{[0-9]+}}
+; CHECK: bl bar
+; CHECK: blr
+define signext i64 @foo(i64 signext %a, i64 signext %b) {
+entry:
+  call void @bar(i64 signext %a, i64 signext %b)
+  call void @bar(i64 signext %a, i64 signext %b)
+
+  ret i64 0
+}