Index: lib/CodeGen/MachineFunction.cpp
===================================================================
--- lib/CodeGen/MachineFunction.cpp
+++ lib/CodeGen/MachineFunction.cpp
@@ -607,12 +607,16 @@
   for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
     BV.set(*CSR);
 
-  // Saved CSRs are not pristine.
-  const std::vector<CalleeSavedInfo> &CSI = getCalleeSavedInfo();
-  for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
-         E = CSI.end(); I != E; ++I)
-    BV.reset(I->getReg());
-
+  // If shrink-wrapping has run on this function, we must assume that CSRs are
+  // still pristine because the Save/Restore points are not in the
+  // prologue/epilogue.
+  if (!getSavePoint()) {
+    // Saved CSRs are not pristine.
+    const std::vector<CalleeSavedInfo> &CSI = getCalleeSavedInfo();
+    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
+           E = CSI.end(); I != E; ++I)
+      BV.reset(I->getReg());
+  }
   return BV;
 }
 
Index: test/CodeGen/PowerPC/transpose.ll
===================================================================
--- /dev/null
+++ test/CodeGen/PowerPC/transpose.ll
@@ -0,0 +1,243 @@
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
+; ModuleID = 'transpose.c'
+;
+; Test that callee-saved registers are not used after the restore point
+; generated by PrologueEpilogueInserter. This problem was found originally
+; because of bad assumptions made by the getPristineRegs method when shrink
+; wrapping has optimized the function to move the save/restore blocks for
+; callee-saved registers out of the function prologue and epilogue.
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+@columns = external global [0 x i32], align 4
+@lock = common global i32 0, align 4
+@htindex = common global i32 0, align 4
+@stride = common global i32 0, align 4
+@ht = common global i32* null, align 8
+@he = common global i8* null, align 8
+
+; Function Attrs: nounwind
+define void @hash() #0 {
+entry:
+  %0 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 1), align 4, !tbaa !1
+  %shl = shl i32 %0, 7
+  %1 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 2), align 4, !tbaa !1
+  %or = or i32 %shl, %1
+  %shl1 = shl i32 %or, 7
+  %2 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 3), align 4, !tbaa !1
+  %or2 = or i32 %shl1, %2
+  %3 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 7), align 4, !tbaa !1
+  %shl3 = shl i32 %3, 7
+  %4 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 6), align 4, !tbaa !1
+  %or4 = or i32 %shl3, %4
+  %shl5 = shl i32 %or4, 7
+  %5 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 5), align 4, !tbaa !1
+  %or6 = or i32 %shl5, %5
+  %cmp = icmp ugt i32 %or2, %or6
+  br i1 %cmp, label %cond.true, label %cond.false
+
+cond.true:                                        ; preds = %entry
+  %shl7 = shl i32 %or2, 7
+  %6 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4, !tbaa !1
+  %or8 = or i32 %6, %shl7
+  %conv = zext i32 %or8 to i64
+  %shl9 = shl nuw nsw i64 %conv, 21
+  %conv10 = zext i32 %or6 to i64
+  %or11 = or i64 %shl9, %conv10
+  br label %cond.end
+
+cond.false:                                       ; preds = %entry
+  %shl12 = shl i32 %or6, 7
+  %7 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4, !tbaa !1
+  %or13 = or i32 %7, %shl12
+  %conv14 = zext i32 %or13 to i64
+  %shl15 = shl nuw nsw i64 %conv14, 21
+  %conv16 = zext i32 %or2 to i64
+  %or17 = or i64 %shl15, %conv16
+  br label %cond.end
+
+cond.end:                                         ; preds = %cond.false, %cond.true
+  %cond = phi i64 [ %or11, %cond.true ], [ %or17, %cond.false ]
+  %shr.29 = lshr i64 %cond, 17
+  %conv18 = trunc i64 %shr.29 to i32
+  store i32 %conv18, i32* @lock, align 4, !tbaa !1
+  %rem = srem i64 %cond, 1050011
+  %conv19 = trunc i64 %rem to i32
+  store i32 %conv19, i32* @htindex, align 4, !tbaa !1
+  %rem20 = urem i32 %conv18, 179
+  %add = or i32 %rem20, 131072
+  store i32 %add, i32* @stride, align 4, !tbaa !1
+  ret void
+}
+
+; Function Attrs: nounwind
+; CHECK-LABEL: transpose
+;
+; Store of callee-save register saved by shrink wrapping
+; CHECK: std [[CSR:[0-9]+]], -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
+;
+; Reload of callee-save register
+; CHECK: ld [[CSR]], -[[STACK_OFFSET]](1) # 8-byte Folded Reload
+;
+; Ensure no subsequent uses of callee-save register before end of function
+; CHECK-NOT: {{[a-z]+}} [[CSR]]
+; CHECK: blr
+define signext i32 @transpose() #0 {
+entry:
+  %0 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 1), align 4, !tbaa !1
+  %shl.i = shl i32 %0, 7
+  %1 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 2), align 4, !tbaa !1
+  %or.i = or i32 %shl.i, %1
+  %shl1.i = shl i32 %or.i, 7
+  %2 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 3), align 4, !tbaa !1
+  %or2.i = or i32 %shl1.i, %2
+  %3 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 7), align 4, !tbaa !1
+  %shl3.i = shl i32 %3, 7
+  %4 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 6), align 4, !tbaa !1
+  %or4.i = or i32 %shl3.i, %4
+  %shl5.i = shl i32 %or4.i, 7
+  %5 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 5), align 4, !tbaa !1
+  %or6.i = or i32 %shl5.i, %5
+  %cmp.i = icmp ugt i32 %or2.i, %or6.i
+  br i1 %cmp.i, label %cond.true.i, label %cond.false.i
+
+cond.true.i:                                      ; preds = %entry
+  %shl7.i = shl i32 %or2.i, 7
+  %6 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4, !tbaa !1
+  %or8.i = or i32 %6, %shl7.i
+  %conv.i = zext i32 %or8.i to i64
+  %shl9.i = shl nuw nsw i64 %conv.i, 21
+  %conv10.i = zext i32 %or6.i to i64
+  %or11.i = or i64 %shl9.i, %conv10.i
+  br label %hash.exit
+
+cond.false.i:                                     ; preds = %entry
+  %shl12.i = shl i32 %or6.i, 7
+  %7 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4, !tbaa !1
+  %or13.i = or i32 %7, %shl12.i
+  %conv14.i = zext i32 %or13.i to i64
+  %shl15.i = shl nuw nsw i64 %conv14.i, 21
+  %conv16.i = zext i32 %or2.i to i64
+  %or17.i = or i64 %shl15.i, %conv16.i
+  br label %hash.exit
+
+hash.exit:                                        ; preds = %cond.true.i, %cond.false.i
+  %cond.i = phi i64 [ %or11.i, %cond.true.i ], [ %or17.i, %cond.false.i ]
+  %shr.29.i = lshr i64 %cond.i, 17
+  %conv18.i = trunc i64 %shr.29.i to i32
+  store i32 %conv18.i, i32* @lock, align 4, !tbaa !1
+  %rem.i = srem i64 %cond.i, 1050011
+  %conv19.i = trunc i64 %rem.i to i32
+  store i32 %conv19.i, i32* @htindex, align 4, !tbaa !1
+  %rem20.i = urem i32 %conv18.i, 179
+  %add.i = or i32 %rem20.i, 131072
+  store i32 %add.i, i32* @stride, align 4, !tbaa !1
+  %8 = load i32*, i32** @ht, align 8, !tbaa !5
+  %arrayidx = getelementptr inbounds i32, i32* %8, i64 %rem.i
+  %9 = load i32, i32* %arrayidx, align 4, !tbaa !1
+  %cmp1 = icmp eq i32 %9, %conv18.i
+  br i1 %cmp1, label %if.then, label %if.end
+
+if.then:                                          ; preds = %if.end.6, %if.end.5, %if.end.4, %if.end.3, %if.end.2, %if.end.1, %if.end, %hash.exit
+  %idxprom.lcssa = phi i64 [ %rem.i, %hash.exit ], [ %idxprom.1, %if.end ], [ %idxprom.2, %if.end.1 ], [ %idxprom.3, %if.end.2 ], [ %idxprom.4, %if.end.3 ], [ %idxprom.5, %if.end.4 ], [ %idxprom.6, %if.end.5 ], [ %idxprom.7, %if.end.6 ]
+  %10 = load i8*, i8** @he, align 8, !tbaa !5
+  %arrayidx3 = getelementptr inbounds i8, i8* %10, i64 %idxprom.lcssa
+  %11 = load i8, i8* %arrayidx3, align 1, !tbaa !7
+  %conv = sext i8 %11 to i32
+  br label %cleanup
+
+if.end:                                           ; preds = %hash.exit
+  %add = add nsw i32 %add.i, %conv19.i
+  %cmp4 = icmp sgt i32 %add, 1050010
+  %sub = add nsw i32 %add, -1050011
+  %sub.add = select i1 %cmp4, i32 %sub, i32 %add
+  %idxprom.1 = sext i32 %sub.add to i64
+  %arrayidx.1 = getelementptr inbounds i32, i32* %8, i64 %idxprom.1
+  %12 = load i32, i32* %arrayidx.1, align 4, !tbaa !1
+  %cmp1.1 = icmp eq i32 %12, %conv18.i
+  br i1 %cmp1.1, label %if.then, label %if.end.1
+
+cleanup:                                          ; preds = %if.end.6, %if.then
+  %retval.0 = phi i32 [ %conv, %if.then ], [ -128, %if.end.6 ]
+  ret i32 %retval.0
+
+if.end.1:                                         ; preds = %if.end
+  %add.1 = add nsw i32 %add.i, %sub.add
+  %cmp4.1 = icmp sgt i32 %add.1, 1050010
+  %sub.1 = add nsw i32 %add.1, -1050011
+  %sub.add.1 = select i1 %cmp4.1, i32 %sub.1, i32 %add.1
+  %idxprom.2 = sext i32 %sub.add.1 to i64
+  %arrayidx.2 = getelementptr inbounds i32, i32* %8, i64 %idxprom.2
+  %13 = load i32, i32* %arrayidx.2, align 4, !tbaa !1
+  %cmp1.2 = icmp eq i32 %13, %conv18.i
+  br i1 %cmp1.2, label %if.then, label %if.end.2
+
+if.end.2:                                         ; preds = %if.end.1
+  %add.2 = add nsw i32 %add.i, %sub.add.1
+  %cmp4.2 = icmp sgt i32 %add.2, 1050010
+  %sub.2 = add nsw i32 %add.2, -1050011
+  %sub.add.2 = select i1 %cmp4.2, i32 %sub.2, i32 %add.2
+  %idxprom.3 = sext i32 %sub.add.2 to i64
+  %arrayidx.3 = getelementptr inbounds i32, i32* %8, i64 %idxprom.3
+  %14 = load i32, i32* %arrayidx.3, align 4, !tbaa !1
+  %cmp1.3 = icmp eq i32 %14, %conv18.i
+  br i1 %cmp1.3, label %if.then, label %if.end.3
+
+if.end.3:                                         ; preds = %if.end.2
+  %add.3 = add nsw i32 %add.i, %sub.add.2
+  %cmp4.3 = icmp sgt i32 %add.3, 1050010
+  %sub.3 = add nsw i32 %add.3, -1050011
+  %sub.add.3 = select i1 %cmp4.3, i32 %sub.3, i32 %add.3
+  %idxprom.4 = sext i32 %sub.add.3 to i64
+  %arrayidx.4 = getelementptr inbounds i32, i32* %8, i64 %idxprom.4
+  %15 = load i32, i32* %arrayidx.4, align 4, !tbaa !1
+  %cmp1.4 = icmp eq i32 %15, %conv18.i
+  br i1 %cmp1.4, label %if.then, label %if.end.4
+
+if.end.4:                                         ; preds = %if.end.3
+  %add.4 = add nsw i32 %add.i, %sub.add.3
+  %cmp4.4 = icmp sgt i32 %add.4, 1050010
+  %sub.4 = add nsw i32 %add.4, -1050011
+  %sub.add.4 = select i1 %cmp4.4, i32 %sub.4, i32 %add.4
+  %idxprom.5 = sext i32 %sub.add.4 to i64
+  %arrayidx.5 = getelementptr inbounds i32, i32* %8, i64 %idxprom.5
+  %16 = load i32, i32* %arrayidx.5, align 4, !tbaa !1
+  %cmp1.5 = icmp eq i32 %16, %conv18.i
+  br i1 %cmp1.5, label %if.then, label %if.end.5
+
+if.end.5:                                         ; preds = %if.end.4
+  %add.5 = add nsw i32 %add.i, %sub.add.4
+  %cmp4.5 = icmp sgt i32 %add.5, 1050010
+  %sub.5 = add nsw i32 %add.5, -1050011
+  %sub.add.5 = select i1 %cmp4.5, i32 %sub.5, i32 %add.5
+  %idxprom.6 = sext i32 %sub.add.5 to i64
+  %arrayidx.6 = getelementptr inbounds i32, i32* %8, i64 %idxprom.6
+  %17 = load i32, i32* %arrayidx.6, align 4, !tbaa !1
+  %cmp1.6 = icmp eq i32 %17, %conv18.i
+  br i1 %cmp1.6, label %if.then, label %if.end.6
+
+if.end.6:                                         ; preds = %if.end.5
+  %add.6 = add nsw i32 %add.i, %sub.add.5
+  %cmp4.6 = icmp sgt i32 %add.6, 1050010
+  %sub.6 = add nsw i32 %add.6, -1050011
+  %sub.add.6 = select i1 %cmp4.6, i32 %sub.6, i32 %add.6
+  %idxprom.7 = sext i32 %sub.add.6 to i64
+  %arrayidx.7 = getelementptr inbounds i32, i32* %8, i64 %idxprom.7
+  %18 = load i32, i32* %arrayidx.7, align 4, !tbaa !1
+  %cmp1.7 = icmp eq i32 %18, %conv18.i
+  br i1 %cmp1.7, label %if.then, label %cleanup
+}
+
+attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.8.0 (trunk 247277) (llvm/trunk 247283)"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"any pointer", !3, i64 0}
+!7 = !{!3, !3, i64 0}
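
For reviewers who want to see the guarded pristine-CSR rule from the MachineFunction.cpp hunk in isolation, here is a minimal, self-contained C++ sketch. The names (BitVec, computePristineRegs, HasShrinkWrapSavePoint) are hypothetical stand-ins, not the LLVM API; the point is only the ordering of "mark all CSRs pristine" versus "clear spilled CSRs", with the latter skipped when a shrink-wrapping save point exists.

// Sketch only, under the assumptions above: when shrink wrapping has moved the
// save point out of the prologue, spilled CSRs must still be reported as
// pristine, exactly as the patched code does with if (!getSavePoint()).
#include <vector>

struct CalleeSavedInfo {
  unsigned Reg;
  unsigned getReg() const { return Reg; }
};

struct BitVec {                      // stand-in for llvm::BitVector
  std::vector<bool> Bits;
  explicit BitVec(unsigned N) : Bits(N, false) {}
  void set(unsigned I) { Bits[I] = true; }
  void reset(unsigned I) { Bits[I] = false; }
};

BitVec computePristineRegs(const std::vector<unsigned> &CalleeSavedRegs,
                           const std::vector<CalleeSavedInfo> &CSI,
                           bool HasShrinkWrapSavePoint, unsigned NumRegs) {
  BitVec BV(NumRegs);
  for (unsigned CSR : CalleeSavedRegs)   // every CSR starts out pristine
    BV.set(CSR);
  // Only clear spilled CSRs when the saves are known to be in the prologue;
  // with a shrink-wrapping save point the spill may not have executed yet.
  if (!HasShrinkWrapSavePoint)
    for (const CalleeSavedInfo &I : CSI)
      BV.reset(I.getReg());
  return BV;
}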