diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -3895,24 +3895,36 @@
   unsigned Opc = MI->getOpcode();
   unsigned SrcOpc = SrcMI->getOpcode();
   if ((SrcOpc == PPC::RLWINM8 || SrcOpc == PPC::RLWINM8_rec) &&
-      (Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec)) {
+      (Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec ||
+       Opc == PPC::ANDI8_rec)) {
     Is64Bit = true;
     return true;
   }
   if ((SrcOpc == PPC::RLWINM || SrcOpc == PPC::RLWINM_rec) &&
-      (Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec))
+      (Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::ANDI_rec))
     return true;
   return false;
 }
 
-// This function tries to combine two RLWINMs. We not only perform such
-// optimization in SSA, but also after RA, since some RLWINM is generated after
-// RA.
+static void getRLWINMOps(MachineInstr &MI, uint32_t &SH, uint32_t &MB,
+                         uint32_t &ME) {
+  assert((MI.getOperand(2).isImm() && MI.getOperand(3).isImm() &&
+          MI.getOperand(4).isImm()) &&
+         "Invalid PPC::RLWINM Instruction!");
+  SH = MI.getOperand(2).getImm();
+  MB = MI.getOperand(3).getImm();
+  ME = MI.getOperand(4).getImm();
+  assert((ME < 32 && MB < 32) && "Invalid PPC::RLWINM Instruction!");
+}
+
+// This function tries to optimize rotate and mask instructions, both in SSA
+// and post-RA.
 bool PPCInstrInfo::simplifyRotateAndMaskInstr(MachineInstr &MI,
                                               MachineInstr *&ToErase) const {
   unsigned UseOpc = MI.getOpcode();
   if (UseOpc != PPC::RLWINM && UseOpc != PPC::RLWINM_rec &&
-      UseOpc != PPC::RLWINM8 && UseOpc != PPC::RLWINM8_rec)
+      UseOpc != PPC::RLWINM8 && UseOpc != PPC::RLWINM8_rec &&
+      UseOpc != PPC::ANDI_rec && UseOpc != PPC::ANDI8_rec)
     return false;
 
   // Find the source MI.
@@ -3949,6 +3961,8 @@
   bool IsMIUseRegKilled = MI.getOperand(1).isKill();
   if (MRI->isSSA()) {
     CanErase = !SrcMI->hasImplicitDef() && MRI->hasOneNonDBGUse(FoldingReg);
+    if (!CanErase && !MI.getOperand(1).isKill())
+      return false;
   } else {
     bool KillFwdDefMI = !OtherIntermediateUse && IsMIUseRegKilled;
     CanErase = KillFwdDefMI && !SrcMI->hasImplicitDef();
@@ -3962,18 +3976,48 @@
     return false;
   }
 
-  assert((MI.getOperand(2).isImm() && MI.getOperand(3).isImm() &&
-          MI.getOperand(4).isImm() && SrcMI->getOperand(2).isImm() &&
-          SrcMI->getOperand(3).isImm() && SrcMI->getOperand(4).isImm()) &&
-         "Invalid PPC::RLWINM Instruction!");
-  uint64_t SHSrc = SrcMI->getOperand(2).getImm();
-  uint64_t SHMI = MI.getOperand(2).getImm();
-  uint64_t MBSrc = SrcMI->getOperand(3).getImm();
-  uint64_t MBMI = MI.getOperand(3).getImm();
-  uint64_t MESrc = SrcMI->getOperand(4).getImm();
-  uint64_t MEMI = MI.getOperand(4).getImm();
-  assert((MEMI < 32 && MESrc < 32 && MBMI < 32 && MBSrc < 32) &&
-         "Invalid PPC::RLWINM Instruction!");
+  uint32_t SHSrc, MBSrc, MESrc;
+  getRLWINMOps(*SrcMI, SHSrc, MBSrc, MESrc);
+  // Note that in APInt, the least significant bit is at index 0, while in
+  // PowerPC ISA, the least significant bit is at index 63.
+  APInt MaskSrc = APInt::getBitsSetWithWrap(32, 32 - MESrc - 1, 32 - MBSrc);
+  // Mark the special cases of all bits in a 64-bit register or the low 32 bits
+  // in a 64-bit register.
+  bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);
+  bool Simplified = false;
+  uint32_t SHMI, MBMI, MEMI, NewMB, NewME;
+  APInt FinalMask;
+
+  // Pattern 1: RLWINM_ + RLWINM_
+  if (MI.getOpcode() == PPC::RLWINM || MI.getOpcode() == PPC::RLWINM_rec ||
+      MI.getOpcode() == PPC::RLWINM8 || MI.getOpcode() == PPC::RLWINM8_rec) {
+    getRLWINMOps(MI, SHMI, MBMI, MEMI);
+    // For other MBMI > MEMI cases, just return.
+    if ((MBMI > MEMI) && !SrcMaskFull)
+      return false;
+    // Handle MBMI <= MEMI cases.
+    // MI only needs the low 32 bits of SrcMI, so only consider the low 32
+    // bits of the SrcMI mask.
+    APInt MaskMI = APInt::getBitsSetWithWrap(32, 32 - MEMI - 1, 32 - MBMI);
+    APInt RotatedSrcMask = MaskSrc.rotl(SHMI);
+    FinalMask = RotatedSrcMask & MaskMI;
+  }
+  // Pattern 2: RLWINM_ + ANDI_
+  else {
+    assert(MI.getOperand(2).isImm() && "Invalid PPC::ANDI_rec Instruction!");
+    uint32_t AndImm = MI.getOperand(2).getImm();
+    assert(isUIntN(16, AndImm) && "Invalid PPC::ANDI_rec Instruction!");
+    // We can treat ANDI_rec as RLWINM_rec with SH = 0 if AndImm is a
+    // non-empty run of contiguous ones with all other bits zero
+    // (isRunOfOnes).
+    SHMI = 0;
+    FinalMask = MaskSrc & AndImm;
+    // If AndImm is not a run of ones, we can only do the folding when
+    // FinalMask is zero.
+    if (!isShiftedMask_32(AndImm) && FinalMask != 0)
+      return false;
+  }
+
   // If MBMI is bigger than MEMI, we always can not get run of ones.
   // RotatedSrcMask non-wrap:
   //                 0........31|32........63
   // RotatedSrcMask:   B---E        B---E
   // MaskMI:         -----------|--E  B------
   // Result:           -----          ---      (Bad candidate)
   //
   // RotatedSrcMask wrap:
   //                 0........31|32........63
   // RotatedSrcMask: --E  B----|--E  B----
   // MaskMI:         -----------|--E  B------
   // Result:         --E  -----|--E  ---      (Bad candidate)
   //
   // One special case is RotatedSrcMask is a full set mask.
   // RotatedSrcMask full:
   //                 0........31|32........63
   // RotatedSrcMask: ------EB---|-------EB---
   // MaskMI:         -----------|--E  B------
   // Result:         -----------|---  -------  (Good candidate)
@@ -3994,26 +4038,6 @@
-  // Mark the special cases of all bits in a 64-bit register or the low 32 bits
-  // in a 64-bit register.
-  bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);
-
-  // For other MBMI > MEMI cases, just return.
-  if ((MBMI > MEMI) && !SrcMaskFull)
-    return false;
-
-  // Handle MBMI <= MEMI cases.
-  APInt MaskMI = APInt::getBitsSetWithWrap(32, 32 - MEMI - 1, 32 - MBMI);
-  // In MI, we only need low 32 bits of SrcMI, just consider about low 32
-  // bit of SrcMI mask. Note that in APInt, the least significant bit is at
-  // index 0, while in PowerPC ISA, the least significant bit is at index 63.
-  APInt MaskSrc = APInt::getBitsSetWithWrap(32, 32 - MESrc - 1, 32 - MBSrc);
-
-  APInt RotatedSrcMask = MaskSrc.rotl(SHMI);
-  APInt FinalMask = RotatedSrcMask & MaskMI;
-  uint32_t NewMB, NewME;
-  bool Simplified = false;
-
   // If final mask is 0, replace MI with LI/LI8 0 or ANDI_rec/ANDI8_rec 0.
   if (FinalMask.isZero()) {
     Simplified = true;
@@ -4023,7 +4047,8 @@
     LoadImmediateInfo LII;
     LII.Imm = 0;
     LII.Is64Bit = Is64Bit;
-    LII.SetCR = (UseOpc == PPC::RLWINM_rec || UseOpc == PPC::RLWINM8_rec);
+    LII.SetCR = (UseOpc == PPC::RLWINM_rec || UseOpc == PPC::RLWINM8_rec ||
+                 UseOpc == PPC::ANDI_rec || UseOpc == PPC::ANDI8_rec);
     replaceInstrWithLI(MI, LII);
     if (LII.SetCR) {
       MI.getOperand(1).setReg(ForwardReg);
@@ -4051,8 +4076,13 @@
     LLVM_DEBUG(MI.dump());
 
     MI.getOperand(2).setImm((SHSrc + SHMI) % 32);
+    if (UseOpc == PPC::ANDI_rec || UseOpc == PPC::ANDI8_rec) {
+      MI.setDesc(get(Is64Bit ? PPC::RLWINM8_rec : PPC::RLWINM_rec));
+      MI.addOperand(MachineOperand::CreateImm(NewMB));
+      MI.addOperand(MachineOperand::CreateImm(NewME));
+    }
     // If SrcMI mask is full, do not update MBMI and MEMI.
-    if (!SrcMaskFull) {
+    else if (!SrcMaskFull) {
       MI.getOperand(3).setImm(NewMB);
       MI.getOperand(4).setImm(NewME);
     }
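A note on the mask arithmetic the new code relies on. RLWINM rD, rS, SH, MB, ME rotates rS left by SH and ANDs it with the mask whose PPC bits MB through ME are set; PPC numbers bits from the most significant end, so PPC bit b of a 32-bit word is plain bit 31 - b, which is why the APInt calls above use 32 - ME - 1 and 32 - MB. Two back-to-back rotate-and-mask operations compose into a single rotate by (SHSrc + SHMI) % 32 under the mask rotl(MaskSrc, SHMI) & MaskMI, and ANDI_rec is the SHMI = 0 case with the immediate as the mask; the result is expressible as one RLWINM exactly when that final mask is zero or a (possibly wrapped) run of ones. Below is a minimal standalone sketch of these semantics, using plain uint32_t and hypothetical helper names rather than the in-tree APInt code:

    // Standalone sketch only; helper names are hypothetical, not LLVM APIs.
    #include <cassert>
    #include <cstdint>

    // Mask with PPC bits MB..ME set; wraps around when MB > ME. Mirrors
    // APInt::getBitsSetWithWrap(32, 32 - ME - 1, 32 - MB).
    static uint32_t ppcMask(unsigned MB, unsigned ME) {
      assert(MB < 32 && ME < 32 && "mask bounds must be in [0, 31]");
      uint32_t FromMB = 0xFFFFFFFFu >> MB;        // PPC bits MB..31
      uint32_t UpToME = 0xFFFFFFFFu << (31 - ME); // PPC bits 0..ME
      return MB <= ME ? (FromMB & UpToME) : (FromMB | UpToME);
    }

    static uint32_t rotl32(uint32_t V, unsigned N) {
      N %= 32;
      return N == 0 ? V : (V << N) | (V >> (32 - N));
    }

    // RLWINM rD, rS, SH, MB, ME.
    static uint32_t rlwinm(uint32_t Src, unsigned SH, unsigned MB, unsigned ME) {
      return rotl32(Src, SH) & ppcMask(MB, ME);
    }

    // Composition used by simplifyRotateAndMaskInstr: for any Src,
    //   rlwinm(rlwinm(Src, SHSrc, MBSrc, MESrc), SHMI, MBMI, MEMI)
    //     == rotl32(Src, (SHSrc + SHMI) % 32)
    //        & (rotl32(ppcMask(MBSrc, MESrc), SHMI) & ppcMask(MBMI, MEMI));
    // an ANDI with immediate Imm behaves as the SHMI == 0 case with mask Imm.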
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -1081,6 +1081,8 @@
       combineSEXTAndSHL(MI, ToErase);
       break;
     }
+    case PPC::ANDI_rec:
+    case PPC::ANDI8_rec:
     case PPC::RLWINM:
     case PPC::RLWINM_rec:
     case PPC::RLWINM8:
diff --git a/llvm/test/CodeGen/PowerPC/fold-rlwinm-1.ll b/llvm/test/CodeGen/PowerPC/fold-rlwinm-1.ll
--- a/llvm/test/CodeGen/PowerPC/fold-rlwinm-1.ll
+++ b/llvm/test/CodeGen/PowerPC/fold-rlwinm-1.ll
@@ -29,10 +29,10 @@
 ; CHECK-NEXT:    addis r4, r2, res2@toc@ha
 ; CHECK-NEXT:    addis r6, r2, res@toc@ha
 ; CHECK-NEXT:    cntlzw r3, r3
-; CHECK-NEXT:    srwi r5, r3, 5
-; CHECK-NEXT:    rlwinm r3, r3, 14, 0, 12
-; CHECK-NEXT:    stw r5, res2@toc@l(r4)
-; CHECK-NEXT:    stw r3, res@toc@l(r6)
+; CHECK-NEXT:    srwi r3, r3, 5
+; CHECK-NEXT:    slwi r5, r3, 19
+; CHECK-NEXT:    stw r3, res2@toc@l(r4)
+; CHECK-NEXT:    stw r5, res@toc@l(r6)
 ; CHECK-NEXT:    blr
 entry:
   %cmp = icmp eq i32 %var1, 1
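A worked reading of the fold-rlwinm-1.ll change above: srwi rD, rS, n is the mnemonic for rlwinm rD, rS, 32 - n, n, 31, and slwi rD, rS, n for rlwinm rD, rS, n, 0, 31 - n. The old output folded the srwi-by-5 and a following shift-left-by-19 into rlwinm r3, r3, 14, 0, 12 ((27 + 19) % 32 = 14, final mask PPC bits 0..12), reading the pre-shift value a second time. The updated output instead keeps srwi r3, r3, 5 and derives the second store's value from it with slwi r5, r3, 19. This appears to follow from the new SSA bail-out above, which skips the fold when the intermediate rotate result has other uses (here, the store to res2) and so cannot be erased.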
diff --git a/llvm/test/CodeGen/PowerPC/fold-rlwinm-after-ra.mir b/llvm/test/CodeGen/PowerPC/fold-rlwinm-after-ra.mir
--- a/llvm/test/CodeGen/PowerPC/fold-rlwinm-after-ra.mir
+++ b/llvm/test/CodeGen/PowerPC/fold-rlwinm-after-ra.mir
@@ -192,3 +192,69 @@
     dead renamable $r3 = RLWINM killed renamable $r3, 19, 0, 12, implicit-def $x3
     BLR8 implicit $lr8, implicit $rm, implicit killed $x2, implicit killed $x3
 ...
+---
+name: testFoldRLWINMAndANDI
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $r3
+    ; CHECK-LABEL: name: testFoldRLWINMAndANDI
+    ; CHECK: liveins: $r3
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: dead renamable $r3 = RLWINM_rec killed renamable $r3, 8, 29, 29, implicit-def $cr0
+    ; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit killed $cr0
+    $r3 = RLWINM killed $r3, 8, 28, 31
+    dead renamable $r3 = ANDI_rec killed renamable $r3, 4, implicit-def $cr0
+    BLR8 implicit $lr8, implicit $rm, implicit killed $cr0
+...
+---
+name: testFoldRLWINMAndANDIToZero
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $r3
+    ; CHECK-LABEL: name: testFoldRLWINMAndANDIToZero
+    ; CHECK: liveins: $r3
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: dead renamable $r3 = ANDI_rec killed renamable $r3, 0, implicit-def $cr0
+    ; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit killed $cr0
+    $r3 = RLWINM killed $r3, 4, 28, 31
+    dead renamable $r3 = ANDI_rec killed renamable $r3, 16, implicit-def $cr0
+    BLR8 implicit $lr8, implicit $rm, implicit killed $cr0
+...
+---
+name: testRLWINMANDIInvalidMask
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $r3
+    ; CHECK-LABEL: name: testRLWINMANDIInvalidMask
+    ; CHECK: liveins: $r3
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: $r3 = RLWINM killed $r3, 4, 20, 31
+    ; CHECK-NEXT: dead renamable $r3 = ANDI_rec killed renamable $r3, 9, implicit-def $cr0
+    ; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit killed $cr0
+    $r3 = RLWINM killed $r3, 4, 20, 31
+    dead renamable $r3 = ANDI_rec killed renamable $r3, 9, implicit-def $cr0
+    BLR8 implicit $lr8, implicit $rm, implicit killed $cr0
+...
+---
+name: testCanNotFoldRLWINMAndANDI
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $r2, $r3
+    ; CHECK-LABEL: name: testCanNotFoldRLWINMAndANDI
+    ; CHECK: liveins: $r2, $r3, $x2
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: STD $x2, -8, $x1 :: (store (s64) into %stack.0)
+    ; CHECK-NEXT: $r3 = RLWINM killed $r2, 4, 28, 31
+    ; CHECK-NEXT: $r2 = LI 0, implicit-def $x2
+    ; CHECK-NEXT: $x2 = LD -8, $x1 :: (load (s64) from %stack.0)
+    ; CHECK-NEXT: dead renamable $r3 = ANDI_rec killed renamable $r3, 4, implicit-def $cr0
+    ; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit killed $cr0, implicit $x2
+    $r3 = RLWINM killed $r2, 4, 28, 31
+    $r2 = LI 0, implicit-def $x2
+    dead renamable $r3 = ANDI_rec killed renamable $r3, 4, implicit-def $cr0
+    BLR8 implicit $lr8, implicit $rm, implicit killed $cr0, implicit killed $x2
+...
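Concrete traces of the four post-RA tests above, in the notation of the earlier sketch: in testFoldRLWINMAndANDI, RLWINM ..., 8, 28, 31 gives MaskSrc = ppcMask(28, 31) = 0xF, the ANDI_rec with 4 acts as SHMI = 0, and the final mask 0xF & 0x4 = 0x4 is a single run of ones at PPC bit 29, so the pair folds to RLWINM_rec ..., 8, 29, 29. In testFoldRLWINMAndANDIToZero the final mask is 0xF & 0x10 = 0, so the pair collapses to ANDI_rec $r3, 0, which still defines CR0. In testRLWINMANDIInvalidMask the final mask is 0xFFF & 0x9 = 0x9, nonzero but not a run of ones, so no fold happens. testCanNotFoldRLWINMAndANDI exercises the safety checks rather than the mask math: the rotate's source $r2 is redefined by the LI between the two instructions, so a combined instruction, which would need to read $r2 at the ANDI, is rejected.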
diff --git a/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir b/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir
--- a/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir
+++ b/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir
@@ -101,9 +101,9 @@
     %0:g8rc = COPY $x3
     %1:gprc = COPY %0.sub_32:g8rc
     %2:gprc = RLWINM killed %1:gprc, 27, 5, 31
-    ; CHECK: %2:gprc = RLWINM %1, 27, 5, 31
+    ; CHECK: %2:gprc = RLWINM killed %1, 27, 5, 31
     %3:gprc = RLWINM %2:gprc, 19, 0, 12
-    ; CHECK: %3:gprc = RLWINM killed %1, 14, 0, 12
+    ; CHECK: %3:gprc = RLWINM %2, 19, 0, 12
     STW %3:gprc, %2:gprc, 100
     ; CHECK: STW %3, %2, 100
     BLR8 implicit $lr8, implicit $rm
@@ -149,7 +149,7 @@
     %1:gprc = COPY %0.sub_32:g8rc
     %2:gprc = RLWINM_rec %1:gprc, 27, 5, 10, implicit-def $cr0
     ; CHECK: %2:gprc = RLWINM_rec %1, 27, 5, 10, implicit-def $cr0
-    %3:gprc = RLWINM_rec %2:gprc, 8, 5, 10, implicit-def $cr0
+    %3:gprc = RLWINM_rec killed %2:gprc, 8, 5, 10, implicit-def $cr0
     ; CHECK: %3:gprc = ANDI_rec %1, 0, implicit-def $cr0
     BLR8 implicit $lr8, implicit $rm
 ...
@@ -192,8 +192,7 @@
     ; CHECK: liveins: $x3
     ; CHECK: [[COPY:%[0-9]+]]:g8rc = COPY $x3
     ; CHECK: [[COPY1:%[0-9]+]]:gprc = COPY [[COPY]].sub_32
-    ; CHECK: [[RLWINM:%[0-9]+]]:gprc = RLWINM [[COPY1]], 4, 28, 31
-    ; CHECK: [[ANDI_rec:%[0-9]+]]:gprc = ANDI_rec [[RLWINM]], 4, implicit-def $cr0
+    ; CHECK: [[RLWINM_rec:%[0-9]+]]:gprc = RLWINM_rec [[COPY1]], 4, 29, 29, implicit-def $cr0
    ; CHECK: BLR8 implicit $lr8, implicit $rm
     %0:g8rc = COPY $x3
     %1:gprc = COPY %0.sub_32:g8rc
diff --git a/llvm/test/CodeGen/PowerPC/is_fpclass.ll b/llvm/test/CodeGen/PowerPC/is_fpclass.ll
--- a/llvm/test/CodeGen/PowerPC/is_fpclass.ll
+++ b/llvm/test/CodeGen/PowerPC/is_fpclass.ll
@@ -303,8 +303,7 @@
 ; CHECK-NEXT:    xscvdpspn 0, 1
 ; CHECK-NEXT:    xststdcsp 1, 1, 64
 ; CHECK-NEXT:    mffprwz 3, 0
-; CHECK-NEXT:    srwi 3, 3, 22
-; CHECK-NEXT:    andi. 3, 3, 1
+; CHECK-NEXT:    rlwinm. 3, 3, 10, 31, 31
 ; CHECK-NEXT:    li 3, 1
 ; CHECK-NEXT:    crnand 20, 6, 1
 ; CHECK-NEXT:    isel 3, 0, 3, 20
@@ -334,8 +333,7 @@
 ; CHECK-NEXT:    li 3, 12
 ; CHECK-NEXT:    xststdcqp 1, 2, 64
 ; CHECK-NEXT:    vextuwrx 3, 3, 2
-; CHECK-NEXT:    srwi 3, 3, 15
-; CHECK-NEXT:    andi. 3, 3, 1
+; CHECK-NEXT:    rlwinm. 3, 3, 17, 31, 31
 ; CHECK-NEXT:    li 3, 1
 ; CHECK-NEXT:    crnand 20, 6, 1
 ; CHECK-NEXT:    isel 3, 0, 3, 20
diff --git a/llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll b/llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll
--- a/llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-spill-crgt.ll
@@ -17,8 +17,7 @@
 define dso_local fastcc void @P10_Spill_CR_GT() unnamed_addr {
 ; CHECK-LABEL: P10_Spill_CR_GT:
-; CHECK:         .localentry P10_Spill_CR_GT, 1
-; CHECK-NEXT:  # %bb.0: # %bb
+; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    mfcr r12
 ; CHECK-NEXT:    mflr r0
 ; CHECK-NEXT:    std r0, 16(r1)
@@ -35,12 +34,10 @@
 ; CHECK-NEXT:    std r29, 40(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    std r30, 48(r1) # 8-byte Folded Spill
 ; CHECK-NEXT:    paddi r29, 0, .LJTI0_0@PCREL, 1
-; CHECK-NEXT:    srwi r4, r3, 4
-; CHECK-NEXT:    srwi r3, r3, 5
-; CHECK-NEXT:    andi. r4, r4, 1
+; CHECK-NEXT:    rlwinm. r4, r3, 28, 31, 31
 ; CHECK-NEXT:    li r4, 0
 ; CHECK-NEXT:    crmove 4*cr2+gt, gt
-; CHECK-NEXT:    andi. r3, r3, 1
+; CHECK-NEXT:    rlwinm. r3, r3, 27, 31, 31
 ; CHECK-NEXT:    crmove 4*cr2+lt, gt
 ; CHECK-NEXT:    cmplwi cr3, r3, 336
 ; CHECK-NEXT:    li r3, 0
@@ -225,12 +222,10 @@
 ; CHECK-BE-NEXT:    lwz r3, 0(r3)
 ; CHECK-BE-NEXT:    std r29, 120(r1) # 8-byte Folded Spill
 ; CHECK-BE-NEXT:    std r30, 128(r1) # 8-byte Folded Spill
-; CHECK-BE-NEXT:    srwi r4, r3, 4
-; CHECK-BE-NEXT:    srwi r3, r3, 5
-; CHECK-BE-NEXT:    andi. r4, r4, 1
+; CHECK-BE-NEXT:    rlwinm. r4, r3, 28, 31, 31
 ; CHECK-BE-NEXT:    li r4, 0
 ; CHECK-BE-NEXT:    crmove 4*cr2+gt, gt
-; CHECK-BE-NEXT:    andi. r3, r3, 1
+; CHECK-BE-NEXT:    rlwinm. r3, r3, 27, 31, 31
 ; CHECK-BE-NEXT:    crmove 4*cr2+lt, gt
 ; CHECK-BE-NEXT:    cmplwi cr3, r3, 336
 ; CHECK-BE-NEXT:    li r3, 0
diff --git a/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll b/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
--- a/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
@@ -65,8 +65,7 @@
 ; CHECK-NEXT:    crnot 4*cr2+eq, eq
 ; CHECK-NEXT:    bl call_2@notoc
 ; CHECK-NEXT:    mr r27, r3
-; CHECK-NEXT:    srwi r3, r28, 4
-; CHECK-NEXT:    andi. r3, r3, 1
+; CHECK-NEXT:    rlwinm. r3, r28, 28, 31, 31
 ; CHECK-NEXT:    crmove 4*cr2+gt, gt
 ; CHECK-NEXT:    bc 12, 4*cr5+lt, .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %bb9
@@ -74,8 +73,7 @@
 ; CHECK-NEXT:    mr r4, r30
 ; CHECK-NEXT:    bl call_3@notoc
 ; CHECK-NEXT:  .LBB0_2: # %bb12
-; CHECK-NEXT:    srwi r3, r28, 7
-; CHECK-NEXT:    andi. r3, r3, 1
+; CHECK-NEXT:    rlwinm. r3, r28, 25, 31, 31
 ; CHECK-NEXT:    crmove 4*cr2+un, gt
 ; CHECK-NEXT:    bc 12, 4*cr2+eq, .LBB0_7
 ; CHECK-NEXT:  # %bb.3: # %bb37
@@ -213,8 +211,7 @@
 ; CHECK-BE-NEXT:    bl call_2
 ; CHECK-BE-NEXT:    nop
 ; CHECK-BE-NEXT:    mr r27, r3
-; CHECK-BE-NEXT:    srwi r3, r28, 4
-; CHECK-BE-NEXT:    andi. r3, r3, 1
+; CHECK-BE-NEXT:    rlwinm. r3, r28, 28, 31, 31
 ; CHECK-BE-NEXT:    crmove 4*cr2+gt, gt
 ; CHECK-BE-NEXT:    bc 12, 4*cr5+lt, .LBB0_2
 ; CHECK-BE-NEXT:  # %bb.1: # %bb9
@@ -223,8 +220,7 @@
 ; CHECK-BE-NEXT:    mr r4, r30
 ; CHECK-BE-NEXT:    bl call_3
 ; CHECK-BE-NEXT:    nop
 ; CHECK-BE-NEXT:  .LBB0_2: # %bb12
-; CHECK-BE-NEXT:    srwi r3, r28, 7
-; CHECK-BE-NEXT:    andi. r3, r3, 1
+; CHECK-BE-NEXT:    rlwinm. r3, r28, 25, 31, 31
 ; CHECK-BE-NEXT:    crmove 4*cr2+un, gt
 ; CHECK-BE-NEXT:    bc 12, 4*cr2+eq, .LBB0_7
 ; CHECK-BE-NEXT:  # %bb.3: # %bb37
diff --git a/llvm/test/CodeGen/PowerPC/vsx_builtins.ll b/llvm/test/CodeGen/PowerPC/vsx_builtins.ll
--- a/llvm/test/CodeGen/PowerPC/vsx_builtins.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx_builtins.ll
@@ -142,8 +142,7 @@
 ; CHECK-NEXT:    xvtdivdp cr0, v2, v3
 ; CHECK-NEXT:    li r4, 222
 ; CHECK-NEXT:    mfocrf r3, 128
-; CHECK-NEXT:    srwi r3, r3, 28
-; CHECK-NEXT:    andi. r3, r3, 2
+; CHECK-NEXT:    rlwinm. r3, r3, 4, 30, 30
 ; CHECK-NEXT:    li r3, 22
 ; CHECK-NEXT:    iseleq r3, r4, r3
 ; CHECK-NEXT:    blr
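The remaining .ll updates are all instances of one identity: srwi rD, rS, n is rlwinm rD, rS, 32 - n, n, 31, and a following andi. that keeps a single bit 2^k selects PPC bit 31 - k, so the pair is one rlwinm. rD, rS, 32 - n, 31 - k, 31 - k. That gives srwi 4 / andi. 1 -> rlwinm. 28, 31, 31; srwi 5 -> rlwinm. 27, 31, 31; srwi 7 -> rlwinm. 25, 31, 31; srwi 22 -> rlwinm. 10, 31, 31; srwi 15 -> rlwinm. 17, 31, 31; and srwi 28 / andi. 2 -> rlwinm. 4, 30, 30 in the vsx_builtins test. A quick standalone self-check of one instance, reusing the hypothetical rlwinm helper from the earlier sketch (not in-tree code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      for (uint32_t V : {0u, 1u, 0x12345678u, 0xFFFFFFFFu}) {
        // srwi r, V, 5 ; andi. r, r, 1  ==  rlwinm. r, V, 27, 31, 31
        uint32_t SrwiAndi = rlwinm(V, 27, 5, 31) & 1u;
        uint32_t Folded = rlwinm(V, 27, 31, 31);
        if (SrwiAndi != Folded)
          std::printf("mismatch for %#x\n", V);
      }
      return 0;
    }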