Index: llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
===================================================================
--- llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -3582,7 +3582,7 @@
     // The 32 bit and 64 bit instructions are quite different.
     if (SpecialShift32) {
       // Left shifts use (N, 0, 31-N), right shifts use (32-N, N, 31).
-      uint64_t SH = RightShift ? 32 - ShAmt : ShAmt;
+      uint64_t SH = ShAmt == 0 ? 0 : RightShift ? 32 - ShAmt : ShAmt;
       uint64_t MB = RightShift ? ShAmt : 0;
       uint64_t ME = RightShift ? 31 : 31 - ShAmt;
       replaceInstrOperandWithImm(MI, III.OpNoForForwarding, SH);
@@ -3590,7 +3590,7 @@
           .addImm(ME);
     } else {
       // Left shifts use (N, 63-N), right shifts use (64-N, N).
-      uint64_t SH = RightShift ? 64 - ShAmt : ShAmt;
+      uint64_t SH = ShAmt == 0 ? 0 : RightShift ? 64 - ShAmt : ShAmt;
       uint64_t ME = RightShift ? ShAmt : 63 - ShAmt;
       replaceInstrOperandWithImm(MI, III.OpNoForForwarding, SH);
       MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(ME);
Index: llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -ppc-asm-full-reg-names < %s -O3 -mcpu=pwr9 -verify-machineinstrs | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+define void @special_right_shift32_0() {
+; CHECK-LABEL: special_right_shift32_0:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    lwz r3, 0(r3)
+; CHECK-NEXT:    li r4, 0
+; CHECK-NEXT:    slwi r5, r3, 0
+; CHECK-NEXT:    andi. r5, r5, 1
+; CHECK-NEXT:    bc 12, gt, .LBB0_2
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  .LBB0_1: # %bb8
+; CHECK-NEXT:    #
+; CHECK-NEXT:    addi r4, r4, 1
+; CHECK-NEXT:    srw r5, r3, r4
+; CHECK-NEXT:    andi. r5, r5, 1
+; CHECK-NEXT:    bc 4, gt, .LBB0_1
+; CHECK-NEXT:  .LBB0_2: # %bb7
+bb:
+  %tmp = load i32, i32* undef, align 4
+  br label %bb1
+
+bb1:                                              ; preds = %bb8, %bb
+  %tmp2 = phi i32 [ undef, %bb8 ], [ %tmp, %bb ]
+  %tmp3 = phi i32 [ %tmp9, %bb8 ], [ 0, %bb ]
+  %tmp4 = shl i32 1, %tmp3
+  %tmp5 = and i32 %tmp2, %tmp4
+  %tmp6 = icmp eq i32 %tmp5, 0
+  br i1 %tmp6, label %bb8, label %bb7
+
+bb7:                                              ; preds = %bb1
+  unreachable
+
+bb8:                                              ; preds = %bb1
+  %tmp9 = add nuw nsw i32 %tmp3, 1
+  br label %bb1
+}
+
+define void @special_right_shift64_0() {
+; CHECK-LABEL: special_right_shift64_0:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    ld r3, 0(r3)
+; CHECK-NEXT:    li r4, 0
+; CHECK-NEXT:    rotldi r5, r3, 0
+; CHECK-NEXT:    andi. r5, r5, 1
+; CHECK-NEXT:    bc 12, gt, .LBB1_2
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  .LBB1_1: # %bb8
+; CHECK-NEXT:    #
+; CHECK-NEXT:    addi r4, r4, 1
+; CHECK-NEXT:    srd r5, r3, r4
+; CHECK-NEXT:    andi. r5, r5, 1
+; CHECK-NEXT:    bc 4, gt, .LBB1_1
+; CHECK-NEXT:  .LBB1_2: # %bb7
+bb:
+  %tmp = load i64, i64* undef, align 4
+  br label %bb1
+
+bb1:                                              ; preds = %bb8, %bb
+  %tmp2 = phi i64 [ undef, %bb8 ], [ %tmp, %bb ]
+  %tmp3 = phi i64 [ %tmp9, %bb8 ], [ 0, %bb ]
+  %tmp4 = shl i64 1, %tmp3
+  %tmp5 = and i64 %tmp2, %tmp4
+  %tmp6 = icmp eq i64 %tmp5, 0
+  br i1 %tmp6, label %bb8, label %bb7
+
+bb7:                                              ; preds = %bb1
+  unreachable
+
+bb8:                                              ; preds = %bb1
+  %tmp9 = add nuw nsw i64 %tmp3, 1
+  br label %bb1
+}
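For reference, a minimal standalone sketch follows (not part of the patch; the helper name RotateAndMask32 and the driver loop are made up for illustration). It models the rlwinm rotate-left-then-mask semantics that the comments in the first hunk describe, and shows why the right-shift encoding SH = 32 - ShAmt breaks down at ShAmt == 0: the naive value 32 does not fit rlwinm's 5-bit SH field, whereas forcing SH to 0 still yields the correct shift-by-zero identity. The 64-bit branch (rldicl, 64 - ShAmt, 6-bit field) is analogous.

// Illustrative model only -- not LLVM code.
#include <cassert>
#include <cstdint>
#include <cstdio>

// rlwinm-style operation: rotate RS left by SH, then AND with the mask
// covering big-endian bit positions MB..ME (bit 0 is the most significant
// bit of the 32-bit word).
static uint32_t RotateAndMask32(uint32_t RS, unsigned SH, unsigned MB,
                                unsigned ME) {
  assert(SH <= 31 && MB <= ME && ME <= 31 && "fields are 5 bits wide");
  uint32_t Rot = SH ? (RS << SH) | (RS >> (32 - SH)) : RS;
  uint32_t Mask = 0;
  for (unsigned B = MB; B <= ME; ++B)
    Mask |= 1u << (31 - B); // big-endian bit B is value bit (31 - B)
  return Rot & Mask;
}

int main() {
  uint32_t X = 0x12345678u;
  for (unsigned ShAmt = 0; ShAmt < 32; ++ShAmt) {
    // Right shift by ShAmt, encoded as the patched code does:
    // SH = (ShAmt == 0 ? 0 : 32 - ShAmt), MB = ShAmt, ME = 31.
    unsigned SH = ShAmt == 0 ? 0 : 32 - ShAmt;
    assert(RotateAndMask32(X, SH, ShAmt, 31) == X >> ShAmt);
  }
  std::printf("right-shift encoding holds for all amounts, including 0\n");
  return 0;
}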