Index: llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -3996,7 +3996,8 @@
   }
 
   if (LC != RTLIB::UNKNOWN_LIBCALL && TLI.getLibcallName(LC)) {
-    SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
+    SDValue ShAmt = DAG.getAnyExtOrTrunc(N->getOperand(1), dl, MVT::i32);
+    SDValue Ops[2] = {N->getOperand(0), ShAmt};
     TargetLowering::MakeLibCallOptions CallOptions;
     CallOptions.setSExt(isSigned);
     SplitInteger(TLI.makeLibCall(DAG, LC, VT, Ops, CallOptions, dl).first, Lo, Hi);
Index: llvm/test/CodeGen/AArch64/shift_minsize.ll
===================================================================
--- llvm/test/CodeGen/AArch64/shift_minsize.ll
+++ llvm/test/CodeGen/AArch64/shift_minsize.ll
@@ -59,7 +59,6 @@
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov w2, w2
 ; CHECK-NEXT:    bl __ashlti3
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -112,7 +111,6 @@
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov w2, w2
 ; CHECK-NEXT:    bl __lshrti3
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
Index: llvm/test/CodeGen/RISCV/shifts.ll
===================================================================
--- llvm/test/CodeGen/RISCV/shifts.ll
+++ llvm/test/CodeGen/RISCV/shifts.ll
@@ -7,6 +7,8 @@
 ; Basic shift support is tested as part of ALU.ll. This file ensures that
 ; shifts which may not be supported natively are lowered properly.
 
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+
 define i64 @lshr64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: lshr64:
 ; RV32I:       # %bb.0:
@@ -142,6 +144,50 @@
   ret i64 %1
 }
 
+define i64 @fshr64_minsize(i64 %a, i64 %b) minsize nounwind {
+; RV32I-LABEL: fshr64_minsize:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    andi a2, a2, 63
+; RV32I-NEXT:    call __lshrdi3@plt
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    mv s4, a1
+; RV32I-NEXT:    neg a0, s0
+; RV32I-NEXT:    andi a2, a0, 63
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    call __ashldi3@plt
+; RV32I-NEXT:    or a0, s3, a0
+; RV32I-NEXT:    or a1, s4, a1
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fshr64_minsize:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srl a2, a0, a1
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ret
+  %res = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
+  ret i64 %res
+}
+
 define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: lshr128:
 ; RV32I:       # %bb.0:
@@ -155,104 +201,104 @@
 ; RV32I-NEXT:    addi a3, zero, 32
 ; RV32I-NEXT:    sub s0, a3, a2
 ; RV32I-NEXT:    addi t2, zero, 31
-; RV32I-NEXT:    bltz s0, .LBB6_2
+; RV32I-NEXT:    bltz s0, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sll a4, t1, s0
-; RV32I-NEXT:    j .LBB6_3
-; RV32I-NEXT:  .LBB6_2:
+; RV32I-NEXT:    j .LBB7_3
+; RV32I-NEXT:  .LBB7_2:
 ; RV32I-NEXT:    sll a3, t6, t5
 ; RV32I-NEXT:    sub a4, t2, t5
 ; RV32I-NEXT:    srli a5, t1, 1
 ; RV32I-NEXT:    srl a4, a5, a4
 ; RV32I-NEXT:    or a4, a3, a4
-; RV32I-NEXT:  .LBB6_3:
+; RV32I-NEXT:  .LBB7_3:
 ; RV32I-NEXT:    lw a3, 4(a1)
 ; RV32I-NEXT:    addi t0, a2, -32
-; RV32I-NEXT:    bgez t0, .LBB6_5
+; RV32I-NEXT:    bgez t0, .LBB7_5
 ; RV32I-NEXT:  # %bb.4:
 ; RV32I-NEXT:    srl a5, a3, a2
 ; RV32I-NEXT:    or a4, a4, a5
-; RV32I-NEXT:  .LBB6_5:
+; RV32I-NEXT:  .LBB7_5:
 ; RV32I-NEXT:    addi t4, a2, -96
 ; RV32I-NEXT:    addi t3, a2, -64
-; RV32I-NEXT:    bltz t4, .LBB6_7
+; RV32I-NEXT:    bltz t4, .LBB7_7
 ; RV32I-NEXT:  # %bb.6:
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    bgeu a2, a6, .LBB6_8
-; RV32I-NEXT:    j .LBB6_9
-; RV32I-NEXT:  .LBB6_7:
+; RV32I-NEXT:    bgeu a2, a6, .LBB7_8
+; RV32I-NEXT:    j .LBB7_9
+; RV32I-NEXT:  .LBB7_7:
 ; RV32I-NEXT:    srl a5, t6, t3
-; RV32I-NEXT:    bltu a2, a6, .LBB6_9
-; RV32I-NEXT:  .LBB6_8:
+; RV32I-NEXT:    bltu a2, a6, .LBB7_9
+; RV32I-NEXT:  .LBB7_8:
 ; RV32I-NEXT:    mv a4, a5
-; RV32I-NEXT:  .LBB6_9:
+; RV32I-NEXT:  .LBB7_9:
 ; RV32I-NEXT:    mv a7, a3
-; RV32I-NEXT:    beqz a2, .LBB6_11
+; RV32I-NEXT:    beqz a2, .LBB7_11
 ; RV32I-NEXT:  # %bb.10:
 ; RV32I-NEXT:    mv a7, a4
-; RV32I-NEXT:  .LBB6_11:
+; RV32I-NEXT:  .LBB7_11:
 ; RV32I-NEXT:    lw a1, 0(a1)
 ; RV32I-NEXT:    sub t2, t2, a2
-; RV32I-NEXT:    bltz t0, .LBB6_13
+; RV32I-NEXT:    bltz t0, .LBB7_13
 ; RV32I-NEXT:  # %bb.12:
 ; RV32I-NEXT:    srl a3, a3, t0
-; RV32I-NEXT:    bltz s0, .LBB6_14
-; RV32I-NEXT:    j .LBB6_15
-; RV32I-NEXT:  .LBB6_13:
+; RV32I-NEXT:    bltz s0, .LBB7_14
+; RV32I-NEXT:    j .LBB7_15
+; RV32I-NEXT:  .LBB7_13:
 ; RV32I-NEXT:    srl a4, a1, a2
 ; RV32I-NEXT:    slli a3, a3, 1
 ; RV32I-NEXT:    sll a3, a3, t2
 ; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    bgez s0, .LBB6_15
-; RV32I-NEXT:  .LBB6_14:
+; RV32I-NEXT:    bgez s0, .LBB7_15
+; RV32I-NEXT:  .LBB7_14:
 ; RV32I-NEXT:    sll a4, t1, t5
 ; RV32I-NEXT:    or a3, a3, a4
-; RV32I-NEXT:  .LBB6_15:
+; RV32I-NEXT:  .LBB7_15:
 ; RV32I-NEXT:    slli a4, t6, 1
-; RV32I-NEXT:    bltz t4, .LBB6_17
+; RV32I-NEXT:    bltz t4, .LBB7_17
 ; RV32I-NEXT:  # %bb.16:
 ; RV32I-NEXT:    srl a5, t6, t4
-; RV32I-NEXT:    bgeu a2, a6, .LBB6_18
-; RV32I-NEXT:    j .LBB6_19
-; RV32I-NEXT:  .LBB6_17:
+; RV32I-NEXT:    bgeu a2, a6, .LBB7_18
+; RV32I-NEXT:    j .LBB7_19
+; RV32I-NEXT:  .LBB7_17:
 ; RV32I-NEXT:    addi a5, zero, 95
 ; RV32I-NEXT:    sub a5, a5, a2
 ; RV32I-NEXT:    sll a5, a4, a5
 ; RV32I-NEXT:    srl s0, t1, t3
 ; RV32I-NEXT:    or a5, s0, a5
-; RV32I-NEXT:    bltu a2, a6, .LBB6_19
-; RV32I-NEXT:  .LBB6_18:
+; RV32I-NEXT:    bltu a2, a6, .LBB7_19
+; RV32I-NEXT:  .LBB7_18:
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:  .LBB6_19:
-; RV32I-NEXT:    bnez a2, .LBB6_22
+; RV32I-NEXT:  .LBB7_19:
+; RV32I-NEXT:    bnez a2, .LBB7_22
 ; RV32I-NEXT:  # %bb.20:
-; RV32I-NEXT:    bltz t0, .LBB6_23
-; RV32I-NEXT:  .LBB6_21:
+; RV32I-NEXT:    bltz t0, .LBB7_23
+; RV32I-NEXT:  .LBB7_21:
 ; RV32I-NEXT:    srl a3, t6, t0
-; RV32I-NEXT:    bgeu a2, a6, .LBB6_24
-; RV32I-NEXT:    j .LBB6_25
-; RV32I-NEXT:  .LBB6_22:
+; RV32I-NEXT:    bgeu a2, a6, .LBB7_24
+; RV32I-NEXT:    j .LBB7_25
+; RV32I-NEXT:  .LBB7_22:
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:    bgez t0, .LBB6_21
-; RV32I-NEXT:  .LBB6_23:
+; RV32I-NEXT:    bgez t0, .LBB7_21
+; RV32I-NEXT:  .LBB7_23:
 ; RV32I-NEXT:    srl a3, t1, a2
 ; RV32I-NEXT:    sll a4, a4, t2
 ; RV32I-NEXT:    or a3, a3, a4
-; RV32I-NEXT:    bltu a2, a6, .LBB6_25
-; RV32I-NEXT:  .LBB6_24:
+; RV32I-NEXT:    bltu a2, a6, .LBB7_25
+; RV32I-NEXT:  .LBB7_24:
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:  .LBB6_25:
-; RV32I-NEXT:    bltz t0, .LBB6_27
+; RV32I-NEXT:  .LBB7_25:
+; RV32I-NEXT:    bltz t0, .LBB7_27
 ; RV32I-NEXT:  # %bb.26:
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    bgeu a2, a6, .LBB6_28
-; RV32I-NEXT:    j .LBB6_29
-; RV32I-NEXT:  .LBB6_27:
+; RV32I-NEXT:    bgeu a2, a6, .LBB7_28
+; RV32I-NEXT:    j .LBB7_29
+; RV32I-NEXT:  .LBB7_27:
 ; RV32I-NEXT:    srl a4, t6, a2
-; RV32I-NEXT:    bltu a2, a6, .LBB6_29
-; RV32I-NEXT:  .LBB6_28:
+; RV32I-NEXT:    bltu a2, a6, .LBB7_29
+; RV32I-NEXT:  .LBB7_28:
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:  .LBB6_29:
+; RV32I-NEXT:  .LBB7_29:
 ; RV32I-NEXT:    sw a4, 12(a0)
 ; RV32I-NEXT:    sw a3, 8(a0)
 ; RV32I-NEXT:    sw a1, 0(a0)
@@ -264,12 +310,12 @@
 ; RV64I-LABEL: lshr128:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi a3, a2, -64
-; RV64I-NEXT:    bltz a3, .LBB6_2
+; RV64I-NEXT:    bltz a3, .LBB7_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srl a0, a1, a3
 ; RV64I-NEXT:    mv a1, zero
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB6_2:
+; RV64I-NEXT:  .LBB7_2:
 ; RV64I-NEXT:    srl a0, a0, a2
 ; RV64I-NEXT:    addi a3, zero, 63
 ; RV64I-NEXT:    sub a3, a3, a2
@@ -296,105 +342,105 @@
 ; RV32I-NEXT:    addi a3, zero, 32
 ; RV32I-NEXT:    sub a3, a3, a2
 ; RV32I-NEXT:    addi t4, zero, 31
-; RV32I-NEXT:    bltz a3, .LBB7_2
+; RV32I-NEXT:    bltz a3, .LBB8_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sll s0, t6, a3
-; RV32I-NEXT:    j .LBB7_3
-; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    j .LBB8_3
+; RV32I-NEXT:  .LBB8_2:
 ; RV32I-NEXT:    sll a4, s1, t2
 ; RV32I-NEXT:    sub a5, t4, t2
 ; RV32I-NEXT:    srli s0, t6, 1
 ; RV32I-NEXT:    srl a5, s0, a5
 ; RV32I-NEXT:    or s0, a4, a5
-; RV32I-NEXT:  .LBB7_3:
+; RV32I-NEXT:  .LBB8_3:
 ; RV32I-NEXT:    lw a5, 4(a1)
 ; RV32I-NEXT:    addi a7, a2, -32
-; RV32I-NEXT:    bgez a7, .LBB7_5
+; RV32I-NEXT:    bgez a7, .LBB8_5
 ; RV32I-NEXT:  # %bb.4:
 ; RV32I-NEXT:    srl a4, a5, a2
 ; RV32I-NEXT:    or s0, s0, a4
-; RV32I-NEXT:  .LBB7_5:
+; RV32I-NEXT:  .LBB8_5:
 ; RV32I-NEXT:    addi t3, a2, -64
 ; RV32I-NEXT:    addi t5, a2, -96
 ; RV32I-NEXT:    srai t1, s1, 31
-; RV32I-NEXT:    bltz t5, .LBB7_7
+; RV32I-NEXT:    bltz t5, .LBB8_7
 ; RV32I-NEXT:  # %bb.6:
 ; RV32I-NEXT:    mv a4, t1
-; RV32I-NEXT:    bgeu a2, a6, .LBB7_8
-; RV32I-NEXT:    j .LBB7_9
-; RV32I-NEXT:  .LBB7_7:
+; RV32I-NEXT:    bgeu a2, a6, .LBB8_8
+; RV32I-NEXT:    j .LBB8_9
+; RV32I-NEXT:  .LBB8_7:
 ; RV32I-NEXT:    sra a4, s1, t3
-; RV32I-NEXT:    bltu a2, a6, .LBB7_9
-; RV32I-NEXT:  .LBB7_8:
+; RV32I-NEXT:    bltu a2, a6, .LBB8_9
+; RV32I-NEXT:  .LBB8_8:
 ; RV32I-NEXT:    mv s0, a4
-; RV32I-NEXT:  .LBB7_9:
+; RV32I-NEXT:  .LBB8_9:
 ; RV32I-NEXT:    mv t0, a5
-; RV32I-NEXT:    beqz a2, .LBB7_11
+; RV32I-NEXT:    beqz a2, .LBB8_11
 ; RV32I-NEXT:  # %bb.10:
 ; RV32I-NEXT:    mv t0, s0
-; RV32I-NEXT:  .LBB7_11:
+; RV32I-NEXT:  .LBB8_11:
 ; RV32I-NEXT:    lw a1, 0(a1)
 ; RV32I-NEXT:    sub t4, t4, a2
-; RV32I-NEXT:    bltz a7, .LBB7_13
+; RV32I-NEXT:    bltz a7, .LBB8_13
 ; RV32I-NEXT:  # %bb.12:
 ; RV32I-NEXT:    srl a5, a5, a7
-; RV32I-NEXT:    bltz a3, .LBB7_14
-; RV32I-NEXT:    j .LBB7_15
-; RV32I-NEXT:  .LBB7_13:
+; RV32I-NEXT:    bltz a3, .LBB8_14
+; RV32I-NEXT:    j .LBB8_15
+; RV32I-NEXT:  .LBB8_13:
 ; RV32I-NEXT:    srl a4, a1, a2
 ; RV32I-NEXT:    slli a5, a5, 1
 ; RV32I-NEXT:    sll a5, a5, t4
 ; RV32I-NEXT:    or a5, a4, a5
-; RV32I-NEXT:    bgez a3, .LBB7_15
-; RV32I-NEXT:  .LBB7_14:
+; RV32I-NEXT:    bgez a3, .LBB8_15
+; RV32I-NEXT:  .LBB8_14:
 ; RV32I-NEXT:    sll a3, t6, t2
 ; RV32I-NEXT:    or a5, a5, a3
-; RV32I-NEXT:  .LBB7_15:
+; RV32I-NEXT:  .LBB8_15:
 ; RV32I-NEXT:    slli a3, s1, 1
-; RV32I-NEXT:    bltz t5, .LBB7_17
+; RV32I-NEXT:    bltz t5, .LBB8_17
 ; RV32I-NEXT:  # %bb.16:
 ; RV32I-NEXT:    sra s0, s1, t5
-; RV32I-NEXT:    bgeu a2, a6, .LBB7_18
-; RV32I-NEXT:    j .LBB7_19
-; RV32I-NEXT:  .LBB7_17:
+; RV32I-NEXT:    bgeu a2, a6, .LBB8_18
+; RV32I-NEXT:    j .LBB8_19
+; RV32I-NEXT:  .LBB8_17:
 ; RV32I-NEXT:    addi a4, zero, 95
 ; RV32I-NEXT:    sub a4, a4, a2
 ; RV32I-NEXT:    sll a4, a3, a4
 ; RV32I-NEXT:    srl s0, t6, t3
 ; RV32I-NEXT:    or s0, s0, a4
-; RV32I-NEXT:    bltu a2, a6, .LBB7_19
-; RV32I-NEXT:  .LBB7_18:
+; RV32I-NEXT:    bltu a2, a6, .LBB8_19
+; RV32I-NEXT:  .LBB8_18:
 ; RV32I-NEXT:    mv a5, s0
-; RV32I-NEXT:  .LBB7_19:
-; RV32I-NEXT:    bnez a2, .LBB7_22
+; RV32I-NEXT:  .LBB8_19:
+; RV32I-NEXT:    bnez a2, .LBB8_22
 ; RV32I-NEXT:  # %bb.20:
-; RV32I-NEXT:    bltz a7, .LBB7_23
-; RV32I-NEXT:  .LBB7_21:
+; RV32I-NEXT:    bltz a7, .LBB8_23
+; RV32I-NEXT:  .LBB8_21:
 ; RV32I-NEXT:    sra a3, s1, a7
-; RV32I-NEXT:    bgeu a2, a6, .LBB7_24
-; RV32I-NEXT:    j .LBB7_25
-; RV32I-NEXT:  .LBB7_22:
+; RV32I-NEXT:    bgeu a2, a6, .LBB8_24
+; RV32I-NEXT:    j .LBB8_25
+; RV32I-NEXT:  .LBB8_22:
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    bgez a7, .LBB7_21
-; RV32I-NEXT:  .LBB7_23:
+; RV32I-NEXT:    bgez a7, .LBB8_21
+; RV32I-NEXT:  .LBB8_23:
 ; RV32I-NEXT:    srl a4, t6, a2
 ; RV32I-NEXT:    sll a3, a3, t4
 ; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    bltu a2, a6, .LBB7_25
-; RV32I-NEXT:  .LBB7_24:
+; RV32I-NEXT:    bltu a2, a6, .LBB8_25
+; RV32I-NEXT:  .LBB8_24:
 ; RV32I-NEXT:    mv a3, t1
-; RV32I-NEXT:  .LBB7_25:
-; RV32I-NEXT:    bltz a7, .LBB7_27
+; RV32I-NEXT:  .LBB8_25:
+; RV32I-NEXT:    bltz a7, .LBB8_27
 ; RV32I-NEXT:  # %bb.26:
 ; RV32I-NEXT:    mv a4, t1
-; RV32I-NEXT:    bgeu a2, a6, .LBB7_28
-; RV32I-NEXT:    j .LBB7_29
-; RV32I-NEXT:  .LBB7_27:
+; RV32I-NEXT:    bgeu a2, a6, .LBB8_28
+; RV32I-NEXT:    j .LBB8_29
+; RV32I-NEXT:  .LBB8_27:
 ; RV32I-NEXT:    sra a4, s1, a2
-; RV32I-NEXT:    bltu a2, a6, .LBB7_29
-; RV32I-NEXT:  .LBB7_28:
+; RV32I-NEXT:    bltu a2, a6, .LBB8_29
+; RV32I-NEXT:  .LBB8_28:
 ; RV32I-NEXT:    mv a4, t1
-; RV32I-NEXT:  .LBB7_29:
+; RV32I-NEXT:  .LBB8_29:
 ; RV32I-NEXT:    sw a4, 12(a0)
 ; RV32I-NEXT:    sw a3, 8(a0)
 ; RV32I-NEXT:    sw a1, 0(a0)
@@ -407,12 +453,12 @@
 ; RV64I-LABEL: ashr128:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi a3, a2, -64
-; RV64I-NEXT:    bltz a3, .LBB7_2
+; RV64I-NEXT:    bltz a3, .LBB8_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    sra a0, a1, a3
 ; RV64I-NEXT:    srai a1, a1, 63
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB7_2:
+; RV64I-NEXT:  .LBB8_2:
 ; RV64I-NEXT:    srl a0, a0, a2
 ; RV64I-NEXT:    addi a3, zero, 63
 ; RV64I-NEXT:    sub a3, a3, a2
@@ -438,104 +484,104 @@
 ; RV32I-NEXT:    addi a3, zero, 32
 ; RV32I-NEXT:    sub s0, a3, a2
 ; RV32I-NEXT:    addi t2, zero, 31
-; RV32I-NEXT:    bltz s0, .LBB8_2
+; RV32I-NEXT:    bltz s0, .LBB9_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srl a4, t1, s0
-; RV32I-NEXT:    j .LBB8_3
-; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    j .LBB9_3
+; RV32I-NEXT:  .LBB9_2:
 ; RV32I-NEXT:    srl a3, t6, t5
 ; RV32I-NEXT:    sub a4, t2, t5
 ; RV32I-NEXT:    slli a5, t1, 1
 ; RV32I-NEXT:    sll a4, a5, a4
 ; RV32I-NEXT:    or a4, a3, a4
-; RV32I-NEXT:  .LBB8_3:
+; RV32I-NEXT:  .LBB9_3:
 ; RV32I-NEXT:    lw a3, 8(a1)
 ; RV32I-NEXT:    addi t0, a2, -32
-; RV32I-NEXT:    bgez t0, .LBB8_5
+; RV32I-NEXT:    bgez t0, .LBB9_5
 ; RV32I-NEXT:  # %bb.4:
 ; RV32I-NEXT:    sll a5, a3, a2
 ; RV32I-NEXT:    or a4, a4, a5
-; RV32I-NEXT:  .LBB8_5:
+; RV32I-NEXT:  .LBB9_5:
 ; RV32I-NEXT:    addi t4, a2, -96
 ; RV32I-NEXT:    addi t3, a2, -64
-; RV32I-NEXT:    bltz t4, .LBB8_7
+; RV32I-NEXT:    bltz t4, .LBB9_7
 ; RV32I-NEXT:  # %bb.6:
 ; RV32I-NEXT:    mv a5, zero
-; RV32I-NEXT:    bgeu a2, a6, .LBB8_8
-; RV32I-NEXT:    j .LBB8_9
-; RV32I-NEXT:  .LBB8_7:
+; RV32I-NEXT:    bgeu a2, a6, .LBB9_8
+; RV32I-NEXT:    j .LBB9_9
+; RV32I-NEXT:  .LBB9_7:
 ; RV32I-NEXT:    sll a5, t6, t3
-; RV32I-NEXT:    bltu a2, a6, .LBB8_9
-; RV32I-NEXT:  .LBB8_8:
+; RV32I-NEXT:    bltu a2, a6, .LBB9_9
+; RV32I-NEXT:  .LBB9_8:
 ; RV32I-NEXT:    mv a4, a5
-; RV32I-NEXT:  .LBB8_9:
+; RV32I-NEXT:  .LBB9_9:
 ; RV32I-NEXT:    mv a7, a3
-; RV32I-NEXT:    beqz a2, .LBB8_11
+; RV32I-NEXT:    beqz a2, .LBB9_11
 ; RV32I-NEXT:  # %bb.10:
 ; RV32I-NEXT:    mv a7, a4
-; RV32I-NEXT:  .LBB8_11:
+; RV32I-NEXT:  .LBB9_11:
 ; RV32I-NEXT:    lw a1, 12(a1)
 ; RV32I-NEXT:    sub t2, t2, a2
-; RV32I-NEXT:    bltz t0, .LBB8_13
+; RV32I-NEXT:    bltz t0, .LBB9_13
 ; RV32I-NEXT:  # %bb.12:
 ; RV32I-NEXT:    sll a3, a3, t0
-; RV32I-NEXT:    bltz s0, .LBB8_14
-; RV32I-NEXT:    j .LBB8_15
-; RV32I-NEXT:  .LBB8_13:
+; RV32I-NEXT:    bltz s0, .LBB9_14
+; RV32I-NEXT:    j .LBB9_15
+; RV32I-NEXT:  .LBB9_13:
 ; RV32I-NEXT:    sll a4, a1, a2
 ; RV32I-NEXT:    srli a3, a3, 1
 ; RV32I-NEXT:    srl a3, a3, t2
 ; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    bgez s0, .LBB8_15
-; RV32I-NEXT:  .LBB8_14:
+; RV32I-NEXT:    bgez s0, .LBB9_15
+; RV32I-NEXT:  .LBB9_14:
 ; RV32I-NEXT:    srl a4, t1, t5
 ; RV32I-NEXT:    or a3, a3, a4
-; RV32I-NEXT:  .LBB8_15:
+; RV32I-NEXT:  .LBB9_15:
 ; RV32I-NEXT:    srli a4, t6, 1
-; RV32I-NEXT:    bltz t4, .LBB8_17
+; RV32I-NEXT:    bltz t4, .LBB9_17
 ; RV32I-NEXT:  # %bb.16:
 ; RV32I-NEXT:    sll a5, t6, t4
-; RV32I-NEXT:    bgeu a2, a6, .LBB8_18
-; RV32I-NEXT:    j .LBB8_19
-; RV32I-NEXT:  .LBB8_17:
+; RV32I-NEXT:    bgeu a2, a6, .LBB9_18
+; RV32I-NEXT:    j .LBB9_19
+; RV32I-NEXT:  .LBB9_17:
 ; RV32I-NEXT:    addi a5, zero, 95
 ; RV32I-NEXT:    sub a5, a5, a2
 ; RV32I-NEXT:    srl a5, a4, a5
 ; RV32I-NEXT:    sll s0, t1, t3
 ; RV32I-NEXT:    or a5, s0, a5
-; RV32I-NEXT:    bltu a2, a6, .LBB8_19
-; RV32I-NEXT:  .LBB8_18:
+; RV32I-NEXT:    bltu a2, a6, .LBB9_19
+; RV32I-NEXT:  .LBB9_18:
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:  .LBB8_19:
-; RV32I-NEXT:    bnez a2, .LBB8_22
+; RV32I-NEXT:  .LBB9_19:
+; RV32I-NEXT:    bnez a2, .LBB9_22
 ; RV32I-NEXT:  # %bb.20:
-; RV32I-NEXT:    bltz t0, .LBB8_23
-; RV32I-NEXT:  .LBB8_21:
+; RV32I-NEXT:    bltz t0, .LBB9_23
+; RV32I-NEXT:  .LBB9_21:
 ; RV32I-NEXT:    sll a3, t6, t0
-; RV32I-NEXT:    bgeu a2, a6, .LBB8_24
-; RV32I-NEXT:    j .LBB8_25
-; RV32I-NEXT:  .LBB8_22:
+; RV32I-NEXT:    bgeu a2, a6, .LBB9_24
+; RV32I-NEXT:    j .LBB9_25
+; RV32I-NEXT:  .LBB9_22:
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:    bgez t0, .LBB8_21
-; RV32I-NEXT:  .LBB8_23:
+; RV32I-NEXT:    bgez t0, .LBB9_21
+; RV32I-NEXT:  .LBB9_23:
 ; RV32I-NEXT:    sll a3, t1, a2
 ; RV32I-NEXT:    srl a4, a4, t2
 ; RV32I-NEXT:    or a3, a3, a4
-; RV32I-NEXT:    bltu a2, a6, .LBB8_25
-; RV32I-NEXT:  .LBB8_24:
+; RV32I-NEXT:    bltu a2, a6, .LBB9_25
+; RV32I-NEXT:  .LBB9_24:
 ; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:  .LBB8_25:
-; RV32I-NEXT:    bltz t0, .LBB8_27
+; RV32I-NEXT:  .LBB9_25:
+; RV32I-NEXT:    bltz t0, .LBB9_27
 ; RV32I-NEXT:  # %bb.26:
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    bgeu a2, a6, .LBB8_28
-; RV32I-NEXT:    j .LBB8_29
-; RV32I-NEXT:  .LBB8_27:
+; RV32I-NEXT:    bgeu a2, a6, .LBB9_28
+; RV32I-NEXT:    j .LBB9_29
+; RV32I-NEXT:  .LBB9_27:
 ; RV32I-NEXT:    sll a4, t6, a2
-; RV32I-NEXT:    bltu a2, a6, .LBB8_29
-; RV32I-NEXT:  .LBB8_28:
+; RV32I-NEXT:    bltu a2, a6, .LBB9_29
+; RV32I-NEXT:  .LBB9_28:
 ; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:  .LBB8_29:
+; RV32I-NEXT:  .LBB9_29:
 ; RV32I-NEXT:    sw a4, 0(a0)
 ; RV32I-NEXT:    sw a3, 4(a0)
 ; RV32I-NEXT:    sw a1, 12(a0)
@@ -547,12 +593,12 @@
 ; RV64I-LABEL: shl128:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi a3, a2, -64
-; RV64I-NEXT:    bltz a3, .LBB8_2
+; RV64I-NEXT:    bltz a3, .LBB9_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    sll a1, a0, a3
 ; RV64I-NEXT:    mv a0, zero
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB8_2:
+; RV64I-NEXT:  .LBB9_2:
 ; RV64I-NEXT:    sll a1, a1, a2
 ; RV64I-NEXT:    addi a3, zero, 63
 ; RV64I-NEXT:    sub a3, a3, a2
Index: llvm/test/CodeGen/X86/shift_minsize.ll
===================================================================
--- llvm/test/CodeGen/X86/shift_minsize.ll
+++ llvm/test/CodeGen/X86/shift_minsize.ll
@@ -97,7 +97,6 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    movzbl %dl, %edx
 ; CHECK-NEXT:    callq __ashlti3@PLT
 ; CHECK-NEXT:    popq %rcx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
@@ -173,7 +172,6 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    movzbl %dl, %edx
 ; CHECK-NEXT:    callq __lshrti3@PLT
 ; CHECK-NEXT:    popq %rcx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
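
Reviewer note (illustration only, not part of the patch): compiler-rt declares the shift libcalls exercised above (__ashlti3, __lshrti3, __ashldi3, __lshrdi3) with a 32-bit si_int shift amount, so the expanded node's amount operand can be canonicalized to MVT::i32 before makeLibCall; that is what lets the redundant extension instructions (mov w2, w2 on AArch64, movzbl %dl, %edx on x86-64) disappear from the tests. A minimal C++ sketch of the convention this relies on, assuming the usual compiler-rt prototype; shl128_via_libcall is a hypothetical name:

  #include <cstdint>

  // As in compiler-rt: the 128-bit shift helpers take the shift amount as a
  // plain 32-bit int (si_int), not as the 128-bit shifted type.
  extern "C" __int128 __ashlti3(__int128 a, int32_t b);

  // Mirrors what the legalized code now emits: a wider shift amount is
  // simply truncated to 32 bits before the call, which is lossless for
  // every well-defined amount (0..127) of a 128-bit shift.
  __int128 shl128_via_libcall(__int128 a, uint64_t amt) {
    return __ashlti3(a, static_cast<int32_t>(amt));
  }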