diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -178,15 +178,21 @@
   // If the low 12 bits are non-zero, the first expansion may end with an ADDI
   // or ADDIW. If there are trailing zeros, try generating a sign extended
   // constant with no trailing zeros and use a final SLLI to restore them.
-  if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() > 2) {
+  if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() >= 2) {
     unsigned TrailingZeros = countTrailingZeros((uint64_t)Val);
     int64_t ShiftedVal = Val >> TrailingZeros;
+    // If we can use C.LI+C.SLLI instead of LUI+ADDI(W), prefer that since
+    // it's more compressible. But only if LUI+ADDI(W) isn't fusable.
+    // NOTE: We don't check for the C extension to minimize differences in
+    // generated code.
+    bool IsShiftedCompressible =
+        isInt<6>(ShiftedVal) && !ActiveFeatures[RISCV::TuneLUIADDIFusion];
     RISCVMatInt::InstSeq TmpSeq;
     generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
     TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros);
     // Keep the new sequence if it is an improvement.
-    if (TmpSeq.size() < Res.size())
+    if (TmpSeq.size() < Res.size() || IsShiftedCompressible)
      Res = TmpSeq;
   }
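The predicate above reads in isolation as: strip the trailing zeros, and if what is left fits the 6-bit signed immediate of C.LI, the LI+SLLI pair is fully compressible, so prefer it unless the target fuses LUI+ADDI(W). That is also why the guard relaxes from Res.size() > 2 to Res.size() >= 2: the pair now competes with two-instruction LUI+ADDI(W) expansions on encoded size, not just on instruction count. Below is a minimal standalone sketch of the heuristic, assuming illustrative names (fitsInt6, preferLiSlli, HasLUIADDIFusion) rather than the LLVM API, and omitting the surrounding (Val & 0xfff) != 0 and Res.size() guards:

// Standalone sketch, not LLVM code; names are illustrative.
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// C.LI accepts a 6-bit signed immediate, i.e. [-32, 31].
static bool fitsInt6(int64_t V) { return V >= -32 && V <= 31; }

// Returns true when LI+SLLI should be preferred for Val: the value is even,
// and after shifting out the trailing zeros the remainder fits in C.LI's
// immediate, so both halves of the pair have compressed encodings. A core
// that fuses LUI+ADDI(W) keeps the old expansion instead.
static bool preferLiSlli(int64_t Val, bool HasLUIADDIFusion) {
  if (Val == 0 || (Val & 1) != 0)
    return false; // No trailing zeros to strip.
  unsigned TrailingZeros = __builtin_ctzll((uint64_t)Val); // GCC/Clang builtin
  int64_t ShiftedVal = Val >> TrailingZeros;
  return fitsInt6(ShiftedVal) && !HasLUIADDIFusion;
}

int main() {
  // 2048 = 1 << 11, 4352 = 17 << 8, -3840 = -15 << 8: all become LI+SLLI.
  // 2500 = 625 << 2: 625 does not fit in 6 bits, so LUI+ADDI(W) stays.
  for (int64_t V : {2048LL, 4352LL, -3840LL, 2500LL})
    std::printf("%5lld -> %s\n", (long long)V,
                preferLiSlli(V, /*HasLUIADDIFusion=*/false) ? "li+slli"
                                                            : "lui+addi(w)");
  return 0;
}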
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -529,14 +529,14 @@
 define half @callee_half_ret() nounwind {
 ; RV32I-LABEL: callee_half_ret:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a0, 4
-; RV32I-NEXT:    addi a0, a0, -1024
+; RV32I-NEXT:    li a0, 15
+; RV32I-NEXT:    slli a0, a0, 10
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: callee_half_ret:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a0, 4
-; RV64I-NEXT:    addiw a0, a0, -1024
+; RV64I-NEXT:    li a0, 15
+; RV64I-NEXT:    slli a0, a0, 10
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: callee_half_ret:
diff --git a/llvm/test/CodeGen/RISCV/i32-icmp.ll b/llvm/test/CodeGen/RISCV/i32-icmp.ll
--- a/llvm/test/CodeGen/RISCV/i32-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/i32-icmp.ll
@@ -598,8 +598,8 @@
 define i32 @icmp_slt_constant_2048(i32 %a) nounwind {
 ; RV32I-LABEL: icmp_slt_constant_2048:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 1
-; RV32I-NEXT:    addi a1, a1, -2048
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    slli a1, a1, 11
 ; RV32I-NEXT:    slt a0, a0, a1
 ; RV32I-NEXT:    ret
   %1 = icmp slt i32 %a, 2048
@@ -663,8 +663,8 @@
 define i32 @icmp_sle_constant_2047(i32 %a) nounwind {
 ; RV32I-LABEL: icmp_sle_constant_2047:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 1
-; RV32I-NEXT:    addi a1, a1, -2048
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    slli a1, a1, 11
 ; RV32I-NEXT:    slt a0, a0, a1
 ; RV32I-NEXT:    ret
   %1 = icmp sle i32 %a, 2047
diff --git a/llvm/test/CodeGen/RISCV/i64-icmp.ll b/llvm/test/CodeGen/RISCV/i64-icmp.ll
--- a/llvm/test/CodeGen/RISCV/i64-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/i64-icmp.ll
@@ -598,8 +598,8 @@
 define i64 @icmp_slt_constant_2048(i64 %a) nounwind {
 ; RV64I-LABEL: icmp_slt_constant_2048:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1
-; RV64I-NEXT:    addiw a1, a1, -2048
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 11
 ; RV64I-NEXT:    slt a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = icmp slt i64 %a, 2048
@@ -663,8 +663,8 @@
 define i64 @icmp_sle_constant_2047(i64 %a) nounwind {
 ; RV64I-LABEL: icmp_sle_constant_2047:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1
-; RV64I-NEXT:    addiw a1, a1, -2048
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 11
 ; RV64I-NEXT:    slt a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = icmp sle i64 %a, 2047
diff --git a/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll b/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
--- a/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
+++ b/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
@@ -26,3 +26,19 @@
 }
 
 declare void @bar(i8*, float)
+
+; Test that we prefer lui+addiw over li+slli when LUI+ADDI fusion is enabled.
+define i32 @test_matint() {
+; NOFUSION-LABEL: test_matint:
+; NOFUSION:       # %bb.0:
+; NOFUSION-NEXT:    li a0, 1
+; NOFUSION-NEXT:    slli a0, a0, 11
+; NOFUSION-NEXT:    ret
+;
+; FUSION-LABEL: test_matint:
+; FUSION:       # %bb.0:
+; FUSION-NEXT:    lui a0, 1
+; FUSION-NEXT:    addiw a0, a0, -2048
+; FUSION-NEXT:    ret
+  ret i32 2048
+}
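For the size arithmetic behind the test above, assuming the standard C-extension encodings: both expansions of 2048 are two instructions, but li a0, 1 and slli a0, a0, 11 compress to c.li and c.slli (2 bytes each, 4 bytes total), while in lui a0, 1 / addiw a0, a0, -2048 only the LUI half compresses to c.lui, since -2048 exceeds the 6-bit C.ADDIW immediate (2 + 4 = 6 bytes). On cores with LUI+ADDI(W) macro-op fusion, however, the old pair issues as a single fused operation, which is why the FUSION configuration keeps it.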
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -819,8 +819,8 @@
 ;
 ; RV32IM-LABEL: muli32_p4352:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a1, 1
-; RV32IM-NEXT:    addi a1, a1, 256
+; RV32IM-NEXT:    li a1, 17
+; RV32IM-NEXT:    slli a1, a1, 8
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -851,8 +851,8 @@
 ;
 ; RV32IM-LABEL: muli32_p3840:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a1, 1
-; RV32IM-NEXT:    addi a1, a1, -256
+; RV32IM-NEXT:    li a1, 15
+; RV32IM-NEXT:    slli a1, a1, 8
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -883,8 +883,8 @@
 ;
 ; RV32IM-LABEL: muli32_m3840:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a1, 1048575
-; RV32IM-NEXT:    addi a1, a1, 256
+; RV32IM-NEXT:    li a1, -15
+; RV32IM-NEXT:    slli a1, a1, 8
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -908,14 +908,14 @@
 define i32 @muli32_m4352(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_m4352:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 1048575
-; RV32I-NEXT:    addi a1, a1, -256
+; RV32I-NEXT:    li a1, -17
+; RV32I-NEXT:    slli a1, a1, 8
 ; RV32I-NEXT:    tail __mulsi3@plt
 ;
 ; RV32IM-LABEL: muli32_m4352:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a1, 1048575
-; RV32IM-NEXT:    addi a1, a1, -256
+; RV32IM-NEXT:    li a1, -17
+; RV32IM-NEXT:    slli a1, a1, 8
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -923,8 +923,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 1048575
-; RV64I-NEXT:    addiw a1, a1, -256
+; RV64I-NEXT:    li a1, -17
+; RV64I-NEXT:    slli a1, a1, 8
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -932,8 +932,8 @@
 ;
 ; RV64IM-LABEL: muli32_m4352:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    lui a1, 1048575
-; RV64IM-NEXT:    addiw a1, a1, -256
+; RV64IM-NEXT:    li a1, -17
+; RV64IM-NEXT:    slli a1, a1, 8
 ; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, -4352
@@ -959,8 +959,8 @@
 ;
 ; RV32IM-LABEL: muli64_p4352:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a2, 1
-; RV32IM-NEXT:    addi a2, a2, 256
+; RV32IM-NEXT:    li a2, 17
+; RV32IM-NEXT:    slli a2, a2, 8
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    mulhu a3, a0, a2
 ; RV32IM-NEXT:    add a1, a3, a1
@@ -976,8 +976,8 @@
 ;
 ; RV64IM-LABEL: muli64_p4352:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    lui a1, 1
-; RV64IM-NEXT:    addiw a1, a1, 256
+; RV64IM-NEXT:    li a1, 17
+; RV64IM-NEXT:    slli a1, a1, 8
 ; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i64 %a, 4352
@@ -1003,8 +1003,8 @@
 ;
 ; RV32IM-LABEL: muli64_p3840:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a2, 1
-; RV32IM-NEXT:    addi a2, a2, -256
+; RV32IM-NEXT:    li a2, 15
+; RV32IM-NEXT:    slli a2, a2, 8
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    mulhu a3, a0, a2
 ; RV32IM-NEXT:    add a1, a3, a1
@@ -1020,8 +1020,8 @@
 ;
 ; RV64IM-LABEL: muli64_p3840:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    lui a1, 1
-; RV64IM-NEXT:    addiw a1, a1, -256
+; RV64IM-NEXT:    li a1, 15
+; RV64IM-NEXT:    slli a1, a1, 8
 ; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i64 %a, 3840
@@ -1033,8 +1033,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a2, 1048575
-; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    li a2, -17
+; RV32I-NEXT:    slli a2, a2, 8
 ; RV32I-NEXT:    li a3, -1
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1043,8 +1043,8 @@
 ;
 ; RV32IM-LABEL: muli64_m4352:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a2, 1048575
-; RV32IM-NEXT:    addi a2, a2, -256
+; RV32IM-NEXT:    li a2, -17
+; RV32IM-NEXT:    slli a2, a2, 8
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    mulhu a3, a0, a2
 ; RV32IM-NEXT:    sub a3, a3, a0
@@ -1054,14 +1054,14 @@
 ;
 ; RV64I-LABEL: muli64_m4352:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1048575
-; RV64I-NEXT:    addiw a1, a1, -256
+; RV64I-NEXT:    li a1, -17
+; RV64I-NEXT:    slli a1, a1, 8
 ; RV64I-NEXT:    tail __muldi3@plt
 ;
 ; RV64IM-LABEL: muli64_m4352:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    lui a1, 1048575
-; RV64IM-NEXT:    addiw a1, a1, -256
+; RV64IM-NEXT:    li a1, -17
+; RV64IM-NEXT:    slli a1, a1, 8
 ; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i64 %a, -4352
@@ -1087,8 +1087,8 @@
 ;
 ; RV32IM-LABEL: muli64_m3840:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a2, 1048575
-; RV32IM-NEXT:    addi a2, a2, 256
+; RV32IM-NEXT:    li a2, -15
+; RV32IM-NEXT:    slli a2, a2, 8
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    mulhu a3, a0, a2
 ; RV32IM-NEXT:    sub a3, a3, a0
@@ -1105,8 +1105,8 @@
 ;
 ; RV64IM-LABEL: muli64_m3840:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    lui a1, 1048575
-; RV64IM-NEXT:    addiw a1, a1, 256
+; RV64IM-NEXT:    li a1, -15
+; RV64IM-NEXT:    slli a1, a1, 8
 ; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i64 %a, -3840
@@ -1171,8 +1171,8 @@
 ; RV32IM-NEXT:    lw a3, 8(a1)
 ; RV32IM-NEXT:    lw a4, 0(a1)
 ; RV32IM-NEXT:    lw a1, 4(a1)
-; RV32IM-NEXT:    lui a5, 1048575
-; RV32IM-NEXT:    addi a5, a5, 256
+; RV32IM-NEXT:    li a5, -15
+; RV32IM-NEXT:    slli a5, a5, 8
 ; RV32IM-NEXT:    mulhu a6, a4, a5
 ; RV32IM-NEXT:    mul a7, a1, a5
 ; RV32IM-NEXT:    add a6, a7, a6
@@ -1236,8 +1236,8 @@
 ;
 ; RV64IM-LABEL: muli128_m3840:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    lui a2, 1048575
-; RV64IM-NEXT:    addiw a2, a2, 256
+; RV64IM-NEXT:    li a2, -15
+; RV64IM-NEXT:    slli a2, a2, 8
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    mulhu a3, a0, a2
 ; RV64IM-NEXT:    sub a3, a3, a0
diff --git a/llvm/test/CodeGen/RISCV/pr58511.ll b/llvm/test/CodeGen/RISCV/pr58511.ll
--- a/llvm/test/CodeGen/RISCV/pr58511.ll
+++ b/llvm/test/CodeGen/RISCV/pr58511.ll
@@ -9,8 +9,8 @@
 ; CHECK-NEXT:    subw a1, a1, a3
 ; CHECK-NEXT:    andi a0, a0, 1
 ; CHECK-NEXT:    neg a0, a0
-; CHECK-NEXT:    lui a3, 1
-; CHECK-NEXT:    addiw a3, a3, -2048
+; CHECK-NEXT:    li a3, 1
+; CHECK-NEXT:    slli a3, a3, 11
 ; CHECK-NEXT:    or a0, a0, a3
 ; CHECK-NEXT:    sw a1, 0(a2)
 ; CHECK-NEXT:    ret
@@ -30,8 +30,8 @@
 ; CHECK-NEXT:    subw a1, a1, a3
 ; CHECK-NEXT:    andi a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    lui a3, 1
-; CHECK-NEXT:    addiw a3, a3, -2048
+; CHECK-NEXT:    li a3, 1
+; CHECK-NEXT:    slli a3, a3, 11
 ; CHECK-NEXT:    or a0, a0, a3
 ; CHECK-NEXT:    sw a1, 0(a2)
 ; CHECK-NEXT:    ret
@@ -69,8 +69,8 @@
 ; CHECK-NEXT:    slliw a1, a1, 12
 ; CHECK-NEXT:    subw a1, a1, a3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    lui a3, 1
-; CHECK-NEXT:    addiw a3, a3, -2048
+; CHECK-NEXT:    li a3, 1
+; CHECK-NEXT:    slli a3, a3, 11
 ; CHECK-NEXT:    and a0, a0, a3
 ; CHECK-NEXT:    sw a1, 0(a2)
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -512,8 +512,8 @@
 define i32 @bseti_i32_11(i32 %a) nounwind {
 ; RV32I-LABEL: bseti_i32_11:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 1
-; RV32I-NEXT:    addi a1, a1, -2048
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    slli a1, a1, 11
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -567,8 +567,8 @@
 define i32 @binvi_i32_11(i32 %a) nounwind {
 ; RV32I-LABEL: binvi_i32_11:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 1
-; RV32I-NEXT:    addi a1, a1, -2048
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    slli a1, a1, 11
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -684,8 +684,8 @@
 define signext i32 @bseti_i32_11(i32 signext %a) nounwind {
 ; RV64I-LABEL: bseti_i32_11:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1
-; RV64I-NEXT:    addiw a1, a1, -2048
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 11
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -734,8 +734,8 @@
 define i64 @bseti_i64_11(i64 %a) nounwind {
 ; RV64I-LABEL: bseti_i64_11:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1
-; RV64I-NEXT:    addiw a1, a1, -2048
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 11
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -822,8 +822,8 @@
 define signext i32 @binvi_i32_11(i32 signext %a) nounwind {
 ; RV64I-LABEL: binvi_i32_11:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1
-; RV64I-NEXT:    addiw a1, a1, -2048
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 11
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -872,8 +872,8 @@
 define i64 @binvi_i64_11(i64 %a) nounwind {
 ; RV64I-LABEL: binvi_i64_11:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1
-; RV64I-NEXT:    addiw a1, a1, -2048
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 11
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
@@ -68,8 +68,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    lui a0, 1
-; CHECK-NEXT:    addiw a0, a0, -2048
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    slli a0, a0, 11
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
   %mask = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64 0, i64 2048)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -238,65 +238,35 @@
 }
 
 define void @buildvec_no_vid_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <4 x i8>* %z3, <4 x i8>* %z4, <4 x i8>* %z5) {
-; RV32-LABEL: buildvec_no_vid_v4i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lui a6, %hi(.LCPI14_0)
-; RV32-NEXT:    addi a6, a6, %lo(.LCPI14_0)
-; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT:    vle8.v v8, (a6)
-; RV32-NEXT:    lui a6, %hi(.LCPI14_1)
-; RV32-NEXT:    addi a6, a6, %lo(.LCPI14_1)
-; RV32-NEXT:    vle8.v v9, (a6)
-; RV32-NEXT:    vse8.v v8, (a0)
-; RV32-NEXT:    vse8.v v9, (a1)
-; RV32-NEXT:    lui a0, 1
-; RV32-NEXT:    addi a0, a0, -2048
-; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT:    vse8.v v8, (a2)
-; RV32-NEXT:    li a0, 2047
-; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT:    lui a0, %hi(.LCPI14_2)
-; RV32-NEXT:    addi a0, a0, %lo(.LCPI14_2)
-; RV32-NEXT:    vle8.v v9, (a0)
-; RV32-NEXT:    vse8.v v8, (a3)
-; RV32-NEXT:    vmv.v.i v8, -2
-; RV32-NEXT:    vse8.v v8, (a4)
-; RV32-NEXT:    vse8.v v9, (a5)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: buildvec_no_vid_v4i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a6, %hi(.LCPI14_0)
-; RV64-NEXT:    addi a6, a6, %lo(.LCPI14_0)
-; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT:    vle8.v v8, (a6)
-; RV64-NEXT:    lui a6, %hi(.LCPI14_1)
-; RV64-NEXT:    addi a6, a6, %lo(.LCPI14_1)
-; RV64-NEXT:    vle8.v v9, (a6)
-; RV64-NEXT:    vse8.v v8, (a0)
-; RV64-NEXT:    vse8.v v9, (a1)
-; RV64-NEXT:    lui a0, 1
-; RV64-NEXT:    addiw a0, a0, -2048
-; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT:    vse8.v v8, (a2)
-; RV64-NEXT:    li a0, 2047
-; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT:    lui a0, %hi(.LCPI14_2)
-; RV64-NEXT:    addi a0, a0, %lo(.LCPI14_2)
-; RV64-NEXT:    vle8.v v9, (a0)
-; RV64-NEXT:    vse8.v v8, (a3)
-; RV64-NEXT:    vmv.v.i v8, -2
-; RV64-NEXT:    vse8.v v8, (a4)
-; RV64-NEXT:    vse8.v v9, (a5)
-; RV64-NEXT:    ret
+; CHECK-LABEL: buildvec_no_vid_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a6, %hi(.LCPI14_0)
+; CHECK-NEXT:    addi a6, a6, %lo(.LCPI14_0)
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a6)
+; CHECK-NEXT:    lui a6, %hi(.LCPI14_1)
+; CHECK-NEXT:    addi a6, a6, %lo(.LCPI14_1)
+; CHECK-NEXT:    vle8.v v9, (a6)
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse8.v v9, (a1)
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    slli a0, a0, 11
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a2)
+; CHECK-NEXT:    li a0, 2047
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    lui a0, %hi(.LCPI14_2)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI14_2)
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vse8.v v8, (a3)
+; CHECK-NEXT:    vmv.v.i v8, -2
+; CHECK-NEXT:    vse8.v v8, (a4)
+; CHECK-NEXT:    vse8.v v9, (a5)
+; CHECK-NEXT:    ret
   store <4 x i8> , <4 x i8>* %z0
   store <4 x i8> , <4 x i8>* %z1
   store <4 x i8> , <4 x i8>* %z2
diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
--- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
@@ -20,8 +20,8 @@
   ; CHECK-NEXT:    sd a0, 8(sp)
   ; CHECK-NEXT:    csrr a0, vlenb
   ; CHECK-NEXT:    sd a1, 0(sp)
-  ; CHECK-NEXT:    lui a1, 1
-  ; CHECK-NEXT:    addiw a1, a1, -1024
+  ; CHECK-NEXT:    li a1, 3
+  ; CHECK-NEXT:    slli a1, a1, 10
   ; CHECK-NEXT:    mul a0, a0, a1
   ; CHECK-NEXT:    ld a1, 0(sp)
   ; CHECK-NEXT:    sub sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll
@@ -93,8 +93,8 @@
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    neg a0, a0
-; CHECK-NEXT:    lui a1, 1
-; CHECK-NEXT:    addiw a1, a1, -2048
+; CHECK-NEXT:    li a1, 1
+; CHECK-NEXT:    slli a1, a1, 11
 ; CHECK-NEXT:    and a0, a0, a1
 ; CHECK-NEXT:    ret
   %vscale = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/MC/RISCV/rv32c-aliases-valid.s b/llvm/test/MC/RISCV/rv32c-aliases-valid.s
--- a/llvm/test/MC/RISCV/rv32c-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv32c-aliases-valid.s
@@ -24,8 +24,8 @@
 li x10, 2047
 # CHECK-EXPAND: addi a0, zero, -2047
 li x10, -2047
-# CHECK-EXPAND: c.lui a1, 1
-# CHECK-EXPAND: addi a1, a1, -2048
+# CHECK-EXPAND: c.li a1, 1
+# CHECK-EXPAND: c.slli a1, 11
 li x11, 2048
 # CHECK-EXPAND: addi a1, zero, -2048
 li x11, -2048
diff --git a/llvm/test/MC/RISCV/rv32i-aliases-valid.s b/llvm/test/MC/RISCV/rv32i-aliases-valid.s
--- a/llvm/test/MC/RISCV/rv32i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv32i-aliases-valid.s
@@ -32,8 +32,10 @@
 # CHECK-INST: addi a0, zero, -2047
 # CHECK-ALIAS: li a0, -2047
 li x10, -2047
-# CHECK-EXPAND: lui a1, 1
-# CHECK-EXPAND: addi a1, a1, -2048
+# CHECK-INST: addi a1, zero, 1
+# CHECK-INST: slli a1, a1, 11
+# CHECK-ALIAS: li a1, 1
+# CHECK-ALIAS: slli a1, a1, 11
 li x11, 2048
 # CHECK-INST: addi a1, zero, -2048
 # CHECK-ALIAS: li a1, -2048
diff --git a/llvm/test/MC/RISCV/rv64c-aliases-valid.s b/llvm/test/MC/RISCV/rv64c-aliases-valid.s
--- a/llvm/test/MC/RISCV/rv64c-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64c-aliases-valid.s
@@ -24,8 +24,8 @@
 li x10, 2047
 # CHECK-EXPAND: addi a0, zero, -2047
 li x10, -2047
-# CHECK-EXPAND: c.lui a1, 1
-# CHECK-EXPAND: addiw a1, a1, -2048
+# CHECK-EXPAND: c.li a1, 1
+# CHECK-EXPAND: c.slli a1, 11
 li x11, 2048
 # CHECK-EXPAND: addi a1, zero, -2048
 li x11, -2048
diff --git a/llvm/test/MC/RISCV/rv64i-aliases-valid.s b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
--- a/llvm/test/MC/RISCV/rv64i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
@@ -35,8 +35,10 @@
 # CHECK-INST: addi a0, zero, -2047
 # CHECK-ALIAS: li a0, -2047
 li x10, -2047
-# CHECK-EXPAND: lui a1, 1
-# CHECK-EXPAND: addiw a1, a1, -2048
+# CHECK-INST: addi a1, zero, 1
+# CHECK-INST: slli a1, a1, 11
+# CHECK-ALIAS: li a1, 1
+# CHECK-ALIAS: slli a1, a1, 11
 li x11, 2048
 # CHECK-INST: addi a1, zero, -2048
 # CHECK-ALIAS: li a1, -2048
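For a quick manual check of the new expansion (assuming a local build, with flags in the style of these tests' RUN lines): feeding li a1, 2048 to llvm-mc -triple=riscv32 -mattr=+c -riscv-no-aliases should now print c.li a1, 1 followed by c.slli a1, 11, matching the updated CHECK-EXPAND lines above.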