diff --git a/compiler-rt/lib/builtins/riscv/int_mul_impl.inc b/compiler-rt/lib/builtins/riscv/int_mul_impl.inc
--- a/compiler-rt/lib/builtins/riscv/int_mul_impl.inc
+++ b/compiler-rt/lib/builtins/riscv/int_mul_impl.inc
@@ -21,7 +21,7 @@
   .type __mulxi3, @function
 __mulxi3:
   mv a2, a0
-  mv a0, zero
+  li a0, 0
 .L1:
   andi a3, a1, 1
   beqz a3, .L2
diff --git a/compiler-rt/lib/builtins/riscv/save.S b/compiler-rt/lib/builtins/riscv/save.S
--- a/compiler-rt/lib/builtins/riscv/save.S
+++ b/compiler-rt/lib/builtins/riscv/save.S
@@ -22,7 +22,7 @@
   .type __riscv_save_12,@function
 __riscv_save_12:
   addi sp, sp, -64
-  mv t1, zero
+  li t1, 0
   sw s11, 12(sp)
   j .Lriscv_save_11_8

@@ -98,7 +98,7 @@
   .type __riscv_save_12,@function
 __riscv_save_12:
   addi sp, sp, -112
-  mv t1, zero
+  li t1, 0
   sd s11, 8(sp)
   j .Lriscv_save_11_10

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -1347,7 +1347,7 @@
                        "jalr a5\n"

                        // Call _exit(a0).
-                       "addi a7, zero, %9\n"
+                       "li a7, %9\n"
                        "ecall\n"
                        "1:\n"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -705,6 +705,7 @@
 def PseudoSD : PseudoStore<"sd">;
 } // Predicates = [IsRV64]

+def : InstAlias<"li $rd, $imm", (ADDI GPR:$rd, X0, simm12:$imm)>;
 def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>;
 def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>;
 def : InstAlias<"neg $rd, $rs", (SUB GPR:$rd, X0, GPR:$rs)>;
diff --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
--- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
+++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
@@ -10,14 +10,14 @@
 define i32 @add_mul_combine_accept_a1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_a1:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    addi a0, a0, 1073
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a1:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 1073
 ; RV64IMB-NEXT:    ret
@@ -29,14 +29,14 @@
 define signext i32 @add_mul_combine_accept_a2(i32 signext %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_a2:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    addi a0, a0, 1073
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a2:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 1073
 ; RV64IMB-NEXT:    ret
@@ -48,7 +48,7 @@
 define i64 @add_mul_combine_accept_a3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_a3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 29
+; RV32IMB-NEXT:    li a2, 29
 ; RV32IMB-NEXT:    mul a1, a1, a2
 ; RV32IMB-NEXT:    mulhu a3, a0, a2
 ; RV32IMB-NEXT:    add a1, a3, a1
@@ -60,7 +60,7 @@
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a3:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addi a0, a0, 1073
 ; RV64IMB-NEXT:    ret
@@ -72,7 +72,7 @@
 define i32 @add_mul_combine_accept_b1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_b1:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a1, zero, 23
+; RV32IMB-NEXT: li a1, 23 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: lui a1, 50 ; RV32IMB-NEXT: addi a1, a1, 1119 @@ -81,7 +81,7 @@ ; ; RV64IMB-LABEL: add_mul_combine_accept_b1: ; RV64IMB: # %bb.0: -; RV64IMB-NEXT: addi a1, zero, 23 +; RV64IMB-NEXT: li a1, 23 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 @@ -95,7 +95,7 @@ define signext i32 @add_mul_combine_accept_b2(i32 signext %x) { ; RV32IMB-LABEL: add_mul_combine_accept_b2: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a1, zero, 23 +; RV32IMB-NEXT: li a1, 23 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: lui a1, 50 ; RV32IMB-NEXT: addi a1, a1, 1119 @@ -104,7 +104,7 @@ ; ; RV64IMB-LABEL: add_mul_combine_accept_b2: ; RV64IMB: # %bb.0: -; RV64IMB-NEXT: addi a1, zero, 23 +; RV64IMB-NEXT: li a1, 23 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 @@ -118,7 +118,7 @@ define i64 @add_mul_combine_accept_b3(i64 %x) { ; RV32IMB-LABEL: add_mul_combine_accept_b3: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a2, zero, 23 +; RV32IMB-NEXT: li a2, 23 ; RV32IMB-NEXT: mul a1, a1, a2 ; RV32IMB-NEXT: mulhu a3, a0, a2 ; RV32IMB-NEXT: add a1, a3, a1 @@ -132,7 +132,7 @@ ; ; RV64IMB-LABEL: add_mul_combine_accept_b3: ; RV64IMB: # %bb.0: -; RV64IMB-NEXT: addi a1, zero, 23 +; RV64IMB-NEXT: li a1, 23 ; RV64IMB-NEXT: mul a0, a0, a1 ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 @@ -147,14 +147,14 @@ ; RV32IMB-LABEL: add_mul_combine_reject_a1: ; RV32IMB: # %bb.0: ; RV32IMB-NEXT: addi a0, a0, 1971 -; RV32IMB-NEXT: addi a1, zero, 29 +; RV32IMB-NEXT: li a1, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_a1: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addiw a0, a0, 1971 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1971 @@ -166,14 +166,14 @@ ; RV32IMB-LABEL: add_mul_combine_reject_a2: ; RV32IMB: # %bb.0: ; RV32IMB-NEXT: addi a0, a0, 1971 -; RV32IMB-NEXT: addi a1, zero, 29 +; RV32IMB-NEXT: li a1, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_a2: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addiw a0, a0, 1971 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1971 @@ -184,7 +184,7 @@ define i64 @add_mul_combine_reject_a3(i64 %x) { ; RV32IMB-LABEL: add_mul_combine_reject_a3: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a2, zero, 29 +; RV32IMB-NEXT: li a2, 29 ; RV32IMB-NEXT: mul a1, a1, a2 ; RV32IMB-NEXT: mulhu a3, a0, a2 ; RV32IMB-NEXT: add a1, a3, a1 @@ -199,7 +199,7 @@ ; RV64IMB-LABEL: add_mul_combine_reject_a3: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addi a0, a0, 1971 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mul a0, a0, a1 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 1971 @@ -250,7 +250,7 @@ define i64 @add_mul_combine_reject_c3(i64 %x) { ; RV32IMB-LABEL: add_mul_combine_reject_c3: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a2, zero, 73 +; RV32IMB-NEXT: li a2, 73 ; RV32IMB-NEXT: mul a1, a1, a2 ; RV32IMB-NEXT: mulhu a3, a0, a2 ; RV32IMB-NEXT: add a1, a3, a1 @@ -314,7 +314,7 @@ define i64 @add_mul_combine_reject_d3(i64 %x) { ; RV32IMB-LABEL: add_mul_combine_reject_d3: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a2, zero, 192 +; RV32IMB-NEXT: li a2, 192 ; RV32IMB-NEXT: mulhu a2, a0, a2 ; RV32IMB-NEXT: sh1add a1, a1, a1 ; RV32IMB-NEXT: slli a1, a1, 6 @@ -343,14 +343,14 @@ ; RV32IMB-LABEL: 
add_mul_combine_reject_e1: ; RV32IMB: # %bb.0: ; RV32IMB-NEXT: addi a0, a0, 1971 -; RV32IMB-NEXT: addi a1, zero, 29 +; RV32IMB-NEXT: li a1, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_e1: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addiw a0, a0, 1971 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 @@ -362,14 +362,14 @@ ; RV32IMB-LABEL: add_mul_combine_reject_e2: ; RV32IMB: # %bb.0: ; RV32IMB-NEXT: addi a0, a0, 1971 -; RV32IMB-NEXT: addi a1, zero, 29 +; RV32IMB-NEXT: li a1, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_e2: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addiw a0, a0, 1971 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 @@ -380,7 +380,7 @@ define i64 @add_mul_combine_reject_e3(i64 %x) { ; RV32IMB-LABEL: add_mul_combine_reject_e3: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a2, zero, 29 +; RV32IMB-NEXT: li a2, 29 ; RV32IMB-NEXT: mul a1, a1, a2 ; RV32IMB-NEXT: mulhu a3, a0, a2 ; RV32IMB-NEXT: add a1, a3, a1 @@ -395,7 +395,7 @@ ; RV64IMB-LABEL: add_mul_combine_reject_e3: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addi a0, a0, 1971 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mul a0, a0, a1 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 29 @@ -407,7 +407,7 @@ ; RV32IMB-LABEL: add_mul_combine_reject_f1: ; RV32IMB: # %bb.0: ; RV32IMB-NEXT: addi a0, a0, 1972 -; RV32IMB-NEXT: addi a1, zero, 29 +; RV32IMB-NEXT: li a1, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 11 ; RV32IMB-NEXT: ret @@ -415,7 +415,7 @@ ; RV64IMB-LABEL: add_mul_combine_reject_f1: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addiw a0, a0, 1972 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 11 ; RV64IMB-NEXT: ret @@ -428,7 +428,7 @@ ; RV32IMB-LABEL: add_mul_combine_reject_f2: ; RV32IMB: # %bb.0: ; RV32IMB-NEXT: addi a0, a0, 1972 -; RV32IMB-NEXT: addi a1, zero, 29 +; RV32IMB-NEXT: li a1, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 11 ; RV32IMB-NEXT: ret @@ -436,7 +436,7 @@ ; RV64IMB-LABEL: add_mul_combine_reject_f2: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addiw a0, a0, 1972 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 11 ; RV64IMB-NEXT: ret @@ -448,7 +448,7 @@ define i64 @add_mul_combine_reject_f3(i64 %x) { ; RV32IMB-LABEL: add_mul_combine_reject_f3: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a2, zero, 29 +; RV32IMB-NEXT: li a2, 29 ; RV32IMB-NEXT: mul a1, a1, a2 ; RV32IMB-NEXT: mulhu a3, a0, a2 ; RV32IMB-NEXT: add a1, a3, a1 @@ -463,7 +463,7 @@ ; RV64IMB-LABEL: add_mul_combine_reject_f3: ; RV64IMB: # %bb.0: ; RV64IMB-NEXT: addi a0, a0, 1972 -; RV64IMB-NEXT: addi a1, zero, 29 +; RV64IMB-NEXT: li a1, 29 ; RV64IMB-NEXT: mul a0, a0, a1 ; RV64IMB-NEXT: addi a0, a0, 11 ; RV64IMB-NEXT: ret @@ -517,7 +517,7 @@ define i64 @add_mul_combine_reject_g3(i64 %x) { ; RV32IMB-LABEL: add_mul_combine_reject_g3: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a2, zero, 73 +; RV32IMB-NEXT: li a2, 73 ; RV32IMB-NEXT: mul a1, a1, a2 ; RV32IMB-NEXT: mulhu a3, a0, a2 ; RV32IMB-NEXT: add a1, a3, a1 @@ -545,7 +545,7 @@ define i64 @add_mul_combine_infinite_loop(i64 %x) { ; RV32IMB-LABEL: add_mul_combine_infinite_loop: ; RV32IMB: # %bb.0: -; RV32IMB-NEXT: addi a2, zero, 24 +; RV32IMB-NEXT: li a2, 24 ; 
RV32IMB-NEXT: mulhu a2, a0, a2 ; RV32IMB-NEXT: sh1add a1, a1, a1 ; RV32IMB-NEXT: sh3add a1, a1, a2 diff --git a/llvm/test/CodeGen/RISCV/alloca.ll b/llvm/test/CodeGen/RISCV/alloca.ll --- a/llvm/test/CodeGen/RISCV/alloca.ll +++ b/llvm/test/CodeGen/RISCV/alloca.ll @@ -76,20 +76,20 @@ ; RV32I-NEXT: sub a0, sp, a0 ; RV32I-NEXT: mv sp, a0 ; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: addi a1, zero, 12 +; RV32I-NEXT: li a1, 12 ; RV32I-NEXT: sw a1, 12(sp) -; RV32I-NEXT: addi a1, zero, 11 +; RV32I-NEXT: li a1, 11 ; RV32I-NEXT: sw a1, 8(sp) -; RV32I-NEXT: addi a1, zero, 10 +; RV32I-NEXT: li a1, 10 ; RV32I-NEXT: sw a1, 4(sp) -; RV32I-NEXT: addi t0, zero, 9 -; RV32I-NEXT: addi a1, zero, 2 -; RV32I-NEXT: addi a2, zero, 3 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: addi a5, zero, 6 -; RV32I-NEXT: addi a6, zero, 7 -; RV32I-NEXT: addi a7, zero, 8 +; RV32I-NEXT: li t0, 9 +; RV32I-NEXT: li a1, 2 +; RV32I-NEXT: li a2, 3 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 5 +; RV32I-NEXT: li a5, 6 +; RV32I-NEXT: li a6, 7 +; RV32I-NEXT: li a7, 8 ; RV32I-NEXT: sw t0, 0(sp) ; RV32I-NEXT: call func@plt ; RV32I-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/alu32.ll b/llvm/test/CodeGen/RISCV/alu32.ll --- a/llvm/test/CodeGen/RISCV/alu32.ll +++ b/llvm/test/CodeGen/RISCV/alu32.ll @@ -192,13 +192,13 @@ define i32 @sub_negative_constant_lhs(i32 %a) nounwind { ; RV32I-LABEL: sub_negative_constant_lhs: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, -2 +; RV32I-NEXT: li a1, -2 ; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sub_negative_constant_lhs: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -2 +; RV64I-NEXT: li a1, -2 ; RV64I-NEXT: subw a0, a1, a0 ; RV64I-NEXT: ret %1 = sub i32 -2, %a @@ -222,13 +222,13 @@ define i32 @sll_negative_constant_lhs(i32 %a) nounwind { ; RV32I-LABEL: sll_negative_constant_lhs: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: sll a0, a1, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sll_negative_constant_lhs: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: sllw a0, a1, a0 ; RV64I-NEXT: ret %1 = shl i32 -1, %a @@ -300,13 +300,13 @@ define i32 @srl_negative_constant_lhs(i32 %a) nounwind { ; RV32I-LABEL: srl_negative_constant_lhs: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: srl a0, a1, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: srl_negative_constant_lhs: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: srlw a0, a1, a0 ; RV64I-NEXT: ret %1 = lshr i32 -1, %a diff --git a/llvm/test/CodeGen/RISCV/alu64.ll b/llvm/test/CodeGen/RISCV/alu64.ll --- a/llvm/test/CodeGen/RISCV/alu64.ll +++ b/llvm/test/CodeGen/RISCV/alu64.ll @@ -39,11 +39,11 @@ ; RV32I-NEXT: beqz a1, .LBB1_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: slti a0, a1, 0 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB1_2: ; RV32I-NEXT: sltiu a0, a0, 2 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret %1 = icmp slt i64 %a, 2 %2 = zext i1 %1 to i64 @@ -60,12 +60,12 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: beqz a1, .LBB2_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: mv a0, zero -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a0, 0 +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB2_2: ; RV32I-NEXT: sltiu a0, a0, 3 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret %1 = icmp ult i64 %a, 3 %2 = zext i1 %1 to i64 @@ -109,7 +109,7 @@ ; RV32I-LABEL: andi: ; RV32I: # %bb.0: ; 
RV32I-NEXT: andi a0, a0, 6 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret %1 = and i64 %a, 6 ret i64 %1 @@ -215,11 +215,11 @@ ; RV32I-NEXT: bltz a3, .LBB11_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sll a1, a0, a3 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB11_2: ; RV32I-NEXT: sll a1, a1, a2 -; RV32I-NEXT: addi a3, zero, 31 +; RV32I-NEXT: li a3, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: srli a4, a0, 1 ; RV32I-NEXT: srl a3, a4, a3 @@ -241,11 +241,11 @@ ; RV32I-NEXT: beq a1, a3, .LBB12_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: slt a0, a1, a3 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB12_2: ; RV32I-NEXT: sltu a0, a0, a2 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret %1 = icmp slt i64 %a, %b %2 = zext i1 %1 to i64 @@ -263,11 +263,11 @@ ; RV32I-NEXT: beq a1, a3, .LBB13_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu a0, a1, a3 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB13_2: ; RV32I-NEXT: sltu a0, a0, a2 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret %1 = icmp ult i64 %a, %b %2 = zext i1 %1 to i64 @@ -301,11 +301,11 @@ ; RV32I-NEXT: bltz a3, .LBB15_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srl a0, a1, a3 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB15_2: ; RV32I-NEXT: srl a0, a0, a2 -; RV32I-NEXT: addi a3, zero, 31 +; RV32I-NEXT: li a3, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: slli a4, a1, 1 ; RV32I-NEXT: sll a3, a4, a3 @@ -332,7 +332,7 @@ ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB16_2: ; RV32I-NEXT: srl a0, a0, a2 -; RV32I-NEXT: addi a3, zero, 31 +; RV32I-NEXT: li a3, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: slli a4, a1, 1 ; RV32I-NEXT: sll a3, a4, a3 diff --git a/llvm/test/CodeGen/RISCV/analyze-branch.ll b/llvm/test/CodeGen/RISCV/analyze-branch.ll --- a/llvm/test/CodeGen/RISCV/analyze-branch.ll +++ b/llvm/test/CodeGen/RISCV/analyze-branch.ll @@ -17,7 +17,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 42 +; RV32I-NEXT: li a1, 42 ; RV32I-NEXT: bne a0, a1, .LBB0_3 ; RV32I-NEXT: # %bb.1: # %true ; RV32I-NEXT: call test_true@plt @@ -49,7 +49,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 42 +; RV32I-NEXT: li a1, 42 ; RV32I-NEXT: beq a0, a1, .LBB1_3 ; RV32I-NEXT: # %bb.1: # %false ; RV32I-NEXT: call test_false@plt diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll --- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll +++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll @@ -15,8 +15,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -26,7 +26,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -51,8 +51,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li 
a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -62,7 +62,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -91,8 +91,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -102,7 +102,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -127,8 +127,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -138,7 +138,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -167,8 +167,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -178,7 +178,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -203,8 +203,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -214,7 +214,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -243,8 +243,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 3 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -254,7 +254,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -279,8 +279,8 @@ ; 
RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 3 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -290,7 +290,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -319,8 +319,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 3 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -330,7 +330,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -355,8 +355,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 3 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -366,7 +366,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -395,8 +395,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -406,7 +406,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -431,8 +431,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -442,7 +442,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -471,8 +471,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -482,7 +482,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli 
a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -507,8 +507,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -518,7 +518,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -547,8 +547,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -558,7 +558,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -583,8 +583,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -594,7 +594,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -623,8 +623,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -634,7 +634,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -659,8 +659,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -670,7 +670,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -699,8 +699,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: call 
__atomic_compare_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -710,7 +710,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a3, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a4, a4, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -735,8 +735,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -746,7 +746,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -775,8 +775,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -812,8 +812,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -853,8 +853,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -890,8 +890,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -931,8 +931,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -968,8 +968,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1009,8 +1009,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 3 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ 
-1046,8 +1046,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 3 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1087,8 +1087,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 3 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1124,8 +1124,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 3 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1165,8 +1165,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1202,8 +1202,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1243,8 +1243,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1280,8 +1280,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1321,8 +1321,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1358,8 +1358,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1399,8 +1399,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li 
a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1436,8 +1436,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1477,8 +1477,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1514,8 +1514,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1555,8 +1555,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1579,8 +1579,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1608,8 +1608,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1632,8 +1632,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1661,8 +1661,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1685,8 +1685,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1714,8 +1714,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; 
RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 3 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1738,8 +1738,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 3 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1767,8 +1767,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 3 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1791,8 +1791,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 3 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1820,8 +1820,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1844,8 +1844,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1873,8 +1873,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1897,8 +1897,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1926,8 +1926,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1950,8 +1950,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; 
RV64I-NEXT: addi sp, sp, 16 @@ -1979,8 +1979,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2003,8 +2003,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2032,8 +2032,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2056,8 +2056,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2088,8 +2088,8 @@ ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a4 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32IA-NEXT: mv a1, sp ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a4 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -2117,8 +2117,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2147,10 +2147,10 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a5 -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2164,10 +2164,10 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 2 +; RV32IA-NEXT: li a4, 2 ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a5 -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -2179,8 +2179,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 0 ; 
RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2209,8 +2209,8 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 2 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt @@ -2226,8 +2226,8 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 2 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 2 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt @@ -2241,8 +2241,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2271,10 +2271,10 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 3 +; RV32I-NEXT: li a4, 3 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a5 -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2288,10 +2288,10 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 3 +; RV32IA-NEXT: li a4, 3 ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a5 -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -2303,8 +2303,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 3 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2333,8 +2333,8 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 3 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 3 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt @@ -2350,8 +2350,8 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 3 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 3 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt @@ -2365,8 +2365,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 3 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2395,10 +2395,10 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 4 +; RV32I-NEXT: li a4, 4 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a5 -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call 
__atomic_compare_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2412,10 +2412,10 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 4 +; RV32IA-NEXT: li a4, 4 ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a5 -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -2427,8 +2427,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2457,8 +2457,8 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 4 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 4 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt @@ -2474,8 +2474,8 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 4 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 4 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt @@ -2489,8 +2489,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2519,10 +2519,10 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a5 -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2536,10 +2536,10 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 5 +; RV32IA-NEXT: li a4, 5 ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a5 -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -2551,8 +2551,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2581,8 +2581,8 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 5 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt @@ -2598,8 +2598,8 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 5 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 5 +; RV32IA-NEXT: li a5, 2 ; 
RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt @@ -2613,8 +2613,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2643,8 +2643,8 @@ ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: addi a5, zero, 5 +; RV32I-NEXT: li a4, 5 +; RV32I-NEXT: li a5, 5 ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt @@ -2660,8 +2660,8 @@ ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 5 -; RV32IA-NEXT: addi a5, zero, 5 +; RV32IA-NEXT: li a4, 5 +; RV32IA-NEXT: li a5, 5 ; RV32IA-NEXT: mv a2, a3 ; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt @@ -2675,8 +2675,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll --- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll +++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll @@ -13,7 +13,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -28,7 +28,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -47,7 +47,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -62,7 +62,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -81,7 +81,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 2 +; RV32I-NEXT: li a1, 2 ; RV32I-NEXT: call __atomic_load_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -97,7 +97,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 2 +; RV64I-NEXT: li a1, 2 ; RV64I-NEXT: call __atomic_load_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -117,7 +117,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 5 +; 
RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __atomic_load_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -134,7 +134,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __atomic_load_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -155,7 +155,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -170,7 +170,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -189,7 +189,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -204,7 +204,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -223,7 +223,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 2 +; RV32I-NEXT: li a1, 2 ; RV32I-NEXT: call __atomic_load_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -239,7 +239,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 2 +; RV64I-NEXT: li a1, 2 ; RV64I-NEXT: call __atomic_load_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -259,7 +259,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __atomic_load_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -276,7 +276,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __atomic_load_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -297,7 +297,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -312,7 +312,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -331,7 +331,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_4@plt ; 
RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -346,7 +346,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -365,7 +365,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 2 +; RV32I-NEXT: li a1, 2 ; RV32I-NEXT: call __atomic_load_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -381,7 +381,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 2 +; RV64I-NEXT: li a1, 2 ; RV64I-NEXT: call __atomic_load_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -401,7 +401,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __atomic_load_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -418,7 +418,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __atomic_load_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -439,7 +439,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -449,7 +449,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a1, zero +; RV32IA-NEXT: li a1, 0 ; RV32IA-NEXT: call __atomic_load_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -459,7 +459,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -478,7 +478,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -488,7 +488,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a1, zero +; RV32IA-NEXT: li a1, 0 ; RV32IA-NEXT: call __atomic_load_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -498,7 +498,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -517,7 +517,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 2 +; RV32I-NEXT: li a1, 2 ; RV32I-NEXT: call __atomic_load_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded 
Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -527,7 +527,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a1, zero, 2 +; RV32IA-NEXT: li a1, 2 ; RV32IA-NEXT: call __atomic_load_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -537,7 +537,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 2 +; RV64I-NEXT: li a1, 2 ; RV64I-NEXT: call __atomic_load_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -557,7 +557,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __atomic_load_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -567,7 +567,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a1, zero, 5 +; RV32IA-NEXT: li a1, 5 ; RV32IA-NEXT: call __atomic_load_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -577,7 +577,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __atomic_load_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -598,7 +598,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_store_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -613,7 +613,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_store_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -632,7 +632,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_store_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -647,7 +647,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_store_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -666,7 +666,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_store_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -682,7 +682,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_store_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -702,7 +702,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_store_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: 
addi sp, sp, 16 @@ -718,7 +718,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_store_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -738,7 +738,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_store_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -753,7 +753,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_store_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -772,7 +772,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_store_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -787,7 +787,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_store_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -806,7 +806,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_store_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -822,7 +822,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_store_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -842,7 +842,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_store_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -858,7 +858,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_store_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -878,7 +878,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_store_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -893,7 +893,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_store_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -912,7 +912,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_store_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -927,7 +927,7 @@ ; RV64I: # 
%bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_store_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -946,7 +946,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_store_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -962,7 +962,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_store_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -982,7 +982,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_store_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -998,7 +998,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_store_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1018,7 +1018,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_store_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1028,7 +1028,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_store_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -1038,7 +1038,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_store_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1057,7 +1057,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_store_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1067,7 +1067,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_store_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -1077,7 +1077,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_store_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1096,7 +1096,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: call __atomic_store_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1106,7 +1106,7 @@ ; RV32IA: # %bb.0: ; 
RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 3 +; RV32IA-NEXT: li a3, 3 ; RV32IA-NEXT: call __atomic_store_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -1116,7 +1116,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_store_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1136,7 +1136,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: li a3, 5 ; RV32I-NEXT: call __atomic_store_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1146,7 +1146,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 5 +; RV32IA-NEXT: li a3, 5 ; RV32IA-NEXT: call __atomic_store_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -1156,7 +1156,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_store_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll --- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -13,7 +13,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -23,7 +23,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -43,7 +43,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -53,7 +53,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -77,7 +77,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -87,7 +87,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -107,7 +107,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; 
RV64I-NEXT: addi sp, sp, 16 @@ -117,7 +117,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -141,7 +141,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -151,7 +151,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -171,7 +171,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -181,7 +181,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -205,7 +205,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -215,7 +215,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -235,7 +235,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -245,7 +245,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -269,7 +269,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_exchange_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -279,7 +279,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -299,7 +299,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_exchange_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -309,7 +309,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; 
RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -333,7 +333,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_add_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -343,7 +343,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -363,7 +363,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -373,7 +373,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -397,7 +397,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_add_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -407,7 +407,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -427,7 +427,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_add_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -437,7 +437,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -461,7 +461,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_add_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -471,7 +471,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -491,7 +491,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_add_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -501,7 +501,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -525,7 +525,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; 
RV32I-NEXT: call __atomic_fetch_add_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -535,7 +535,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -555,7 +555,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_add_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -565,7 +565,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -589,7 +589,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_add_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -599,7 +599,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -619,7 +619,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_add_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -629,7 +629,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -653,7 +653,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_sub_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -663,7 +663,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -683,7 +683,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -693,7 +693,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -717,7 +717,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_sub_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -727,7 +727,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; 
RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -747,7 +747,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_sub_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -757,7 +757,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -781,7 +781,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_sub_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -791,7 +791,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -811,7 +811,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_sub_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -821,7 +821,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -845,7 +845,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_sub_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -855,7 +855,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -875,7 +875,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_sub_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -885,7 +885,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -909,7 +909,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_sub_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -919,7 +919,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -939,7 +939,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; 
RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_sub_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -949,7 +949,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -973,7 +973,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_and_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -983,7 +983,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: not a3, a3 ; RV32IA-NEXT: andi a1, a1, 255 @@ -997,7 +997,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1007,7 +1007,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1025,7 +1025,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_and_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1035,7 +1035,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: not a3, a3 ; RV32IA-NEXT: andi a1, a1, 255 @@ -1049,7 +1049,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_and_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1059,7 +1059,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1077,7 +1077,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_and_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1087,7 +1087,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: not a3, a3 ; RV32IA-NEXT: andi a1, a1, 255 @@ -1101,7 +1101,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_and_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1111,7 
+1111,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1129,7 +1129,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_and_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1139,7 +1139,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: not a3, a3 ; RV32IA-NEXT: andi a1, a1, 255 @@ -1153,7 +1153,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_and_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1163,7 +1163,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1181,7 +1181,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_and_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1191,7 +1191,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: not a3, a3 ; RV32IA-NEXT: andi a1, a1, 255 @@ -1205,7 +1205,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_and_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1215,7 +1215,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1233,7 +1233,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_nand_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1243,7 +1243,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -1264,7 +1264,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1274,7 +1274,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: 
sllw a1, a1, a0 @@ -1299,7 +1299,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_nand_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1309,7 +1309,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -1330,7 +1330,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_nand_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1340,7 +1340,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -1365,7 +1365,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_nand_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1375,7 +1375,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -1396,7 +1396,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_nand_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1406,7 +1406,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -1431,7 +1431,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_nand_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1441,7 +1441,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -1462,7 +1462,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_nand_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1472,7 +1472,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -1497,7 +1497,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; 
RV32I-NEXT: call __atomic_fetch_nand_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1507,7 +1507,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -1528,7 +1528,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_nand_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1538,7 +1538,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -1563,7 +1563,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_or_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1583,7 +1583,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1607,7 +1607,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_or_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1627,7 +1627,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_or_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1651,7 +1651,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_or_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1671,7 +1671,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_or_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1695,7 +1695,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_or_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1715,7 +1715,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_or_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1739,7 +1739,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_or_1@plt ; 
RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1759,7 +1759,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_or_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1783,7 +1783,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_xor_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1803,7 +1803,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1827,7 +1827,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_xor_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1847,7 +1847,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_xor_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1871,7 +1871,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_xor_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1891,7 +1891,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_xor_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1915,7 +1915,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_xor_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1935,7 +1935,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_xor_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1959,7 +1959,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_xor_1@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1979,7 +1979,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_xor_1@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2017,8 +2017,8 @@ ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li 
a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB35_4 @@ -2046,12 +2046,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -2089,8 +2089,8 @@ ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB35_4 @@ -2118,12 +2118,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -2164,8 +2164,8 @@ ; RV32I-NEXT: # in Loop: Header=BB36_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -2194,12 +2194,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a5, (a6) @@ -2236,8 +2236,8 @@ ; RV64I-NEXT: # in Loop: Header=BB36_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -2266,12 +2266,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a5, (a6) @@ -2312,9 +2312,9 @@ ; RV32I-NEXT: # in Loop: Header=BB37_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB37_4 @@ -2342,12 +2342,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 
24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -2384,9 +2384,9 @@ ; RV64I-NEXT: # in Loop: Header=BB37_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB37_4 @@ -2414,12 +2414,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -2460,8 +2460,8 @@ ; RV32I-NEXT: # in Loop: Header=BB38_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -2490,12 +2490,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a5, (a6) @@ -2532,8 +2532,8 @@ ; RV64I-NEXT: # in Loop: Header=BB38_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -2562,12 +2562,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a5, (a6) @@ -2608,8 +2608,8 @@ ; RV32I-NEXT: # in Loop: Header=BB39_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -2638,12 +2638,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; 
RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aqrl a5, (a6) @@ -2680,8 +2680,8 @@ ; RV64I-NEXT: # in Loop: Header=BB39_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -2710,12 +2710,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aqrl a5, (a6) @@ -2757,8 +2757,8 @@ ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB40_4 @@ -2786,12 +2786,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -2829,8 +2829,8 @@ ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB40_4 @@ -2858,12 +2858,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -2904,8 +2904,8 @@ ; RV32I-NEXT: # in Loop: Header=BB41_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -2934,12 +2934,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a5, (a6) @@ -2976,8 +2976,8 @@ ; RV64I-NEXT: # in Loop: Header=BB41_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 
2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -3006,12 +3006,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a5, (a6) @@ -3052,9 +3052,9 @@ ; RV32I-NEXT: # in Loop: Header=BB42_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB42_4 @@ -3082,12 +3082,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -3124,9 +3124,9 @@ ; RV64I-NEXT: # in Loop: Header=BB42_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB42_4 @@ -3154,12 +3154,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -3200,8 +3200,8 @@ ; RV32I-NEXT: # in Loop: Header=BB43_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -3230,12 +3230,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a5, (a6) @@ -3272,8 +3272,8 @@ ; RV64I-NEXT: # in Loop: Header=BB43_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -3302,12 
+3302,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a5, (a6) @@ -3348,8 +3348,8 @@ ; RV32I-NEXT: # in Loop: Header=BB44_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -3378,12 +3378,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aqrl a5, (a6) @@ -3420,8 +3420,8 @@ ; RV64I-NEXT: # in Loop: Header=BB44_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -3450,12 +3450,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aqrl a5, (a6) @@ -3496,8 +3496,8 @@ ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB45_4 @@ -3523,7 +3523,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -3560,8 +3560,8 @@ ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB45_4 @@ -3587,7 +3587,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -3627,8 +3627,8 @@ ; RV32I-NEXT: # in Loop: Header=BB46_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: 
call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -3655,7 +3655,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -3691,8 +3691,8 @@ ; RV64I-NEXT: # in Loop: Header=BB46_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -3719,7 +3719,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -3759,9 +3759,9 @@ ; RV32I-NEXT: # in Loop: Header=BB47_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB47_4 @@ -3787,7 +3787,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -3823,9 +3823,9 @@ ; RV64I-NEXT: # in Loop: Header=BB47_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB47_4 @@ -3851,7 +3851,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -3891,8 +3891,8 @@ ; RV32I-NEXT: # in Loop: Header=BB48_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -3919,7 +3919,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -3955,8 +3955,8 @@ ; RV64I-NEXT: # in Loop: Header=BB48_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -3983,7 +3983,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -4023,8 +4023,8 @@ ; RV32I-NEXT: # in Loop: Header=BB49_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi 
a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -4051,7 +4051,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -4087,8 +4087,8 @@ ; RV64I-NEXT: # in Loop: Header=BB49_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -4115,7 +4115,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -4156,8 +4156,8 @@ ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB50_4 @@ -4183,7 +4183,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -4220,8 +4220,8 @@ ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB50_4 @@ -4247,7 +4247,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -4287,8 +4287,8 @@ ; RV32I-NEXT: # in Loop: Header=BB51_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -4315,7 +4315,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -4351,8 +4351,8 @@ ; RV64I-NEXT: # in Loop: Header=BB51_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -4379,7 +4379,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -4419,9 +4419,9 @@ ; RV32I-NEXT: # in Loop: Header=BB52_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; 
RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB52_4 @@ -4447,7 +4447,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -4483,9 +4483,9 @@ ; RV64I-NEXT: # in Loop: Header=BB52_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB52_4 @@ -4511,7 +4511,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -4551,8 +4551,8 @@ ; RV32I-NEXT: # in Loop: Header=BB53_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -4579,7 +4579,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -4615,8 +4615,8 @@ ; RV64I-NEXT: # in Loop: Header=BB53_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -4643,7 +4643,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -4683,8 +4683,8 @@ ; RV32I-NEXT: # in Loop: Header=BB54_2 Depth=1 ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) @@ -4711,7 +4711,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -4747,8 +4747,8 @@ ; RV64I-NEXT: # in Loop: Header=BB54_2 Depth=1 ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) @@ -4775,7 +4775,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -4803,7 +4803,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw 
ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -4834,7 +4834,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -4869,7 +4869,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -4900,7 +4900,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -4935,7 +4935,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -4966,7 +4966,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5001,7 +5001,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5032,7 +5032,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5067,7 +5067,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_exchange_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5098,7 +5098,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_exchange_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5133,7 +5133,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_add_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5164,7 +5164,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5199,7 +5199,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi 
sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_add_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5230,7 +5230,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_add_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5265,7 +5265,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_add_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5296,7 +5296,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_add_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5331,7 +5331,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_add_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5362,7 +5362,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_add_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5397,7 +5397,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_add_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5428,7 +5428,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_add_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5463,7 +5463,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_sub_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5494,7 +5494,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5529,7 +5529,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_sub_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5560,7 +5560,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_sub_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5595,7 
+5595,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_sub_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5626,7 +5626,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_sub_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5661,7 +5661,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_sub_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5692,7 +5692,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_sub_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5727,7 +5727,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_sub_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5758,7 +5758,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_sub_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5793,7 +5793,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_and_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5818,7 +5818,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5847,7 +5847,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_and_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5872,7 +5872,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_and_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5901,7 +5901,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_and_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5926,7 +5926,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_and_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded 
Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -5955,7 +5955,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_and_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -5980,7 +5980,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_and_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6009,7 +6009,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_and_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6034,7 +6034,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_and_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6063,7 +6063,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_nand_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6095,7 +6095,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6131,7 +6131,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_nand_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6163,7 +6163,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_nand_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6199,7 +6199,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_nand_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6231,7 +6231,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_nand_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6267,7 +6267,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_nand_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6299,7 +6299,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call 
__atomic_fetch_nand_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6335,7 +6335,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_nand_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6367,7 +6367,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_nand_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6403,7 +6403,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_or_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6425,7 +6425,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6451,7 +6451,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_or_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6473,7 +6473,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_or_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6499,7 +6499,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_or_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6521,7 +6521,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_or_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6547,7 +6547,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_or_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6569,7 +6569,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_or_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6595,7 +6595,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_or_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6617,7 +6617,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 
+; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_or_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6643,7 +6643,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_xor_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6665,7 +6665,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6691,7 +6691,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_xor_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6713,7 +6713,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_xor_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6739,7 +6739,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_xor_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6761,7 +6761,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_xor_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6787,7 +6787,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_xor_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6809,7 +6809,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_xor_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6835,7 +6835,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_xor_2@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -6857,7 +6857,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_xor_2@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -6897,8 +6897,8 @@ ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) ; RV32I-NEXT: bnez a0, .LBB90_4 @@ -6932,7 +6932,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; 
RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -6970,8 +6970,8 @@ ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) ; RV64I-NEXT: bnez a0, .LBB90_4 @@ -7005,7 +7005,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -7046,8 +7046,8 @@ ; RV32I-NEXT: # in Loop: Header=BB91_2 Depth=1 ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) @@ -7082,7 +7082,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a5, (a6) @@ -7119,8 +7119,8 @@ ; RV64I-NEXT: # in Loop: Header=BB91_2 Depth=1 ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) @@ -7155,7 +7155,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a5, (a6) @@ -7196,9 +7196,9 @@ ; RV32I-NEXT: # in Loop: Header=BB92_2 Depth=1 ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) ; RV32I-NEXT: bnez a0, .LBB92_4 @@ -7232,7 +7232,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -7269,9 +7269,9 @@ ; RV64I-NEXT: # in Loop: Header=BB92_2 Depth=1 ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) ; RV64I-NEXT: bnez a0, .LBB92_4 @@ -7305,7 +7305,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -7346,8 +7346,8 @@ ; RV32I-NEXT: # in Loop: Header=BB93_2 Depth=1 ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 
-; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) @@ -7382,7 +7382,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a5, (a6) @@ -7419,8 +7419,8 @@ ; RV64I-NEXT: # in Loop: Header=BB93_2 Depth=1 ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) @@ -7455,7 +7455,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a5, (a6) @@ -7496,8 +7496,8 @@ ; RV32I-NEXT: # in Loop: Header=BB94_2 Depth=1 ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) @@ -7532,7 +7532,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aqrl a5, (a6) @@ -7569,8 +7569,8 @@ ; RV64I-NEXT: # in Loop: Header=BB94_2 Depth=1 ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) @@ -7605,7 +7605,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aqrl a5, (a6) @@ -7647,8 +7647,8 @@ ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) ; RV32I-NEXT: bnez a0, .LBB95_4 @@ -7682,7 +7682,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -7720,8 +7720,8 @@ ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) ; RV64I-NEXT: bnez a0, .LBB95_4 @@ -7755,7 +7755,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: 
sub a3, a5, a3 ; RV64IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -7796,8 +7796,8 @@ ; RV32I-NEXT: # in Loop: Header=BB96_2 Depth=1 ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) @@ -7832,7 +7832,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a5, (a6) @@ -7869,8 +7869,8 @@ ; RV64I-NEXT: # in Loop: Header=BB96_2 Depth=1 ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) @@ -7905,7 +7905,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a5, (a6) @@ -7946,9 +7946,9 @@ ; RV32I-NEXT: # in Loop: Header=BB97_2 Depth=1 ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) ; RV32I-NEXT: bnez a0, .LBB97_4 @@ -7982,7 +7982,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -8019,9 +8019,9 @@ ; RV64I-NEXT: # in Loop: Header=BB97_2 Depth=1 ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) ; RV64I-NEXT: bnez a0, .LBB97_4 @@ -8055,7 +8055,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -8096,8 +8096,8 @@ ; RV32I-NEXT: # in Loop: Header=BB98_2 Depth=1 ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) @@ -8132,7 +8132,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a5, (a6) @@ -8169,8 +8169,8 @@ ; RV64I-NEXT: # in Loop: Header=BB98_2 Depth=1 ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; 
RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) @@ -8205,7 +8205,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a5, (a6) @@ -8246,8 +8246,8 @@ ; RV32I-NEXT: # in Loop: Header=BB99_2 Depth=1 ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) @@ -8282,7 +8282,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aqrl a5, (a6) @@ -8319,8 +8319,8 @@ ; RV64I-NEXT: # in Loop: Header=BB99_2 Depth=1 ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) @@ -8355,7 +8355,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aqrl a5, (a6) @@ -8399,8 +8399,8 @@ ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: mv a0, s3 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) ; RV32I-NEXT: bnez a0, .LBB100_4 @@ -8468,8 +8468,8 @@ ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: mv a0, s3 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) ; RV64I-NEXT: bnez a0, .LBB100_4 @@ -8540,8 +8540,8 @@ ; RV32I-NEXT: # in Loop: Header=BB101_2 Depth=1 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) @@ -8609,8 +8609,8 @@ ; RV64I-NEXT: # in Loop: Header=BB101_2 Depth=1 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) @@ -8682,9 +8682,9 @@ ; RV32I-NEXT: # in Loop: Header=BB102_2 Depth=1 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s3 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) ; RV32I-NEXT: bnez a0, .LBB102_4 @@ -8751,9 +8751,9 @@ ; RV64I-NEXT: # in Loop: Header=BB102_2 Depth=1 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; 
RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s3 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) ; RV64I-NEXT: bnez a0, .LBB102_4 @@ -8824,8 +8824,8 @@ ; RV32I-NEXT: # in Loop: Header=BB103_2 Depth=1 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) @@ -8893,8 +8893,8 @@ ; RV64I-NEXT: # in Loop: Header=BB103_2 Depth=1 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) @@ -8966,8 +8966,8 @@ ; RV32I-NEXT: # in Loop: Header=BB104_2 Depth=1 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) @@ -9035,8 +9035,8 @@ ; RV64I-NEXT: # in Loop: Header=BB104_2 Depth=1 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) @@ -9109,8 +9109,8 @@ ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: mv a0, s3 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) ; RV32I-NEXT: bnez a0, .LBB105_4 @@ -9178,8 +9178,8 @@ ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: mv a0, s3 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) ; RV64I-NEXT: bnez a0, .LBB105_4 @@ -9250,8 +9250,8 @@ ; RV32I-NEXT: # in Loop: Header=BB106_2 Depth=1 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) @@ -9319,8 +9319,8 @@ ; RV64I-NEXT: # in Loop: Header=BB106_2 Depth=1 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) @@ -9392,9 +9392,9 @@ ; RV32I-NEXT: # in Loop: Header=BB107_2 Depth=1 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s3 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) ; RV32I-NEXT: bnez a0, .LBB107_4 @@ -9461,9 +9461,9 @@ ; RV64I-NEXT: # in Loop: Header=BB107_2 Depth=1 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s3 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; 
RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) ; RV64I-NEXT: bnez a0, .LBB107_4 @@ -9534,8 +9534,8 @@ ; RV32I-NEXT: # in Loop: Header=BB108_2 Depth=1 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) @@ -9603,8 +9603,8 @@ ; RV64I-NEXT: # in Loop: Header=BB108_2 Depth=1 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) @@ -9676,8 +9676,8 @@ ; RV32I-NEXT: # in Loop: Header=BB109_2 Depth=1 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) @@ -9745,8 +9745,8 @@ ; RV64I-NEXT: # in Loop: Header=BB109_2 Depth=1 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) @@ -9803,7 +9803,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -9818,7 +9818,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -9837,7 +9837,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -9852,7 +9852,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -9871,7 +9871,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -9886,7 +9886,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -9905,7 +9905,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi 
sp, sp, 16 @@ -9920,7 +9920,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -9939,7 +9939,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -9954,7 +9954,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_exchange_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -9973,7 +9973,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_add_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -9988,7 +9988,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10007,7 +10007,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_add_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10022,7 +10022,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_add_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10041,7 +10041,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_add_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10056,7 +10056,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_add_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10075,7 +10075,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_add_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10090,7 +10090,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_add_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10109,7 +10109,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_add_4@plt ; 
RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10124,7 +10124,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_add_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10143,7 +10143,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_sub_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10159,7 +10159,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10179,7 +10179,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_sub_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10195,7 +10195,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_sub_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10215,7 +10215,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_sub_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10231,7 +10231,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_sub_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10251,7 +10251,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_sub_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10267,7 +10267,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_sub_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10287,7 +10287,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_sub_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10303,7 +10303,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_sub_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10323,7 +10323,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; 
RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_and_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10338,7 +10338,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10357,7 +10357,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_and_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10372,7 +10372,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_and_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10391,7 +10391,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_and_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10406,7 +10406,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_and_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10425,7 +10425,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_and_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10440,7 +10440,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_and_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10459,7 +10459,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_and_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10474,7 +10474,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_and_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10493,7 +10493,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_nand_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10515,7 +10515,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10541,7 +10541,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw 
ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_nand_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10563,7 +10563,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_nand_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10589,7 +10589,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_nand_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10611,7 +10611,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_nand_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10637,7 +10637,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_nand_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10659,7 +10659,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_nand_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10685,7 +10685,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_nand_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10707,7 +10707,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_nand_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10733,7 +10733,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_or_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10748,7 +10748,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10767,7 +10767,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_or_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10782,7 +10782,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_or_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10801,7 
+10801,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_or_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10816,7 +10816,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_or_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10835,7 +10835,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_or_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10850,7 +10850,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_or_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10869,7 +10869,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_or_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10884,7 +10884,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_or_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10903,7 +10903,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_xor_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10918,7 +10918,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10937,7 +10937,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: call __atomic_fetch_xor_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10952,7 +10952,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_xor_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -10971,7 +10971,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 3 +; RV32I-NEXT: li a2, 3 ; RV32I-NEXT: call __atomic_fetch_xor_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -10986,7 +10986,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_xor_4@plt ; RV64I-NEXT: ld ra, 8(sp) 
# 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -11005,7 +11005,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 4 +; RV32I-NEXT: li a2, 4 ; RV32I-NEXT: call __atomic_fetch_xor_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -11020,7 +11020,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_xor_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -11039,7 +11039,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 +; RV32I-NEXT: li a2, 5 ; RV32I-NEXT: call __atomic_fetch_xor_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -11054,7 +11054,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_xor_4@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -11084,8 +11084,8 @@ ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB145_4 @@ -11127,8 +11127,8 @@ ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB145_4 @@ -11173,8 +11173,8 @@ ; RV32I-NEXT: # in Loop: Header=BB146_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -11216,8 +11216,8 @@ ; RV64I-NEXT: # in Loop: Header=BB146_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -11263,9 +11263,9 @@ ; RV32I-NEXT: # in Loop: Header=BB147_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB147_4 @@ -11306,9 +11306,9 @@ ; RV64I-NEXT: # in Loop: Header=BB147_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB147_4 @@ -11353,8 +11353,8 @@ ; RV32I-NEXT: # in Loop: Header=BB148_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; 
RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -11396,8 +11396,8 @@ ; RV64I-NEXT: # in Loop: Header=BB148_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -11443,8 +11443,8 @@ ; RV32I-NEXT: # in Loop: Header=BB149_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -11486,8 +11486,8 @@ ; RV64I-NEXT: # in Loop: Header=BB149_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -11534,8 +11534,8 @@ ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB150_4 @@ -11577,8 +11577,8 @@ ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB150_4 @@ -11623,8 +11623,8 @@ ; RV32I-NEXT: # in Loop: Header=BB151_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -11666,8 +11666,8 @@ ; RV64I-NEXT: # in Loop: Header=BB151_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -11713,9 +11713,9 @@ ; RV32I-NEXT: # in Loop: Header=BB152_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB152_4 @@ -11756,9 +11756,9 @@ ; RV64I-NEXT: # in Loop: Header=BB152_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB152_4 @@ -11803,8 +11803,8 @@ ; RV32I-NEXT: # in Loop: Header=BB153_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -11846,8 +11846,8 @@ ; RV64I-NEXT: 
# in Loop: Header=BB153_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -11893,8 +11893,8 @@ ; RV32I-NEXT: # in Loop: Header=BB154_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -11936,8 +11936,8 @@ ; RV64I-NEXT: # in Loop: Header=BB154_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -11984,8 +11984,8 @@ ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB155_4 @@ -12027,8 +12027,8 @@ ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB155_4 @@ -12073,8 +12073,8 @@ ; RV32I-NEXT: # in Loop: Header=BB156_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -12116,8 +12116,8 @@ ; RV64I-NEXT: # in Loop: Header=BB156_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -12163,9 +12163,9 @@ ; RV32I-NEXT: # in Loop: Header=BB157_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB157_4 @@ -12206,9 +12206,9 @@ ; RV64I-NEXT: # in Loop: Header=BB157_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB157_4 @@ -12253,8 +12253,8 @@ ; RV32I-NEXT: # in Loop: Header=BB158_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -12296,8 +12296,8 @@ ; RV64I-NEXT: # in Loop: Header=BB158_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: 
addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -12343,8 +12343,8 @@ ; RV32I-NEXT: # in Loop: Header=BB159_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -12386,8 +12386,8 @@ ; RV64I-NEXT: # in Loop: Header=BB159_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -12434,8 +12434,8 @@ ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB160_4 @@ -12477,8 +12477,8 @@ ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB160_4 @@ -12523,8 +12523,8 @@ ; RV32I-NEXT: # in Loop: Header=BB161_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 2 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -12566,8 +12566,8 @@ ; RV64I-NEXT: # in Loop: Header=BB161_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -12613,9 +12613,9 @@ ; RV32I-NEXT: # in Loop: Header=BB162_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB162_4 @@ -12656,9 +12656,9 @@ ; RV64I-NEXT: # in Loop: Header=BB162_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB162_4 @@ -12703,8 +12703,8 @@ ; RV32I-NEXT: # in Loop: Header=BB163_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -12746,8 +12746,8 @@ ; RV64I-NEXT: # in Loop: Header=BB163_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call 
__atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -12793,8 +12793,8 @@ ; RV32I-NEXT: # in Loop: Header=BB164_2 Depth=1 ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: li a3, 5 +; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) @@ -12836,8 +12836,8 @@ ; RV64I-NEXT: # in Loop: Header=BB164_2 Depth=1 ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) @@ -12873,7 +12873,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -12883,7 +12883,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -12893,7 +12893,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -12912,7 +12912,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: li a3, 2 ; RV32I-NEXT: call __atomic_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -12922,7 +12922,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 2 +; RV32IA-NEXT: li a3, 2 ; RV32IA-NEXT: call __atomic_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -12932,7 +12932,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -12951,7 +12951,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: call __atomic_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -12961,7 +12961,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 3 +; RV32IA-NEXT: li a3, 3 ; RV32IA-NEXT: call __atomic_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -12971,7 +12971,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -12990,7 +12990,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; 
RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: li a3, 4 ; RV32I-NEXT: call __atomic_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13000,7 +13000,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 4 +; RV32IA-NEXT: li a3, 4 ; RV32IA-NEXT: call __atomic_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13010,7 +13010,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13029,7 +13029,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: li a3, 5 ; RV32I-NEXT: call __atomic_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13039,7 +13039,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 5 +; RV32IA-NEXT: li a3, 5 ; RV32IA-NEXT: call __atomic_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13049,7 +13049,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13068,7 +13068,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_add_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13078,7 +13078,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_add_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13088,7 +13088,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13107,7 +13107,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: li a3, 2 ; RV32I-NEXT: call __atomic_fetch_add_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13117,7 +13117,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 2 +; RV32IA-NEXT: li a3, 2 ; RV32IA-NEXT: call __atomic_fetch_add_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13127,7 +13127,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_add_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; 
RV64I-NEXT: addi sp, sp, 16 @@ -13146,7 +13146,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: call __atomic_fetch_add_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13156,7 +13156,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 3 +; RV32IA-NEXT: li a3, 3 ; RV32IA-NEXT: call __atomic_fetch_add_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13166,7 +13166,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_add_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13185,7 +13185,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: li a3, 4 ; RV32I-NEXT: call __atomic_fetch_add_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13195,7 +13195,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 4 +; RV32IA-NEXT: li a3, 4 ; RV32IA-NEXT: call __atomic_fetch_add_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13205,7 +13205,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_add_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13224,7 +13224,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: li a3, 5 ; RV32I-NEXT: call __atomic_fetch_add_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13234,7 +13234,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 5 +; RV32IA-NEXT: li a3, 5 ; RV32IA-NEXT: call __atomic_fetch_add_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13244,7 +13244,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_add_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13263,7 +13263,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_sub_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13273,7 +13273,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_sub_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13283,7 +13283,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li 
a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13303,7 +13303,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: li a3, 2 ; RV32I-NEXT: call __atomic_fetch_sub_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13313,7 +13313,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 2 +; RV32IA-NEXT: li a3, 2 ; RV32IA-NEXT: call __atomic_fetch_sub_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13323,7 +13323,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_sub_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13343,7 +13343,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: call __atomic_fetch_sub_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13353,7 +13353,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 3 +; RV32IA-NEXT: li a3, 3 ; RV32IA-NEXT: call __atomic_fetch_sub_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13363,7 +13363,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_sub_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13383,7 +13383,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: li a3, 4 ; RV32I-NEXT: call __atomic_fetch_sub_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13393,7 +13393,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 4 +; RV32IA-NEXT: li a3, 4 ; RV32IA-NEXT: call __atomic_fetch_sub_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13403,7 +13403,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_sub_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13423,7 +13423,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: li a3, 5 ; RV32I-NEXT: call __atomic_fetch_sub_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13433,7 +13433,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 5 +; RV32IA-NEXT: li a3, 5 ; RV32IA-NEXT: call __atomic_fetch_sub_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13443,7 +13443,7 @@ ; RV64I: # %bb.0: ; 
RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_sub_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13463,7 +13463,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_and_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13473,7 +13473,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_and_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13483,7 +13483,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13502,7 +13502,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: li a3, 2 ; RV32I-NEXT: call __atomic_fetch_and_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13512,7 +13512,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 2 +; RV32IA-NEXT: li a3, 2 ; RV32IA-NEXT: call __atomic_fetch_and_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13522,7 +13522,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_and_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13541,7 +13541,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: call __atomic_fetch_and_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13551,7 +13551,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 3 +; RV32IA-NEXT: li a3, 3 ; RV32IA-NEXT: call __atomic_fetch_and_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13561,7 +13561,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_and_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13580,7 +13580,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: li a3, 4 ; RV32I-NEXT: call __atomic_fetch_and_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13590,7 +13590,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 4 +; RV32IA-NEXT: li a3, 4 ; RV32IA-NEXT: call __atomic_fetch_and_8@plt ; RV32IA-NEXT: lw ra, 
12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13600,7 +13600,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_and_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13619,7 +13619,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: li a3, 5 ; RV32I-NEXT: call __atomic_fetch_and_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13629,7 +13629,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 5 +; RV32IA-NEXT: li a3, 5 ; RV32IA-NEXT: call __atomic_fetch_and_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13639,7 +13639,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_and_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13658,7 +13658,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_nand_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13668,7 +13668,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_nand_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13678,7 +13678,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13704,7 +13704,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: li a3, 2 ; RV32I-NEXT: call __atomic_fetch_nand_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13714,7 +13714,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 2 +; RV32IA-NEXT: li a3, 2 ; RV32IA-NEXT: call __atomic_fetch_nand_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13724,7 +13724,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_nand_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13750,7 +13750,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: call __atomic_fetch_nand_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13760,7 +13760,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: 
addi a3, zero, 3 +; RV32IA-NEXT: li a3, 3 ; RV32IA-NEXT: call __atomic_fetch_nand_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13770,7 +13770,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_nand_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13796,7 +13796,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: li a3, 4 ; RV32I-NEXT: call __atomic_fetch_nand_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13806,7 +13806,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 4 +; RV32IA-NEXT: li a3, 4 ; RV32IA-NEXT: call __atomic_fetch_nand_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13816,7 +13816,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_nand_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13842,7 +13842,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: li a3, 5 ; RV32I-NEXT: call __atomic_fetch_nand_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13852,7 +13852,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 5 +; RV32IA-NEXT: li a3, 5 ; RV32IA-NEXT: call __atomic_fetch_nand_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13862,7 +13862,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_nand_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13888,7 +13888,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_or_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13898,7 +13898,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_or_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13908,7 +13908,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13927,7 +13927,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: li a3, 2 ; RV32I-NEXT: call __atomic_fetch_or_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13937,7 +13937,7 @@ ; RV32IA: # 
%bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 2 +; RV32IA-NEXT: li a3, 2 ; RV32IA-NEXT: call __atomic_fetch_or_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13947,7 +13947,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_or_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -13966,7 +13966,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: call __atomic_fetch_or_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -13976,7 +13976,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 3 +; RV32IA-NEXT: li a3, 3 ; RV32IA-NEXT: call __atomic_fetch_or_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -13986,7 +13986,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_or_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -14005,7 +14005,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: li a3, 4 ; RV32I-NEXT: call __atomic_fetch_or_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -14015,7 +14015,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 4 +; RV32IA-NEXT: li a3, 4 ; RV32IA-NEXT: call __atomic_fetch_or_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -14025,7 +14025,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_or_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -14044,7 +14044,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: li a3, 5 ; RV32I-NEXT: call __atomic_fetch_or_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -14054,7 +14054,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 5 +; RV32IA-NEXT: li a3, 5 ; RV32IA-NEXT: call __atomic_fetch_or_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -14064,7 +14064,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_or_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -14083,7 +14083,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_xor_8@plt ; RV32I-NEXT: 
lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -14093,7 +14093,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_xor_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -14103,7 +14103,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -14122,7 +14122,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: li a3, 2 ; RV32I-NEXT: call __atomic_fetch_xor_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -14132,7 +14132,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 2 +; RV32IA-NEXT: li a3, 2 ; RV32IA-NEXT: call __atomic_fetch_xor_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -14142,7 +14142,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: call __atomic_fetch_xor_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -14161,7 +14161,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: li a3, 3 ; RV32I-NEXT: call __atomic_fetch_xor_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -14171,7 +14171,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 3 +; RV32IA-NEXT: li a3, 3 ; RV32IA-NEXT: call __atomic_fetch_xor_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -14181,7 +14181,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: call __atomic_fetch_xor_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -14200,7 +14200,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: li a3, 4 ; RV32I-NEXT: call __atomic_fetch_xor_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -14210,7 +14210,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 4 +; RV32IA-NEXT: li a3, 4 ; RV32IA-NEXT: call __atomic_fetch_xor_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -14220,7 +14220,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 4 +; RV64I-NEXT: li a2, 4 ; RV64I-NEXT: call __atomic_fetch_xor_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -14239,7 +14239,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; 
RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: li a3, 5 ; RV32I-NEXT: call __atomic_fetch_xor_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -14249,7 +14249,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: addi a3, zero, 5 +; RV32IA-NEXT: li a3, 5 ; RV32IA-NEXT: call __atomic_fetch_xor_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -14259,7 +14259,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a2, zero, 5 +; RV64I-NEXT: li a2, 5 ; RV64I-NEXT: call __atomic_fetch_xor_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -14293,8 +14293,8 @@ ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -14347,8 +14347,8 @@ ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -14397,8 +14397,8 @@ ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB200_4 @@ -14445,8 +14445,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 2 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -14499,8 +14499,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 2 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 2 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -14549,8 +14549,8 @@ ; RV64I-NEXT: # in Loop: Header=BB201_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -14598,9 +14598,9 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 3 +; RV32I-NEXT: li a4, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -14652,9 +14652,9 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 3 +; RV32IA-NEXT: li a4, 3 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -14702,9 +14702,9 @@ ; RV64I-NEXT: # in Loop: Header=BB202_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, 
sp -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB202_4 @@ -14751,8 +14751,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 4 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 4 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -14805,8 +14805,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 4 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 4 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -14855,8 +14855,8 @@ ; RV64I-NEXT: # in Loop: Header=BB203_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -14904,8 +14904,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: addi a5, zero, 5 +; RV32I-NEXT: li a4, 5 +; RV32I-NEXT: li a5, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -14958,8 +14958,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 5 -; RV32IA-NEXT: addi a5, zero, 5 +; RV32IA-NEXT: li a4, 5 +; RV32IA-NEXT: li a5, 5 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -15008,8 +15008,8 @@ ; RV64I-NEXT: # in Loop: Header=BB204_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -15058,8 +15058,8 @@ ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -15113,8 +15113,8 @@ ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -15164,8 +15164,8 @@ ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB205_4 @@ -15212,8 +15212,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 2 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -15267,8 +15267,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) 
; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 2 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 2 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -15318,8 +15318,8 @@ ; RV64I-NEXT: # in Loop: Header=BB206_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -15367,9 +15367,9 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 3 +; RV32I-NEXT: li a4, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -15422,9 +15422,9 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 3 +; RV32IA-NEXT: li a4, 3 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -15473,9 +15473,9 @@ ; RV64I-NEXT: # in Loop: Header=BB207_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB207_4 @@ -15522,8 +15522,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 4 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 4 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -15577,8 +15577,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 4 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 4 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -15628,8 +15628,8 @@ ; RV64I-NEXT: # in Loop: Header=BB208_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -15677,8 +15677,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: addi a5, zero, 5 +; RV32I-NEXT: li a4, 5 +; RV32I-NEXT: li a5, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -15732,8 +15732,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 5 -; RV32IA-NEXT: addi a5, zero, 5 +; RV32IA-NEXT: li a4, 5 +; RV32IA-NEXT: li a5, 5 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -15783,8 +15783,8 @@ ; RV64I-NEXT: # in Loop: Header=BB209_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; 
RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -15833,8 +15833,8 @@ ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -15887,8 +15887,8 @@ ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -15937,8 +15937,8 @@ ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB210_4 @@ -15985,8 +15985,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 2 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -16039,8 +16039,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 2 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 2 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -16089,8 +16089,8 @@ ; RV64I-NEXT: # in Loop: Header=BB211_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -16138,9 +16138,9 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 3 +; RV32I-NEXT: li a4, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -16192,9 +16192,9 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 3 +; RV32IA-NEXT: li a4, 3 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -16242,9 +16242,9 @@ ; RV64I-NEXT: # in Loop: Header=BB212_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB212_4 @@ -16291,8 +16291,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 4 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 4 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -16345,8 +16345,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi 
a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 4 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 4 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -16395,8 +16395,8 @@ ; RV64I-NEXT: # in Loop: Header=BB213_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -16444,8 +16444,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: addi a5, zero, 5 +; RV32I-NEXT: li a4, 5 +; RV32I-NEXT: li a5, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -16498,8 +16498,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 5 -; RV32IA-NEXT: addi a5, zero, 5 +; RV32IA-NEXT: li a4, 5 +; RV32IA-NEXT: li a5, 5 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -16548,8 +16548,8 @@ ; RV64I-NEXT: # in Loop: Header=BB214_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -16598,8 +16598,8 @@ ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -16653,8 +16653,8 @@ ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -16704,8 +16704,8 @@ ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB215_4 @@ -16752,8 +16752,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 2 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -16807,8 +16807,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 2 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 2 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -16858,8 +16858,8 @@ ; RV64I-NEXT: # in Loop: Header=BB216_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 2 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -16907,9 +16907,9 @@ ; RV32I-NEXT: sw a4, 8(sp) 
; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 3 +; RV32I-NEXT: li a4, 3 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -16962,9 +16962,9 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 3 +; RV32IA-NEXT: li a4, 3 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -17013,9 +17013,9 @@ ; RV64I-NEXT: # in Loop: Header=BB217_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: li a3, 3 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB217_4 @@ -17062,8 +17062,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 4 -; RV32I-NEXT: addi a5, zero, 2 +; RV32I-NEXT: li a4, 4 +; RV32I-NEXT: li a5, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -17117,8 +17117,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 4 -; RV32IA-NEXT: addi a5, zero, 2 +; RV32IA-NEXT: li a4, 4 +; RV32IA-NEXT: li a5, 2 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -17168,8 +17168,8 @@ ; RV64I-NEXT: # in Loop: Header=BB218_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) @@ -17217,8 +17217,8 @@ ; RV32I-NEXT: sw a4, 8(sp) ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: addi a5, zero, 5 +; RV32I-NEXT: li a4, 5 +; RV32I-NEXT: li a5, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) @@ -17272,8 +17272,8 @@ ; RV32IA-NEXT: sw a4, 8(sp) ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 -; RV32IA-NEXT: addi a4, zero, 5 -; RV32IA-NEXT: addi a5, zero, 5 +; RV32IA-NEXT: li a4, 5 +; RV32IA-NEXT: li a5, 5 ; RV32IA-NEXT: mv a0, s0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) @@ -17323,8 +17323,8 @@ ; RV64I-NEXT: # in Loop: Header=BB219_2 Depth=1 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: li a3, 5 +; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -13,7 +13,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_1@plt ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 @@ -30,7 +30,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 
8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_1@plt ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -51,7 +51,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_2@plt ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 @@ -68,7 +68,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_2@plt ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -89,7 +89,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __atomic_load_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -104,7 +104,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_4@plt ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -125,7 +125,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_exchange_1@plt ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 @@ -137,7 +137,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -159,7 +159,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_1@plt ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -171,7 +171,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -197,7 +197,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_add_1@plt ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 @@ -209,7 +209,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -231,7 +231,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_1@plt ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -243,7 +243,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -269,7 +269,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; 
RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_sub_1@plt ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 @@ -281,7 +281,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -303,7 +303,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_1@plt ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -315,7 +315,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -341,7 +341,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_and_1@plt ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 @@ -353,7 +353,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: not a3, a3 ; RV32IA-NEXT: andi a1, a1, 255 @@ -369,7 +369,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_1@plt ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -381,7 +381,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: andi a1, a1, 255 @@ -401,7 +401,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_nand_1@plt ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 @@ -413,7 +413,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a2, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -436,7 +436,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_1@plt ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -448,7 +448,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -475,7 +475,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_or_1@plt ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 @@ -499,7 +499,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_1@plt ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai 
a0, a0, 56 @@ -527,7 +527,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_xor_1@plt ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 @@ -551,7 +551,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_1@plt ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -593,8 +593,8 @@ ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB10_4 @@ -623,12 +623,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -668,8 +668,8 @@ ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB10_4 @@ -698,12 +698,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -747,8 +747,8 @@ ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB11_4 @@ -777,12 +777,12 @@ ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 ; RV32IA-NEXT: andi a3, a0, 24 -; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: li a4, 255 ; RV32IA-NEXT: sll a7, a4, a0 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 24 +; RV32IA-NEXT: li a5, 24 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -822,8 +822,8 @@ ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB11_4 @@ -852,12 +852,12 @@ ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 -; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a7, a4, a0 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 ; RV64IA-NEXT: sllw a1, a1, a0 
-; RV64IA-NEXT: addi a5, zero, 56 +; RV64IA-NEXT: li a5, 56 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -900,8 +900,8 @@ ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB12_4 @@ -928,7 +928,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -967,8 +967,8 @@ ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB12_4 @@ -995,7 +995,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -1038,8 +1038,8 @@ ; RV32I-NEXT: sb a3, 15(sp) ; RV32I-NEXT: addi a1, sp, 15 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt ; RV32I-NEXT: lb a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB13_4 @@ -1066,7 +1066,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: andi a6, a0, -4 ; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: addi a3, zero, 255 +; RV32IA-NEXT: li a3, 255 ; RV32IA-NEXT: sll a3, a3, a0 ; RV32IA-NEXT: andi a1, a1, 255 ; RV32IA-NEXT: sll a1, a1, a0 @@ -1105,8 +1105,8 @@ ; RV64I-NEXT: sb a3, 15(sp) ; RV64I-NEXT: addi a1, sp, 15 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt ; RV64I-NEXT: lb a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB13_4 @@ -1133,7 +1133,7 @@ ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 ; RV64IA-NEXT: slliw a0, a0, 3 -; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -1163,7 +1163,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_exchange_2@plt ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 @@ -1198,7 +1198,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_2@plt ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -1237,7 +1237,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_add_2@plt ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 @@ -1272,7 +1272,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_2@plt ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: 
srai a0, a0, 48 @@ -1311,7 +1311,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_sub_2@plt ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 @@ -1346,7 +1346,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_2@plt ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -1385,7 +1385,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_and_2@plt ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 @@ -1414,7 +1414,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_2@plt ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -1447,7 +1447,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_nand_2@plt ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 @@ -1483,7 +1483,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_2@plt ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -1523,7 +1523,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_or_2@plt ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 @@ -1549,7 +1549,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_2@plt ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -1579,7 +1579,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_xor_2@plt ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 @@ -1605,7 +1605,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_2@plt ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -1649,8 +1649,8 @@ ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) ; RV32I-NEXT: bnez a0, .LBB21_4 @@ -1685,7 +1685,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -1725,8 +1725,8 @@ ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: 
li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) ; RV64I-NEXT: bnez a0, .LBB21_4 @@ -1761,7 +1761,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -1805,8 +1805,8 @@ ; RV32I-NEXT: sh a3, 14(sp) ; RV32I-NEXT: addi a1, sp, 14 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a3, 14(sp) ; RV32I-NEXT: bnez a0, .LBB22_4 @@ -1841,7 +1841,7 @@ ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a5, zero, 16 +; RV32IA-NEXT: li a5, 16 ; RV32IA-NEXT: sub a3, a5, a3 ; RV32IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a5, (a6) @@ -1881,8 +1881,8 @@ ; RV64I-NEXT: sh a3, 14(sp) ; RV64I-NEXT: addi a1, sp, 14 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a3, 14(sp) ; RV64I-NEXT: bnez a0, .LBB22_4 @@ -1917,7 +1917,7 @@ ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a5, zero, 48 +; RV64IA-NEXT: li a5, 48 ; RV64IA-NEXT: sub a3, a5, a3 ; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a5, (a6) @@ -1963,8 +1963,8 @@ ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: mv a0, s3 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) ; RV32I-NEXT: bnez a0, .LBB23_4 @@ -2035,8 +2035,8 @@ ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: mv a0, s3 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) ; RV64I-NEXT: bnez a0, .LBB23_4 @@ -2111,8 +2111,8 @@ ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: mv a0, s3 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_2@plt ; RV32I-NEXT: lh a1, 10(sp) ; RV32I-NEXT: bnez a0, .LBB24_4 @@ -2183,8 +2183,8 @@ ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: mv a0, s3 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_2@plt ; RV64I-NEXT: lh a1, 6(sp) ; RV64I-NEXT: bnez a0, .LBB24_4 @@ -2243,7 +2243,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_exchange_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2258,7 +2258,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_4@plt ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -2278,7 +2278,7 @@ ; RV32I: # %bb.0: ; 
RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_add_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2293,7 +2293,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_4@plt ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -2313,7 +2313,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_sub_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2329,7 +2329,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_4@plt ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -2350,7 +2350,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_and_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2365,7 +2365,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_4@plt ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -2385,7 +2385,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_nand_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2407,7 +2407,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_4@plt ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -2434,7 +2434,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_or_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2449,7 +2449,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_4@plt ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -2469,7 +2469,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __atomic_fetch_xor_4@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2484,7 +2484,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_4@plt ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -2515,8 +2515,8 @@ ; RV32I-NEXT: sw a3, 0(sp) ; 
RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB32_4 @@ -2558,8 +2558,8 @@ ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB32_4 @@ -2605,8 +2605,8 @@ ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB33_4 @@ -2648,8 +2648,8 @@ ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB33_4 @@ -2695,8 +2695,8 @@ ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB34_4 @@ -2738,8 +2738,8 @@ ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB34_4 @@ -2785,8 +2785,8 @@ ; RV32I-NEXT: sw a3, 0(sp) ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_4@plt ; RV32I-NEXT: lw a3, 0(sp) ; RV32I-NEXT: bnez a0, .LBB35_4 @@ -2828,8 +2828,8 @@ ; RV64I-NEXT: sw a3, 12(sp) ; RV64I-NEXT: addi a1, sp, 12 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_4@plt ; RV64I-NEXT: lw a3, 12(sp) ; RV64I-NEXT: bnez a0, .LBB35_4 @@ -2864,7 +2864,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_exchange_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2874,7 +2874,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_exchange_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -2884,7 +2884,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2903,7 +2903,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call 
__atomic_fetch_add_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2913,7 +2913,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_add_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -2923,7 +2923,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2942,7 +2942,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_sub_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2952,7 +2952,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_sub_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -2962,7 +2962,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -2982,7 +2982,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_and_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -2992,7 +2992,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_and_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -3002,7 +3002,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -3021,7 +3021,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_nand_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -3031,7 +3031,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_nand_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -3041,7 +3041,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -3067,7 +3067,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; 
RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_or_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -3077,7 +3077,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_or_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -3087,7 +3087,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -3106,7 +3106,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __atomic_fetch_xor_8@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -3116,7 +3116,7 @@ ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IA-NEXT: mv a3, zero +; RV32IA-NEXT: li a3, 0 ; RV32IA-NEXT: call __atomic_fetch_xor_8@plt ; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IA-NEXT: addi sp, sp, 16 @@ -3126,7 +3126,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_8@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -3160,8 +3160,8 @@ ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -3214,8 +3214,8 @@ ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -3264,8 +3264,8 @@ ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB43_4 @@ -3313,8 +3313,8 @@ ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -3368,8 +3368,8 @@ ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -3419,8 +3419,8 @@ ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB44_4 @@ -3468,8 +3468,8 @@ ; 
RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -3522,8 +3522,8 @@ ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -3572,8 +3572,8 @@ ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB45_4 @@ -3621,8 +3621,8 @@ ; RV32I-NEXT: sw a5, 12(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a4, zero -; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: li a4, 0 +; RV32I-NEXT: li a5, 0 ; RV32I-NEXT: call __atomic_compare_exchange_8@plt ; RV32I-NEXT: lw a5, 12(sp) ; RV32I-NEXT: lw a4, 8(sp) @@ -3676,8 +3676,8 @@ ; RV32IA-NEXT: sw a5, 12(sp) ; RV32IA-NEXT: addi a1, sp, 8 ; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a4, zero -; RV32IA-NEXT: mv a5, zero +; RV32IA-NEXT: li a4, 0 +; RV32IA-NEXT: li a5, 0 ; RV32IA-NEXT: call __atomic_compare_exchange_8@plt ; RV32IA-NEXT: lw a5, 12(sp) ; RV32IA-NEXT: lw a4, 8(sp) @@ -3727,8 +3727,8 @@ ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a3, 0 +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_8@plt ; RV64I-NEXT: ld a3, 0(sp) ; RV64I-NEXT: bnez a0, .LBB46_4 diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll --- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll +++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll @@ -42,12 +42,12 @@ ; CHECK-NEXT: #APP ; CHECK-NEXT: .zero 1048576 ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: # %jmp ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: ret br i1 %a, label %iftrue, label %jmp diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll --- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll @@ -104,7 +104,7 @@ ; RV64I-NEXT: lui a2, 4080 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: srli a2, a0, 8 -; RV64I-NEXT: addi a3, zero, 255 +; RV64I-NEXT: li a3, 255 ; RV64I-NEXT: slli a4, a3, 24 ; RV64I-NEXT: and a2, a2, a4 ; RV64I-NEXT: or a1, a2, a1 @@ -154,7 +154,7 @@ ; RV32I-NEXT: andi a0, a0, 15 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB3_2: -; RV32I-NEXT: addi a0, zero, 8 +; RV32I-NEXT: li a0, 8 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_cttz_i8: @@ -177,7 +177,7 @@ ; RV64I-NEXT: andi a0, a0, 15 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB3_2: -; RV64I-NEXT: addi a0, zero, 8 +; RV64I-NEXT: li a0, 8 ; RV64I-NEXT: ret %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false) ret i8 %tmp @@ -218,7 +218,7 @@ ; RV32I-NEXT: srli a0, a0, 8 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_2: -; RV32I-NEXT: addi a0, zero, 16 +; RV32I-NEXT: li a0, 16 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_cttz_i16: @@ -255,7 +255,7 @@ ; RV64I-NEXT: srli a0, a0, 8 ; 
RV64I-NEXT: ret ; RV64I-NEXT: .LBB4_2: -; RV64I-NEXT: addi a0, zero, 16 +; RV64I-NEXT: li a0, 16 ; RV64I-NEXT: ret %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false) ret i16 %tmp @@ -295,7 +295,7 @@ ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB5_2: -; RV32I-NEXT: addi a0, zero, 32 +; RV32I-NEXT: li a0, 32 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_cttz_i32: @@ -332,7 +332,7 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB5_2: -; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) ret i32 %tmp @@ -380,7 +380,7 @@ ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB6_2: -; RV32I-NEXT: addi a0, zero, 32 +; RV32I-NEXT: li a0, 32 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_ctlz_i32: @@ -425,7 +425,7 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB6_2: -; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) ret i32 %tmp @@ -492,7 +492,7 @@ ; RV32I-NEXT: .LBB7_2: ; RV32I-NEXT: srli a0, s2, 24 ; RV32I-NEXT: .LBB7_3: -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload @@ -559,7 +559,7 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB7_2: -; RV64I-NEXT: addi a0, zero, 64 +; RV64I-NEXT: li a0, 64 ; RV64I-NEXT: ret %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false) ret i64 %tmp @@ -792,7 +792,7 @@ ; RV32I-NEXT: .LBB11_2: ; RV32I-NEXT: srli a0, s2, 24 ; RV32I-NEXT: .LBB11_3: -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload @@ -967,7 +967,7 @@ ; RV32I-NEXT: call __mulsi3@plt ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: add a0, a0, s5 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload @@ -1083,7 +1083,7 @@ ; RV32I-NEXT: srli a1, a0, 1 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_parity_i64: diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll @@ -116,7 +116,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a0, zero, 1 +; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: lui a1, 4 ; RV32I-NEXT: call callee_half_in_regs@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -127,7 +127,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: lui a1, 4 ; RV64I-NEXT: call callee_half_in_regs@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -138,7 +138,7 @@ ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: addi a0, zero, 1 +; RV32IF-NEXT: li a0, 1 ; RV32IF-NEXT: lui a1, 1048564 ; RV32IF-NEXT: call callee_half_in_regs@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -152,7 +152,7 @@ ; RV64IF-NEXT: lui a0, %hi(.LCPI1_0) ; RV64IF-NEXT: 
flw ft0, %lo(.LCPI1_0)(a0) ; RV64IF-NEXT: fmv.x.w a1, ft0 -; RV64IF-NEXT: addi a0, zero, 1 +; RV64IF-NEXT: li a0, 1 ; RV64IF-NEXT: call callee_half_in_regs@plt ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IF-NEXT: addi sp, sp, 16 @@ -164,7 +164,7 @@ ; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32-ILP32F-NEXT: lui a0, %hi(.LCPI1_0) ; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI1_0)(a0) -; RV32-ILP32F-NEXT: addi a0, zero, 1 +; RV32-ILP32F-NEXT: li a0, 1 ; RV32-ILP32F-NEXT: call callee_half_in_regs@plt ; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32F-NEXT: addi sp, sp, 16 @@ -176,7 +176,7 @@ ; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-LP64F-NEXT: lui a0, %hi(.LCPI1_0) ; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI1_0)(a0) -; RV64-LP64F-NEXT: addi a0, zero, 1 +; RV64-LP64F-NEXT: li a0, 1 ; RV64-LP64F-NEXT: call callee_half_in_regs@plt ; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-LP64F-NEXT: addi sp, sp, 16 @@ -289,14 +289,14 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: lui a0, 5 ; RV32I-NEXT: addi t0, a0, -1792 -; RV32I-NEXT: addi a0, zero, 1 -; RV32I-NEXT: addi a1, zero, 2 -; RV32I-NEXT: addi a2, zero, 3 -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: addi a5, zero, 6 -; RV32I-NEXT: addi a6, zero, 7 -; RV32I-NEXT: addi a7, zero, 8 +; RV32I-NEXT: li a0, 1 +; RV32I-NEXT: li a1, 2 +; RV32I-NEXT: li a2, 3 +; RV32I-NEXT: li a3, 4 +; RV32I-NEXT: li a4, 5 +; RV32I-NEXT: li a5, 6 +; RV32I-NEXT: li a6, 7 +; RV32I-NEXT: li a7, 8 ; RV32I-NEXT: sw t0, 0(sp) ; RV32I-NEXT: call callee_half_on_stack@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -309,14 +309,14 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: lui a0, 5 ; RV64I-NEXT: addiw t0, a0, -1792 -; RV64I-NEXT: addi a0, zero, 1 -; RV64I-NEXT: addi a1, zero, 2 -; RV64I-NEXT: addi a2, zero, 3 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: addi a5, zero, 6 -; RV64I-NEXT: addi a6, zero, 7 -; RV64I-NEXT: addi a7, zero, 8 +; RV64I-NEXT: li a0, 1 +; RV64I-NEXT: li a1, 2 +; RV64I-NEXT: li a2, 3 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 5 +; RV64I-NEXT: li a5, 6 +; RV64I-NEXT: li a6, 7 +; RV64I-NEXT: li a7, 8 ; RV64I-NEXT: sd t0, 0(sp) ; RV64I-NEXT: call callee_half_on_stack@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -329,14 +329,14 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: lui a0, 1048565 ; RV32IF-NEXT: addi t0, a0, -1792 -; RV32IF-NEXT: addi a0, zero, 1 -; RV32IF-NEXT: addi a1, zero, 2 -; RV32IF-NEXT: addi a2, zero, 3 -; RV32IF-NEXT: addi a3, zero, 4 -; RV32IF-NEXT: addi a4, zero, 5 -; RV32IF-NEXT: addi a5, zero, 6 -; RV32IF-NEXT: addi a6, zero, 7 -; RV32IF-NEXT: addi a7, zero, 8 +; RV32IF-NEXT: li a0, 1 +; RV32IF-NEXT: li a1, 2 +; RV32IF-NEXT: li a2, 3 +; RV32IF-NEXT: li a3, 4 +; RV32IF-NEXT: li a4, 5 +; RV32IF-NEXT: li a5, 6 +; RV32IF-NEXT: li a6, 7 +; RV32IF-NEXT: li a7, 8 ; RV32IF-NEXT: sw t0, 0(sp) ; RV32IF-NEXT: call callee_half_on_stack@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -349,14 +349,14 @@ ; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: lui a0, 1048565 ; RV64IF-NEXT: addiw t0, a0, -1792 -; RV64IF-NEXT: addi a0, zero, 1 -; RV64IF-NEXT: addi a1, zero, 2 -; RV64IF-NEXT: addi a2, zero, 3 -; RV64IF-NEXT: addi a3, zero, 4 -; RV64IF-NEXT: addi a4, zero, 5 -; RV64IF-NEXT: addi a5, zero, 6 -; RV64IF-NEXT: addi a6, zero, 7 -; RV64IF-NEXT: addi a7, zero, 8 +; RV64IF-NEXT: li a0, 1 +; RV64IF-NEXT: li 
a1, 2 +; RV64IF-NEXT: li a2, 3 +; RV64IF-NEXT: li a3, 4 +; RV64IF-NEXT: li a4, 5 +; RV64IF-NEXT: li a5, 6 +; RV64IF-NEXT: li a6, 7 +; RV64IF-NEXT: li a7, 8 ; RV64IF-NEXT: sw t0, 0(sp) ; RV64IF-NEXT: call callee_half_on_stack@plt ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -369,14 +369,14 @@ ; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32-ILP32F-NEXT: lui a0, %hi(.LCPI3_0) ; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI3_0)(a0) -; RV32-ILP32F-NEXT: addi a0, zero, 1 -; RV32-ILP32F-NEXT: addi a1, zero, 2 -; RV32-ILP32F-NEXT: addi a2, zero, 3 -; RV32-ILP32F-NEXT: addi a3, zero, 4 -; RV32-ILP32F-NEXT: addi a4, zero, 5 -; RV32-ILP32F-NEXT: addi a5, zero, 6 -; RV32-ILP32F-NEXT: addi a6, zero, 7 -; RV32-ILP32F-NEXT: addi a7, zero, 8 +; RV32-ILP32F-NEXT: li a0, 1 +; RV32-ILP32F-NEXT: li a1, 2 +; RV32-ILP32F-NEXT: li a2, 3 +; RV32-ILP32F-NEXT: li a3, 4 +; RV32-ILP32F-NEXT: li a4, 5 +; RV32-ILP32F-NEXT: li a5, 6 +; RV32-ILP32F-NEXT: li a6, 7 +; RV32-ILP32F-NEXT: li a7, 8 ; RV32-ILP32F-NEXT: call callee_half_on_stack@plt ; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32F-NEXT: addi sp, sp, 16 @@ -388,14 +388,14 @@ ; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-LP64F-NEXT: lui a0, %hi(.LCPI3_0) ; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI3_0)(a0) -; RV64-LP64F-NEXT: addi a0, zero, 1 -; RV64-LP64F-NEXT: addi a1, zero, 2 -; RV64-LP64F-NEXT: addi a2, zero, 3 -; RV64-LP64F-NEXT: addi a3, zero, 4 -; RV64-LP64F-NEXT: addi a4, zero, 5 -; RV64-LP64F-NEXT: addi a5, zero, 6 -; RV64-LP64F-NEXT: addi a6, zero, 7 -; RV64-LP64F-NEXT: addi a7, zero, 8 +; RV64-LP64F-NEXT: li a0, 1 +; RV64-LP64F-NEXT: li a1, 2 +; RV64-LP64F-NEXT: li a2, 3 +; RV64-LP64F-NEXT: li a3, 4 +; RV64-LP64F-NEXT: li a4, 5 +; RV64-LP64F-NEXT: li a5, 6 +; RV64-LP64F-NEXT: li a6, 7 +; RV64-LP64F-NEXT: li a7, 8 ; RV64-LP64F-NEXT: call callee_half_on_stack@plt ; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-LP64F-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll @@ -60,9 +60,9 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: addi sp, sp, -16 ; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-FPELIM-NEXT: addi a0, zero, 1 +; RV32I-FPELIM-NEXT: li a0, 1 ; RV32I-FPELIM-NEXT: lui a2, 262144 -; RV32I-FPELIM-NEXT: mv a1, zero +; RV32I-FPELIM-NEXT: li a1, 0 ; RV32I-FPELIM-NEXT: call callee_double_in_regs@plt ; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-FPELIM-NEXT: addi sp, sp, 16 @@ -74,9 +74,9 @@ ; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 +; RV32I-WITHFP-NEXT: li a0, 1 ; RV32I-WITHFP-NEXT: lui a2, 262144 -; RV32I-WITHFP-NEXT: mv a1, zero +; RV32I-WITHFP-NEXT: li a1, 0 ; RV32I-WITHFP-NEXT: call callee_double_in_regs@plt ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -145,11 +145,11 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: addi sp, sp, -64 ; RV32I-FPELIM-NEXT: sw ra, 60(sp) # 4-byte Folded Spill -; RV32I-FPELIM-NEXT: addi a0, zero, 18 +; RV32I-FPELIM-NEXT: li a0, 18 ; RV32I-FPELIM-NEXT: sw a0, 24(sp) -; RV32I-FPELIM-NEXT: addi a0, zero, 17 +; RV32I-FPELIM-NEXT: li a0, 17 ; RV32I-FPELIM-NEXT: sw a0, 20(sp) -; 
RV32I-FPELIM-NEXT: addi a0, zero, 16 +; RV32I-FPELIM-NEXT: li a0, 16 ; RV32I-FPELIM-NEXT: sw a0, 16(sp) ; RV32I-FPELIM-NEXT: lui a0, 262236 ; RV32I-FPELIM-NEXT: addi a0, a0, 655 @@ -157,7 +157,7 @@ ; RV32I-FPELIM-NEXT: lui a0, 377487 ; RV32I-FPELIM-NEXT: addi a0, a0, 1475 ; RV32I-FPELIM-NEXT: sw a0, 8(sp) -; RV32I-FPELIM-NEXT: addi a0, zero, 15 +; RV32I-FPELIM-NEXT: li a0, 15 ; RV32I-FPELIM-NEXT: sw a0, 0(sp) ; RV32I-FPELIM-NEXT: lui a0, 262153 ; RV32I-FPELIM-NEXT: addi a0, a0, 491 @@ -172,13 +172,13 @@ ; RV32I-FPELIM-NEXT: addi t0, a0, 1311 ; RV32I-FPELIM-NEXT: lui a0, 688509 ; RV32I-FPELIM-NEXT: addi a5, a0, -2048 -; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: addi a1, zero, 11 +; RV32I-FPELIM-NEXT: li a0, 1 +; RV32I-FPELIM-NEXT: li a1, 11 ; RV32I-FPELIM-NEXT: addi a2, sp, 32 -; RV32I-FPELIM-NEXT: addi a3, zero, 12 -; RV32I-FPELIM-NEXT: addi a4, zero, 13 -; RV32I-FPELIM-NEXT: addi a6, zero, 4 -; RV32I-FPELIM-NEXT: addi a7, zero, 14 +; RV32I-FPELIM-NEXT: li a3, 12 +; RV32I-FPELIM-NEXT: li a4, 13 +; RV32I-FPELIM-NEXT: li a6, 4 +; RV32I-FPELIM-NEXT: li a7, 14 ; RV32I-FPELIM-NEXT: sw t0, 32(sp) ; RV32I-FPELIM-NEXT: call callee_aligned_stack@plt ; RV32I-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload @@ -191,11 +191,11 @@ ; RV32I-WITHFP-NEXT: sw ra, 60(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 56(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 64 -; RV32I-WITHFP-NEXT: addi a0, zero, 18 +; RV32I-WITHFP-NEXT: li a0, 18 ; RV32I-WITHFP-NEXT: sw a0, 24(sp) -; RV32I-WITHFP-NEXT: addi a0, zero, 17 +; RV32I-WITHFP-NEXT: li a0, 17 ; RV32I-WITHFP-NEXT: sw a0, 20(sp) -; RV32I-WITHFP-NEXT: addi a0, zero, 16 +; RV32I-WITHFP-NEXT: li a0, 16 ; RV32I-WITHFP-NEXT: sw a0, 16(sp) ; RV32I-WITHFP-NEXT: lui a0, 262236 ; RV32I-WITHFP-NEXT: addi a0, a0, 655 @@ -203,7 +203,7 @@ ; RV32I-WITHFP-NEXT: lui a0, 377487 ; RV32I-WITHFP-NEXT: addi a0, a0, 1475 ; RV32I-WITHFP-NEXT: sw a0, 8(sp) -; RV32I-WITHFP-NEXT: addi a0, zero, 15 +; RV32I-WITHFP-NEXT: li a0, 15 ; RV32I-WITHFP-NEXT: sw a0, 0(sp) ; RV32I-WITHFP-NEXT: lui a0, 262153 ; RV32I-WITHFP-NEXT: addi a0, a0, 491 @@ -218,13 +218,13 @@ ; RV32I-WITHFP-NEXT: addi t0, a0, 1311 ; RV32I-WITHFP-NEXT: lui a0, 688509 ; RV32I-WITHFP-NEXT: addi a5, a0, -2048 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: addi a1, zero, 11 +; RV32I-WITHFP-NEXT: li a0, 1 +; RV32I-WITHFP-NEXT: li a1, 11 ; RV32I-WITHFP-NEXT: addi a2, s0, -32 -; RV32I-WITHFP-NEXT: addi a3, zero, 12 -; RV32I-WITHFP-NEXT: addi a4, zero, 13 -; RV32I-WITHFP-NEXT: addi a6, zero, 4 -; RV32I-WITHFP-NEXT: addi a7, zero, 14 +; RV32I-WITHFP-NEXT: li a3, 12 +; RV32I-WITHFP-NEXT: li a4, 13 +; RV32I-WITHFP-NEXT: li a6, 4 +; RV32I-WITHFP-NEXT: li a7, 14 ; RV32I-WITHFP-NEXT: sw t0, -32(s0) ; RV32I-WITHFP-NEXT: call callee_aligned_stack@plt ; RV32I-WITHFP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload @@ -242,7 +242,7 @@ ; RV32I-FPELIM-LABEL: callee_small_scalar_ret: ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: lui a1, 261888 -; RV32I-FPELIM-NEXT: mv a0, zero +; RV32I-FPELIM-NEXT: li a0, 0 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: callee_small_scalar_ret: @@ -252,7 +252,7 @@ ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 ; RV32I-WITHFP-NEXT: lui a1, 261888 -; RV32I-WITHFP-NEXT: mv a0, zero +; RV32I-WITHFP-NEXT: li a0, 0 ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: addi sp, sp, 16 diff --git 
a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll @@ -51,9 +51,9 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: addi sp, sp, -16 ; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: addi a1, zero, 2 -; RV32I-FPELIM-NEXT: mv a2, zero +; RV32I-FPELIM-NEXT: li a0, 1 +; RV32I-FPELIM-NEXT: li a1, 2 +; RV32I-FPELIM-NEXT: li a2, 0 ; RV32I-FPELIM-NEXT: call callee_i64_in_regs@plt ; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-FPELIM-NEXT: addi sp, sp, 16 @@ -65,9 +65,9 @@ ; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: addi a1, zero, 2 -; RV32I-WITHFP-NEXT: mv a2, zero +; RV32I-WITHFP-NEXT: li a0, 1 +; RV32I-WITHFP-NEXT: li a1, 2 +; RV32I-WITHFP-NEXT: li a2, 0 ; RV32I-WITHFP-NEXT: call callee_i64_in_regs@plt ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -144,17 +144,17 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: addi sp, sp, -16 ; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-FPELIM-NEXT: addi a0, zero, 8 +; RV32I-FPELIM-NEXT: li a0, 8 ; RV32I-FPELIM-NEXT: sw a0, 4(sp) -; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: addi a1, zero, 2 -; RV32I-FPELIM-NEXT: addi a2, zero, 3 -; RV32I-FPELIM-NEXT: addi a3, zero, 4 -; RV32I-FPELIM-NEXT: addi a5, zero, 5 -; RV32I-FPELIM-NEXT: addi a6, zero, 6 -; RV32I-FPELIM-NEXT: addi a7, zero, 7 +; RV32I-FPELIM-NEXT: li a0, 1 +; RV32I-FPELIM-NEXT: li a1, 2 +; RV32I-FPELIM-NEXT: li a2, 3 +; RV32I-FPELIM-NEXT: li a3, 4 +; RV32I-FPELIM-NEXT: li a5, 5 +; RV32I-FPELIM-NEXT: li a6, 6 +; RV32I-FPELIM-NEXT: li a7, 7 ; RV32I-FPELIM-NEXT: sw zero, 0(sp) -; RV32I-FPELIM-NEXT: mv a4, zero +; RV32I-FPELIM-NEXT: li a4, 0 ; RV32I-FPELIM-NEXT: call callee_many_scalars@plt ; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-FPELIM-NEXT: addi sp, sp, 16 @@ -166,17 +166,17 @@ ; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: addi a0, zero, 8 +; RV32I-WITHFP-NEXT: li a0, 8 ; RV32I-WITHFP-NEXT: sw a0, 4(sp) -; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: addi a1, zero, 2 -; RV32I-WITHFP-NEXT: addi a2, zero, 3 -; RV32I-WITHFP-NEXT: addi a3, zero, 4 -; RV32I-WITHFP-NEXT: addi a5, zero, 5 -; RV32I-WITHFP-NEXT: addi a6, zero, 6 -; RV32I-WITHFP-NEXT: addi a7, zero, 7 +; RV32I-WITHFP-NEXT: li a0, 1 +; RV32I-WITHFP-NEXT: li a1, 2 +; RV32I-WITHFP-NEXT: li a2, 3 +; RV32I-WITHFP-NEXT: li a3, 4 +; RV32I-WITHFP-NEXT: li a5, 5 +; RV32I-WITHFP-NEXT: li a6, 6 +; RV32I-WITHFP-NEXT: li a7, 7 ; RV32I-WITHFP-NEXT: sw zero, 0(sp) -; RV32I-WITHFP-NEXT: mv a4, zero +; RV32I-WITHFP-NEXT: li a4, 0 ; RV32I-WITHFP-NEXT: call callee_many_scalars@plt ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -255,7 +255,7 @@ ; RV32I-FPELIM-NEXT: sw zero, 36(sp) ; RV32I-FPELIM-NEXT: sw zero, 32(sp) ; RV32I-FPELIM-NEXT: sw zero, 28(sp) -; RV32I-FPELIM-NEXT: addi a2, zero, 1 +; RV32I-FPELIM-NEXT: li a2, 1 ; RV32I-FPELIM-NEXT: addi a0, 
sp, 24 ; RV32I-FPELIM-NEXT: mv a1, sp ; RV32I-FPELIM-NEXT: sw a2, 24(sp) @@ -278,7 +278,7 @@ ; RV32I-WITHFP-NEXT: sw zero, -12(s0) ; RV32I-WITHFP-NEXT: sw zero, -16(s0) ; RV32I-WITHFP-NEXT: sw zero, -20(s0) -; RV32I-WITHFP-NEXT: addi a2, zero, 1 +; RV32I-WITHFP-NEXT: li a2, 1 ; RV32I-WITHFP-NEXT: addi a0, s0, -24 ; RV32I-WITHFP-NEXT: addi a1, s0, -48 ; RV32I-WITHFP-NEXT: sw a2, -24(s0) @@ -357,7 +357,7 @@ ; RV32I-FPELIM-NEXT: sw ra, 60(sp) # 4-byte Folded Spill ; RV32I-FPELIM-NEXT: addi a0, sp, 16 ; RV32I-FPELIM-NEXT: sw a0, 4(sp) -; RV32I-FPELIM-NEXT: addi a0, zero, 9 +; RV32I-FPELIM-NEXT: li a0, 9 ; RV32I-FPELIM-NEXT: sw a0, 0(sp) ; RV32I-FPELIM-NEXT: lui a0, 524272 ; RV32I-FPELIM-NEXT: sw a0, 28(sp) @@ -367,14 +367,14 @@ ; RV32I-FPELIM-NEXT: sw zero, 52(sp) ; RV32I-FPELIM-NEXT: sw zero, 48(sp) ; RV32I-FPELIM-NEXT: sw zero, 44(sp) -; RV32I-FPELIM-NEXT: addi t0, zero, 8 -; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: addi a1, zero, 2 -; RV32I-FPELIM-NEXT: addi a2, zero, 3 -; RV32I-FPELIM-NEXT: addi a3, zero, 4 -; RV32I-FPELIM-NEXT: addi a4, zero, 5 -; RV32I-FPELIM-NEXT: addi a5, zero, 6 -; RV32I-FPELIM-NEXT: addi a6, zero, 7 +; RV32I-FPELIM-NEXT: li t0, 8 +; RV32I-FPELIM-NEXT: li a0, 1 +; RV32I-FPELIM-NEXT: li a1, 2 +; RV32I-FPELIM-NEXT: li a2, 3 +; RV32I-FPELIM-NEXT: li a3, 4 +; RV32I-FPELIM-NEXT: li a4, 5 +; RV32I-FPELIM-NEXT: li a5, 6 +; RV32I-FPELIM-NEXT: li a6, 7 ; RV32I-FPELIM-NEXT: addi a7, sp, 40 ; RV32I-FPELIM-NEXT: sw t0, 40(sp) ; RV32I-FPELIM-NEXT: call callee_large_scalars_exhausted_regs@plt @@ -390,7 +390,7 @@ ; RV32I-WITHFP-NEXT: addi s0, sp, 64 ; RV32I-WITHFP-NEXT: addi a0, s0, -48 ; RV32I-WITHFP-NEXT: sw a0, 4(sp) -; RV32I-WITHFP-NEXT: addi a0, zero, 9 +; RV32I-WITHFP-NEXT: li a0, 9 ; RV32I-WITHFP-NEXT: sw a0, 0(sp) ; RV32I-WITHFP-NEXT: lui a0, 524272 ; RV32I-WITHFP-NEXT: sw a0, -36(s0) @@ -400,14 +400,14 @@ ; RV32I-WITHFP-NEXT: sw zero, -12(s0) ; RV32I-WITHFP-NEXT: sw zero, -16(s0) ; RV32I-WITHFP-NEXT: sw zero, -20(s0) -; RV32I-WITHFP-NEXT: addi t0, zero, 8 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: addi a1, zero, 2 -; RV32I-WITHFP-NEXT: addi a2, zero, 3 -; RV32I-WITHFP-NEXT: addi a3, zero, 4 -; RV32I-WITHFP-NEXT: addi a4, zero, 5 -; RV32I-WITHFP-NEXT: addi a5, zero, 6 -; RV32I-WITHFP-NEXT: addi a6, zero, 7 +; RV32I-WITHFP-NEXT: li t0, 8 +; RV32I-WITHFP-NEXT: li a0, 1 +; RV32I-WITHFP-NEXT: li a1, 2 +; RV32I-WITHFP-NEXT: li a2, 3 +; RV32I-WITHFP-NEXT: li a3, 4 +; RV32I-WITHFP-NEXT: li a4, 5 +; RV32I-WITHFP-NEXT: li a5, 6 +; RV32I-WITHFP-NEXT: li a6, 7 ; RV32I-WITHFP-NEXT: addi a7, s0, -24 ; RV32I-WITHFP-NEXT: sw t0, -24(s0) ; RV32I-WITHFP-NEXT: call callee_large_scalars_exhausted_regs@plt @@ -493,8 +493,8 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: addi sp, sp, -16 ; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: addi a1, zero, 2 +; RV32I-FPELIM-NEXT: li a0, 1 +; RV32I-FPELIM-NEXT: li a1, 2 ; RV32I-FPELIM-NEXT: call callee_small_coerced_struct@plt ; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-FPELIM-NEXT: addi sp, sp, 16 @@ -506,8 +506,8 @@ ; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: addi a1, zero, 2 +; RV32I-WITHFP-NEXT: li a0, 1 +; RV32I-WITHFP-NEXT: li a1, 2 ; RV32I-WITHFP-NEXT: call callee_small_coerced_struct@plt ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; 
RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -555,13 +555,13 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: addi sp, sp, -48 ; RV32I-FPELIM-NEXT: sw ra, 44(sp) # 4-byte Folded Spill -; RV32I-FPELIM-NEXT: addi a0, zero, 1 +; RV32I-FPELIM-NEXT: li a0, 1 ; RV32I-FPELIM-NEXT: sw a0, 24(sp) -; RV32I-FPELIM-NEXT: addi a1, zero, 2 +; RV32I-FPELIM-NEXT: li a1, 2 ; RV32I-FPELIM-NEXT: sw a1, 28(sp) -; RV32I-FPELIM-NEXT: addi a2, zero, 3 +; RV32I-FPELIM-NEXT: li a2, 3 ; RV32I-FPELIM-NEXT: sw a2, 32(sp) -; RV32I-FPELIM-NEXT: addi a3, zero, 4 +; RV32I-FPELIM-NEXT: li a3, 4 ; RV32I-FPELIM-NEXT: sw a3, 36(sp) ; RV32I-FPELIM-NEXT: sw a0, 8(sp) ; RV32I-FPELIM-NEXT: sw a1, 12(sp) @@ -579,13 +579,13 @@ ; RV32I-WITHFP-NEXT: sw ra, 44(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 40(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 48 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 +; RV32I-WITHFP-NEXT: li a0, 1 ; RV32I-WITHFP-NEXT: sw a0, -24(s0) -; RV32I-WITHFP-NEXT: addi a1, zero, 2 +; RV32I-WITHFP-NEXT: li a1, 2 ; RV32I-WITHFP-NEXT: sw a1, -20(s0) -; RV32I-WITHFP-NEXT: addi a2, zero, 3 +; RV32I-WITHFP-NEXT: li a2, 3 ; RV32I-WITHFP-NEXT: sw a2, -16(s0) -; RV32I-WITHFP-NEXT: addi a3, zero, 4 +; RV32I-WITHFP-NEXT: li a3, 4 ; RV32I-WITHFP-NEXT: sw a3, -12(s0) ; RV32I-WITHFP-NEXT: sw a0, -40(s0) ; RV32I-WITHFP-NEXT: sw a1, -36(s0) @@ -669,16 +669,16 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: addi sp, sp, -64 ; RV32I-FPELIM-NEXT: sw ra, 60(sp) # 4-byte Folded Spill -; RV32I-FPELIM-NEXT: addi a0, zero, 19 +; RV32I-FPELIM-NEXT: li a0, 19 ; RV32I-FPELIM-NEXT: sw a0, 24(sp) -; RV32I-FPELIM-NEXT: addi a0, zero, 18 +; RV32I-FPELIM-NEXT: li a0, 18 ; RV32I-FPELIM-NEXT: sw a0, 20(sp) -; RV32I-FPELIM-NEXT: addi a0, zero, 17 +; RV32I-FPELIM-NEXT: li a0, 17 ; RV32I-FPELIM-NEXT: sw a0, 16(sp) ; RV32I-FPELIM-NEXT: sw zero, 12(sp) -; RV32I-FPELIM-NEXT: addi a0, zero, 16 +; RV32I-FPELIM-NEXT: li a0, 16 ; RV32I-FPELIM-NEXT: sw a0, 8(sp) -; RV32I-FPELIM-NEXT: addi a0, zero, 15 +; RV32I-FPELIM-NEXT: li a0, 15 ; RV32I-FPELIM-NEXT: sw a0, 0(sp) ; RV32I-FPELIM-NEXT: lui a0, 262153 ; RV32I-FPELIM-NEXT: addi a0, a0, 491 @@ -693,13 +693,13 @@ ; RV32I-FPELIM-NEXT: addi t0, a0, 1311 ; RV32I-FPELIM-NEXT: lui a0, 688509 ; RV32I-FPELIM-NEXT: addi a5, a0, -2048 -; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: addi a1, zero, 11 +; RV32I-FPELIM-NEXT: li a0, 1 +; RV32I-FPELIM-NEXT: li a1, 11 ; RV32I-FPELIM-NEXT: addi a2, sp, 32 -; RV32I-FPELIM-NEXT: addi a3, zero, 12 -; RV32I-FPELIM-NEXT: addi a4, zero, 13 -; RV32I-FPELIM-NEXT: addi a6, zero, 4 -; RV32I-FPELIM-NEXT: addi a7, zero, 14 +; RV32I-FPELIM-NEXT: li a3, 12 +; RV32I-FPELIM-NEXT: li a4, 13 +; RV32I-FPELIM-NEXT: li a6, 4 +; RV32I-FPELIM-NEXT: li a7, 14 ; RV32I-FPELIM-NEXT: sw t0, 32(sp) ; RV32I-FPELIM-NEXT: call callee_aligned_stack@plt ; RV32I-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload @@ -712,16 +712,16 @@ ; RV32I-WITHFP-NEXT: sw ra, 60(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 56(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 64 -; RV32I-WITHFP-NEXT: addi a0, zero, 19 +; RV32I-WITHFP-NEXT: li a0, 19 ; RV32I-WITHFP-NEXT: sw a0, 24(sp) -; RV32I-WITHFP-NEXT: addi a0, zero, 18 +; RV32I-WITHFP-NEXT: li a0, 18 ; RV32I-WITHFP-NEXT: sw a0, 20(sp) -; RV32I-WITHFP-NEXT: addi a0, zero, 17 +; RV32I-WITHFP-NEXT: li a0, 17 ; RV32I-WITHFP-NEXT: sw a0, 16(sp) ; RV32I-WITHFP-NEXT: sw zero, 12(sp) -; RV32I-WITHFP-NEXT: addi a0, zero, 16 +; RV32I-WITHFP-NEXT: li a0, 16 ; RV32I-WITHFP-NEXT: sw a0, 8(sp) -; RV32I-WITHFP-NEXT: addi 
a0, zero, 15 +; RV32I-WITHFP-NEXT: li a0, 15 ; RV32I-WITHFP-NEXT: sw a0, 0(sp) ; RV32I-WITHFP-NEXT: lui a0, 262153 ; RV32I-WITHFP-NEXT: addi a0, a0, 491 @@ -736,13 +736,13 @@ ; RV32I-WITHFP-NEXT: addi t0, a0, 1311 ; RV32I-WITHFP-NEXT: lui a0, 688509 ; RV32I-WITHFP-NEXT: addi a5, a0, -2048 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: addi a1, zero, 11 +; RV32I-WITHFP-NEXT: li a0, 1 +; RV32I-WITHFP-NEXT: li a1, 11 ; RV32I-WITHFP-NEXT: addi a2, s0, -32 -; RV32I-WITHFP-NEXT: addi a3, zero, 12 -; RV32I-WITHFP-NEXT: addi a4, zero, 13 -; RV32I-WITHFP-NEXT: addi a6, zero, 4 -; RV32I-WITHFP-NEXT: addi a7, zero, 14 +; RV32I-WITHFP-NEXT: li a3, 12 +; RV32I-WITHFP-NEXT: li a4, 13 +; RV32I-WITHFP-NEXT: li a6, 4 +; RV32I-WITHFP-NEXT: li a7, 14 ; RV32I-WITHFP-NEXT: sw t0, -32(s0) ; RV32I-WITHFP-NEXT: call callee_aligned_stack@plt ; RV32I-WITHFP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload @@ -763,7 +763,7 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: lui a0, 466866 ; RV32I-FPELIM-NEXT: addi a0, a0, 1677 -; RV32I-FPELIM-NEXT: addi a1, zero, 287 +; RV32I-FPELIM-NEXT: li a1, 287 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: callee_small_scalar_ret: @@ -774,7 +774,7 @@ ; RV32I-WITHFP-NEXT: addi s0, sp, 16 ; RV32I-WITHFP-NEXT: lui a0, 466866 ; RV32I-WITHFP-NEXT: addi a0, a0, 1677 -; RV32I-WITHFP-NEXT: addi a1, zero, 287 +; RV32I-WITHFP-NEXT: li a1, 287 ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: addi sp, sp, 16 @@ -830,8 +830,8 @@ define %struct.small @callee_small_struct_ret() nounwind { ; RV32I-FPELIM-LABEL: callee_small_struct_ret: ; RV32I-FPELIM: # %bb.0: -; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: mv a1, zero +; RV32I-FPELIM-NEXT: li a0, 1 +; RV32I-FPELIM-NEXT: li a1, 0 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: callee_small_struct_ret: @@ -840,8 +840,8 @@ ; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: mv a1, zero +; RV32I-WITHFP-NEXT: li a0, 1 +; RV32I-WITHFP-NEXT: li a1, 0 ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: addi sp, sp, 16 @@ -942,13 +942,13 @@ define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large) %agg.result) nounwind { ; RV32I-FPELIM-LABEL: callee_large_struct_ret: ; RV32I-FPELIM: # %bb.0: -; RV32I-FPELIM-NEXT: addi a1, zero, 1 +; RV32I-FPELIM-NEXT: li a1, 1 ; RV32I-FPELIM-NEXT: sw a1, 0(a0) -; RV32I-FPELIM-NEXT: addi a1, zero, 2 +; RV32I-FPELIM-NEXT: li a1, 2 ; RV32I-FPELIM-NEXT: sw a1, 4(a0) -; RV32I-FPELIM-NEXT: addi a1, zero, 3 +; RV32I-FPELIM-NEXT: li a1, 3 ; RV32I-FPELIM-NEXT: sw a1, 8(a0) -; RV32I-FPELIM-NEXT: addi a1, zero, 4 +; RV32I-FPELIM-NEXT: li a1, 4 ; RV32I-FPELIM-NEXT: sw a1, 12(a0) ; RV32I-FPELIM-NEXT: ret ; @@ -958,13 +958,13 @@ ; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: addi a1, zero, 1 +; RV32I-WITHFP-NEXT: li a1, 1 ; RV32I-WITHFP-NEXT: sw a1, 0(a0) -; RV32I-WITHFP-NEXT: addi a1, zero, 2 +; RV32I-WITHFP-NEXT: li a1, 2 ; RV32I-WITHFP-NEXT: sw a1, 4(a0) -; RV32I-WITHFP-NEXT: addi a1, zero, 3 +; RV32I-WITHFP-NEXT: li a1, 3 ; RV32I-WITHFP-NEXT: sw a1, 8(a0) -; RV32I-WITHFP-NEXT: addi a1, zero, 4 +; RV32I-WITHFP-NEXT: li a1, 4 ; 
RV32I-WITHFP-NEXT: sw a1, 12(a0) ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll @@ -53,7 +53,7 @@ ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: addi sp, sp, -16 ; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-FPELIM-NEXT: addi a0, zero, 1 +; RV32I-FPELIM-NEXT: li a0, 1 ; RV32I-FPELIM-NEXT: lui a1, 262144 ; RV32I-FPELIM-NEXT: call callee_float_in_regs@plt ; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -66,7 +66,7 @@ ; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 +; RV32I-WITHFP-NEXT: li a0, 1 ; RV32I-WITHFP-NEXT: lui a1, 262144 ; RV32I-WITHFP-NEXT: call callee_float_in_regs@plt ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -108,15 +108,15 @@ ; RV32I-FPELIM-NEXT: addi sp, sp, -16 ; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-FPELIM-NEXT: lui a1, 264704 -; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: addi a2, zero, 2 -; RV32I-FPELIM-NEXT: addi a4, zero, 3 -; RV32I-FPELIM-NEXT: addi a6, zero, 4 +; RV32I-FPELIM-NEXT: li a0, 1 +; RV32I-FPELIM-NEXT: li a2, 2 +; RV32I-FPELIM-NEXT: li a4, 3 +; RV32I-FPELIM-NEXT: li a6, 4 ; RV32I-FPELIM-NEXT: sw a1, 0(sp) -; RV32I-FPELIM-NEXT: mv a1, zero -; RV32I-FPELIM-NEXT: mv a3, zero -; RV32I-FPELIM-NEXT: mv a5, zero -; RV32I-FPELIM-NEXT: mv a7, zero +; RV32I-FPELIM-NEXT: li a1, 0 +; RV32I-FPELIM-NEXT: li a3, 0 +; RV32I-FPELIM-NEXT: li a5, 0 +; RV32I-FPELIM-NEXT: li a7, 0 ; RV32I-FPELIM-NEXT: call callee_float_on_stack@plt ; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-FPELIM-NEXT: addi sp, sp, 16 @@ -129,15 +129,15 @@ ; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-WITHFP-NEXT: addi s0, sp, 16 ; RV32I-WITHFP-NEXT: lui a1, 264704 -; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: addi a2, zero, 2 -; RV32I-WITHFP-NEXT: addi a4, zero, 3 -; RV32I-WITHFP-NEXT: addi a6, zero, 4 +; RV32I-WITHFP-NEXT: li a0, 1 +; RV32I-WITHFP-NEXT: li a2, 2 +; RV32I-WITHFP-NEXT: li a4, 3 +; RV32I-WITHFP-NEXT: li a6, 4 ; RV32I-WITHFP-NEXT: sw a1, 0(sp) -; RV32I-WITHFP-NEXT: mv a1, zero -; RV32I-WITHFP-NEXT: mv a3, zero -; RV32I-WITHFP-NEXT: mv a5, zero -; RV32I-WITHFP-NEXT: mv a7, zero +; RV32I-WITHFP-NEXT: li a1, 0 +; RV32I-WITHFP-NEXT: li a3, 0 +; RV32I-WITHFP-NEXT: li a5, 0 +; RV32I-WITHFP-NEXT: li a7, 0 ; RV32I-WITHFP-NEXT: call callee_float_on_stack@plt ; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll @@ -24,7 +24,7 @@ ; RV32-ILP32D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI1_0) ; RV32-ILP32D-NEXT: fld fa0, %lo(.LCPI1_0)(a0) -; RV32-ILP32D-NEXT: addi a0, zero, 1 +; RV32-ILP32D-NEXT: li a0, 1 ; RV32-ILP32D-NEXT: call callee_double_in_fpr@plt ; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32D-NEXT: addi sp, sp, 16 @@ -51,18 +51,18 @@ ; RV32-ILP32D: # %bb.0: ; RV32-ILP32D-NEXT: addi sp, sp, -16 ; RV32-ILP32D-NEXT: sw 
ra, 12(sp) # 4-byte Folded Spill -; RV32-ILP32D-NEXT: addi a1, zero, 5 +; RV32-ILP32D-NEXT: li a1, 5 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI3_0) ; RV32-ILP32D-NEXT: fld fa0, %lo(.LCPI3_0)(a0) -; RV32-ILP32D-NEXT: addi a0, zero, 1 -; RV32-ILP32D-NEXT: addi a2, zero, 2 -; RV32-ILP32D-NEXT: addi a4, zero, 3 -; RV32-ILP32D-NEXT: addi a6, zero, 4 +; RV32-ILP32D-NEXT: li a0, 1 +; RV32-ILP32D-NEXT: li a2, 2 +; RV32-ILP32D-NEXT: li a4, 3 +; RV32-ILP32D-NEXT: li a6, 4 ; RV32-ILP32D-NEXT: sw a1, 0(sp) -; RV32-ILP32D-NEXT: mv a1, zero -; RV32-ILP32D-NEXT: mv a3, zero -; RV32-ILP32D-NEXT: mv a5, zero -; RV32-ILP32D-NEXT: mv a7, zero +; RV32-ILP32D-NEXT: li a1, 0 +; RV32-ILP32D-NEXT: li a3, 0 +; RV32-ILP32D-NEXT: li a5, 0 +; RV32-ILP32D-NEXT: li a7, 0 ; RV32-ILP32D-NEXT: call callee_double_in_fpr_exhausted_gprs@plt ; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32D-NEXT: addi sp, sp, 16 @@ -113,7 +113,7 @@ ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_7) ; RV32-ILP32D-NEXT: fld fa7, %lo(.LCPI5_7)(a0) ; RV32-ILP32D-NEXT: lui a1, 262688 -; RV32-ILP32D-NEXT: mv a0, zero +; RV32-ILP32D-NEXT: li a0, 0 ; RV32-ILP32D-NEXT: call callee_double_in_gpr_exhausted_fprs@plt ; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32D-NEXT: addi sp, sp, 16 @@ -164,15 +164,15 @@ ; RV32-ILP32D-NEXT: fld fa6, %lo(.LCPI7_6)(a0) ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_7) ; RV32-ILP32D-NEXT: fld fa7, %lo(.LCPI7_7)(a0) -; RV32-ILP32D-NEXT: addi a0, zero, 1 -; RV32-ILP32D-NEXT: addi a2, zero, 3 -; RV32-ILP32D-NEXT: addi a4, zero, 5 -; RV32-ILP32D-NEXT: addi a6, zero, 7 +; RV32-ILP32D-NEXT: li a0, 1 +; RV32-ILP32D-NEXT: li a2, 3 +; RV32-ILP32D-NEXT: li a4, 5 +; RV32-ILP32D-NEXT: li a6, 7 ; RV32-ILP32D-NEXT: sw a1, 0(sp) -; RV32-ILP32D-NEXT: mv a1, zero -; RV32-ILP32D-NEXT: mv a3, zero -; RV32-ILP32D-NEXT: mv a5, zero -; RV32-ILP32D-NEXT: mv a7, zero +; RV32-ILP32D-NEXT: li a1, 0 +; RV32-ILP32D-NEXT: li a3, 0 +; RV32-ILP32D-NEXT: li a5, 0 +; RV32-ILP32D-NEXT: li a7, 0 ; RV32-ILP32D-NEXT: call callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs@plt ; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32D-NEXT: addi sp, sp, 16 @@ -221,15 +221,15 @@ ; RV32-ILP32D-NEXT: fld fa6, %lo(.LCPI9_6)(a0) ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_7) ; RV32-ILP32D-NEXT: fld fa7, %lo(.LCPI9_7)(a0) -; RV32-ILP32D-NEXT: addi a0, zero, 1 -; RV32-ILP32D-NEXT: addi a2, zero, 3 -; RV32-ILP32D-NEXT: addi a4, zero, 5 -; RV32-ILP32D-NEXT: addi a6, zero, 7 +; RV32-ILP32D-NEXT: li a0, 1 +; RV32-ILP32D-NEXT: li a2, 3 +; RV32-ILP32D-NEXT: li a4, 5 +; RV32-ILP32D-NEXT: li a6, 7 ; RV32-ILP32D-NEXT: sw zero, 0(sp) -; RV32-ILP32D-NEXT: mv a1, zero -; RV32-ILP32D-NEXT: mv a3, zero -; RV32-ILP32D-NEXT: mv a5, zero -; RV32-ILP32D-NEXT: mv a7, zero +; RV32-ILP32D-NEXT: li a1, 0 +; RV32-ILP32D-NEXT: li a3, 0 +; RV32-ILP32D-NEXT: li a5, 0 +; RV32-ILP32D-NEXT: li a7, 0 ; RV32-ILP32D-NEXT: call callee_double_on_stack_exhausted_gprs_fprs@plt ; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32D-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll @@ -27,7 +27,7 @@ ; RV32-ILP32FD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI1_0) ; RV32-ILP32FD-NEXT: flw fa0, %lo(.LCPI1_0)(a0) -; RV32-ILP32FD-NEXT: addi a0, zero, 1 +; RV32-ILP32FD-NEXT: li a0, 1 
; RV32-ILP32FD-NEXT: call callee_float_in_fpr@plt ; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32FD-NEXT: addi sp, sp, 16 @@ -54,18 +54,18 @@ ; RV32-ILP32FD: # %bb.0: ; RV32-ILP32FD-NEXT: addi sp, sp, -16 ; RV32-ILP32FD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-ILP32FD-NEXT: addi a1, zero, 5 +; RV32-ILP32FD-NEXT: li a1, 5 ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI3_0) ; RV32-ILP32FD-NEXT: flw fa0, %lo(.LCPI3_0)(a0) -; RV32-ILP32FD-NEXT: addi a0, zero, 1 -; RV32-ILP32FD-NEXT: addi a2, zero, 2 -; RV32-ILP32FD-NEXT: addi a4, zero, 3 -; RV32-ILP32FD-NEXT: addi a6, zero, 4 +; RV32-ILP32FD-NEXT: li a0, 1 +; RV32-ILP32FD-NEXT: li a2, 2 +; RV32-ILP32FD-NEXT: li a4, 3 +; RV32-ILP32FD-NEXT: li a6, 4 ; RV32-ILP32FD-NEXT: sw a1, 0(sp) -; RV32-ILP32FD-NEXT: mv a1, zero -; RV32-ILP32FD-NEXT: mv a3, zero -; RV32-ILP32FD-NEXT: mv a5, zero -; RV32-ILP32FD-NEXT: mv a7, zero +; RV32-ILP32FD-NEXT: li a1, 0 +; RV32-ILP32FD-NEXT: li a3, 0 +; RV32-ILP32FD-NEXT: li a5, 0 +; RV32-ILP32FD-NEXT: li a7, 0 ; RV32-ILP32FD-NEXT: call callee_float_in_fpr_exhausted_gprs@plt ; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32FD-NEXT: addi sp, sp, 16 @@ -158,15 +158,15 @@ ; RV32-ILP32FD-NEXT: flw fa6, %lo(.LCPI7_6)(a0) ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_7) ; RV32-ILP32FD-NEXT: flw fa7, %lo(.LCPI7_7)(a0) -; RV32-ILP32FD-NEXT: addi a0, zero, 1 -; RV32-ILP32FD-NEXT: addi a2, zero, 3 -; RV32-ILP32FD-NEXT: addi a4, zero, 5 -; RV32-ILP32FD-NEXT: addi a6, zero, 7 +; RV32-ILP32FD-NEXT: li a0, 1 +; RV32-ILP32FD-NEXT: li a2, 3 +; RV32-ILP32FD-NEXT: li a4, 5 +; RV32-ILP32FD-NEXT: li a6, 7 ; RV32-ILP32FD-NEXT: sw a1, 0(sp) -; RV32-ILP32FD-NEXT: mv a1, zero -; RV32-ILP32FD-NEXT: mv a3, zero -; RV32-ILP32FD-NEXT: mv a5, zero -; RV32-ILP32FD-NEXT: mv a7, zero +; RV32-ILP32FD-NEXT: li a1, 0 +; RV32-ILP32FD-NEXT: li a3, 0 +; RV32-ILP32FD-NEXT: li a5, 0 +; RV32-ILP32FD-NEXT: li a7, 0 ; RV32-ILP32FD-NEXT: call callee_float_on_stack_exhausted_gprs_fprs@plt ; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-ILP32FD-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll @@ -33,9 +33,9 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: slli a1, a0, 62 -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: call callee_double_in_regs@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -47,7 +47,7 @@ define double @callee_double_ret() nounwind { ; RV64I-LABEL: callee_double_ret: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 1023 +; RV64I-NEXT: li a0, 1023 ; RV64I-NEXT: slli a0, a0, 52 ; RV64I-NEXT: ret ret double 1.0 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll @@ -32,9 +32,9 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 1 -; RV64I-NEXT: addi a1, zero, 2 -; RV64I-NEXT: mv a2, zero +; RV64I-NEXT: li a0, 1 +; RV64I-NEXT: li a1, 2 +; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call 
callee_i128_in_regs@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -83,17 +83,17 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -32 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 8 +; RV64I-NEXT: li a0, 8 ; RV64I-NEXT: sd a0, 8(sp) -; RV64I-NEXT: addi a0, zero, 1 -; RV64I-NEXT: addi a1, zero, 2 -; RV64I-NEXT: addi a2, zero, 3 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a5, zero, 5 -; RV64I-NEXT: addi a6, zero, 6 -; RV64I-NEXT: addi a7, zero, 7 +; RV64I-NEXT: li a0, 1 +; RV64I-NEXT: li a1, 2 +; RV64I-NEXT: li a2, 3 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a5, 5 +; RV64I-NEXT: li a6, 6 +; RV64I-NEXT: li a7, 7 ; RV64I-NEXT: sd zero, 0(sp) -; RV64I-NEXT: mv a4, zero +; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call callee_many_scalars@plt ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 32 @@ -137,12 +137,12 @@ ; RV64I-NEXT: sd zero, 24(sp) ; RV64I-NEXT: sd zero, 16(sp) ; RV64I-NEXT: sd zero, 8(sp) -; RV64I-NEXT: addi a0, zero, 2 +; RV64I-NEXT: li a0, 2 ; RV64I-NEXT: sd a0, 0(sp) ; RV64I-NEXT: sd zero, 56(sp) ; RV64I-NEXT: sd zero, 48(sp) ; RV64I-NEXT: sd zero, 40(sp) -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: addi a0, sp, 32 ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: sd a2, 32(sp) @@ -191,24 +191,24 @@ ; RV64I-NEXT: sd ra, 88(sp) # 8-byte Folded Spill ; RV64I-NEXT: addi a0, sp, 16 ; RV64I-NEXT: sd a0, 8(sp) -; RV64I-NEXT: addi a0, zero, 9 +; RV64I-NEXT: li a0, 9 ; RV64I-NEXT: sd a0, 0(sp) ; RV64I-NEXT: sd zero, 40(sp) ; RV64I-NEXT: sd zero, 32(sp) ; RV64I-NEXT: sd zero, 24(sp) -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: sd a0, 16(sp) ; RV64I-NEXT: sd zero, 72(sp) ; RV64I-NEXT: sd zero, 64(sp) ; RV64I-NEXT: sd zero, 56(sp) -; RV64I-NEXT: addi t0, zero, 8 -; RV64I-NEXT: addi a0, zero, 1 -; RV64I-NEXT: addi a1, zero, 2 -; RV64I-NEXT: addi a2, zero, 3 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: addi a5, zero, 6 -; RV64I-NEXT: addi a6, zero, 7 +; RV64I-NEXT: li t0, 8 +; RV64I-NEXT: li a0, 1 +; RV64I-NEXT: li a1, 2 +; RV64I-NEXT: li a2, 3 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 5 +; RV64I-NEXT: li a5, 6 +; RV64I-NEXT: li a6, 7 ; RV64I-NEXT: addi a7, sp, 48 ; RV64I-NEXT: sd t0, 48(sp) ; RV64I-NEXT: call callee_large_scalars_exhausted_regs@plt @@ -260,8 +260,8 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 1 -; RV64I-NEXT: addi a1, zero, 2 +; RV64I-NEXT: li a0, 1 +; RV64I-NEXT: li a1, 2 ; RV64I-NEXT: call callee_small_coerced_struct@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -294,13 +294,13 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -80 ; RV64I-NEXT: sd ra, 72(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: sd a0, 40(sp) -; RV64I-NEXT: addi a1, zero, 2 +; RV64I-NEXT: li a1, 2 ; RV64I-NEXT: sd a1, 48(sp) -; RV64I-NEXT: addi a2, zero, 3 +; RV64I-NEXT: li a2, 3 ; RV64I-NEXT: sd a2, 56(sp) -; RV64I-NEXT: addi a3, zero, 4 +; RV64I-NEXT: li a3, 4 ; RV64I-NEXT: sd a3, 64(sp) ; RV64I-NEXT: sd a0, 8(sp) ; RV64I-NEXT: sd a1, 16(sp) @@ -360,25 +360,25 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -64 ; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 12 +; RV64I-NEXT: li a0, 12 ; RV64I-NEXT: sd a0, 48(sp) -; RV64I-NEXT: addi a0, zero, 11 +; RV64I-NEXT: li a0, 11 ; RV64I-NEXT: sd a0, 40(sp) -; RV64I-NEXT: addi 
a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: sd a0, 32(sp) ; RV64I-NEXT: sd zero, 24(sp) -; RV64I-NEXT: addi a0, zero, 9 +; RV64I-NEXT: li a0, 9 ; RV64I-NEXT: sd a0, 16(sp) -; RV64I-NEXT: addi a6, zero, 8 -; RV64I-NEXT: addi a0, zero, 1 -; RV64I-NEXT: addi a1, zero, 2 -; RV64I-NEXT: addi a2, zero, 3 -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: addi a5, zero, 6 -; RV64I-NEXT: addi a7, zero, 7 +; RV64I-NEXT: li a6, 8 +; RV64I-NEXT: li a0, 1 +; RV64I-NEXT: li a1, 2 +; RV64I-NEXT: li a2, 3 +; RV64I-NEXT: li a3, 4 +; RV64I-NEXT: li a4, 5 +; RV64I-NEXT: li a5, 6 +; RV64I-NEXT: li a7, 7 ; RV64I-NEXT: sd a6, 0(sp) -; RV64I-NEXT: mv a6, zero +; RV64I-NEXT: li a6, 0 ; RV64I-NEXT: call callee_aligned_stack@plt ; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 64 @@ -393,8 +393,8 @@ define i128 @callee_small_scalar_ret() nounwind { ; RV64I-LABEL: callee_small_scalar_ret: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a0, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: ret ret i128 -1 } @@ -423,8 +423,8 @@ define %struct.small @callee_small_struct_ret() nounwind { ; RV64I-LABEL: callee_small_struct_ret: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 1 -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a0, 1 +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: ret ret %struct.small { i64 1, i64* null } } @@ -452,7 +452,7 @@ define i256 @callee_large_scalar_ret() nounwind { ; RV64I-LABEL: callee_large_scalar_ret: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: sd a1, 24(a0) ; RV64I-NEXT: sd a1, 16(a0) ; RV64I-NEXT: sd a1, 8(a0) @@ -483,16 +483,16 @@ ; RV64I-LABEL: callee_large_struct_ret: ; RV64I: # %bb.0: ; RV64I-NEXT: sw zero, 4(a0) -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: sw a1, 0(a0) ; RV64I-NEXT: sw zero, 12(a0) -; RV64I-NEXT: addi a1, zero, 2 +; RV64I-NEXT: li a1, 2 ; RV64I-NEXT: sw a1, 8(a0) ; RV64I-NEXT: sw zero, 20(a0) -; RV64I-NEXT: addi a1, zero, 3 +; RV64I-NEXT: li a1, 3 ; RV64I-NEXT: sw a1, 16(a0) ; RV64I-NEXT: sw zero, 28(a0) -; RV64I-NEXT: addi a1, zero, 4 +; RV64I-NEXT: li a1, 4 ; RV64I-NEXT: sw a1, 24(a0) ; RV64I-NEXT: ret %a = getelementptr inbounds %struct.large, %struct.large* %agg.result, i64 0, i32 0 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll @@ -55,7 +55,7 @@ ; RV64I-FPELIM: # %bb.0: ; RV64I-FPELIM-NEXT: addi sp, sp, -16 ; RV64I-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-FPELIM-NEXT: addi a0, zero, 1 +; RV64I-FPELIM-NEXT: li a0, 1 ; RV64I-FPELIM-NEXT: lui a1, 262144 ; RV64I-FPELIM-NEXT: call callee_float_in_regs@plt ; RV64I-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -68,7 +68,7 @@ ; RV64I-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; RV64I-WITHFP-NEXT: addi s0, sp, 16 -; RV64I-WITHFP-NEXT: addi a0, zero, 1 +; RV64I-WITHFP-NEXT: li a0, 1 ; RV64I-WITHFP-NEXT: lui a1, 262144 ; RV64I-WITHFP-NEXT: call callee_float_in_regs@plt ; RV64I-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload @@ -109,15 +109,15 @@ ; RV64I-FPELIM-NEXT: addi sp, sp, -16 ; RV64I-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-FPELIM-NEXT: lui a1, 264704 -; RV64I-FPELIM-NEXT: addi a0, zero, 1 -; RV64I-FPELIM-NEXT: addi a2, zero, 2 -; RV64I-FPELIM-NEXT: addi a4, zero, 3 -; RV64I-FPELIM-NEXT: addi a6, 
zero, 4 +; RV64I-FPELIM-NEXT: li a0, 1 +; RV64I-FPELIM-NEXT: li a2, 2 +; RV64I-FPELIM-NEXT: li a4, 3 +; RV64I-FPELIM-NEXT: li a6, 4 ; RV64I-FPELIM-NEXT: sd a1, 0(sp) -; RV64I-FPELIM-NEXT: mv a1, zero -; RV64I-FPELIM-NEXT: mv a3, zero -; RV64I-FPELIM-NEXT: mv a5, zero -; RV64I-FPELIM-NEXT: mv a7, zero +; RV64I-FPELIM-NEXT: li a1, 0 +; RV64I-FPELIM-NEXT: li a3, 0 +; RV64I-FPELIM-NEXT: li a5, 0 +; RV64I-FPELIM-NEXT: li a7, 0 ; RV64I-FPELIM-NEXT: call callee_float_on_stack@plt ; RV64I-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-FPELIM-NEXT: addi sp, sp, 16 @@ -130,15 +130,15 @@ ; RV64I-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-WITHFP-NEXT: addi s0, sp, 32 ; RV64I-WITHFP-NEXT: lui a1, 264704 -; RV64I-WITHFP-NEXT: addi a0, zero, 1 -; RV64I-WITHFP-NEXT: addi a2, zero, 2 -; RV64I-WITHFP-NEXT: addi a4, zero, 3 -; RV64I-WITHFP-NEXT: addi a6, zero, 4 +; RV64I-WITHFP-NEXT: li a0, 1 +; RV64I-WITHFP-NEXT: li a2, 2 +; RV64I-WITHFP-NEXT: li a4, 3 +; RV64I-WITHFP-NEXT: li a6, 4 ; RV64I-WITHFP-NEXT: sd a1, 0(sp) -; RV64I-WITHFP-NEXT: mv a1, zero -; RV64I-WITHFP-NEXT: mv a3, zero -; RV64I-WITHFP-NEXT: mv a5, zero -; RV64I-WITHFP-NEXT: mv a7, zero +; RV64I-WITHFP-NEXT: li a1, 0 +; RV64I-WITHFP-NEXT: li a3, 0 +; RV64I-WITHFP-NEXT: li a5, 0 +; RV64I-WITHFP-NEXT: li a7, 0 ; RV64I-WITHFP-NEXT: call callee_float_on_stack@plt ; RV64I-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll @@ -34,15 +34,15 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw a0, 4(sp) ; RV32IF-NEXT: lui a1, 264704 -; RV32IF-NEXT: addi a0, zero, 1 -; RV32IF-NEXT: addi a2, zero, 2 -; RV32IF-NEXT: addi a4, zero, 3 -; RV32IF-NEXT: addi a6, zero, 4 +; RV32IF-NEXT: li a0, 1 +; RV32IF-NEXT: li a2, 2 +; RV32IF-NEXT: li a4, 3 +; RV32IF-NEXT: li a6, 4 ; RV32IF-NEXT: sw a1, 0(sp) -; RV32IF-NEXT: mv a1, zero -; RV32IF-NEXT: mv a3, zero -; RV32IF-NEXT: mv a5, zero -; RV32IF-NEXT: mv a7, zero +; RV32IF-NEXT: li a1, 0 +; RV32IF-NEXT: li a3, 0 +; RV32IF-NEXT: li a5, 0 +; RV32IF-NEXT: li a7, 0 ; RV32IF-NEXT: call onstack_f32_noop@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -61,15 +61,15 @@ ; RV32IF-NEXT: fadd.s ft2, ft1, ft0 ; RV32IF-NEXT: fsub.s ft0, ft0, ft1 ; RV32IF-NEXT: fsw ft0, 4(sp) -; RV32IF-NEXT: addi a0, zero, 1 -; RV32IF-NEXT: addi a2, zero, 2 -; RV32IF-NEXT: addi a4, zero, 3 -; RV32IF-NEXT: addi a6, zero, 4 +; RV32IF-NEXT: li a0, 1 +; RV32IF-NEXT: li a2, 2 +; RV32IF-NEXT: li a4, 3 +; RV32IF-NEXT: li a6, 4 ; RV32IF-NEXT: fsw ft2, 0(sp) -; RV32IF-NEXT: mv a1, zero -; RV32IF-NEXT: mv a3, zero -; RV32IF-NEXT: mv a5, zero -; RV32IF-NEXT: mv a7, zero +; RV32IF-NEXT: li a1, 0 +; RV32IF-NEXT: li a3, 0 +; RV32IF-NEXT: li a5, 0 +; RV32IF-NEXT: li a7, 0 ; RV32IF-NEXT: call onstack_f32_noop@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll --- a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll +++ b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll @@ -33,7 +33,7 @@ ; RV32I-SMALL-LABEL: lower_blockaddress: ; RV32I-SMALL: # %bb.0: ; RV32I-SMALL-NEXT: lui a0, %hi(addr) -; RV32I-SMALL-NEXT: addi a1, zero, 1 +; RV32I-SMALL-NEXT: li a1, 1 ; 
RV32I-SMALL-NEXT: sw a1, %lo(addr)(a0) ; RV32I-SMALL-NEXT: ret ; @@ -42,7 +42,7 @@ ; RV32I-MEDIUM-NEXT: .LBB1_1: # Label of block must be emitted ; RV32I-MEDIUM-NEXT: auipc a0, %pcrel_hi(addr) ; RV32I-MEDIUM-NEXT: addi a0, a0, %pcrel_lo(.LBB1_1) -; RV32I-MEDIUM-NEXT: addi a1, zero, 1 +; RV32I-MEDIUM-NEXT: li a1, 1 ; RV32I-MEDIUM-NEXT: sw a1, 0(a0) ; RV32I-MEDIUM-NEXT: ret store volatile i8* blockaddress(@lower_blockaddress, %block), i8** @addr @@ -60,7 +60,7 @@ ; RV32I-SMALL-NEXT: addi sp, sp, -16 ; RV32I-SMALL-NEXT: lui a1, %hi(.Ltmp0) ; RV32I-SMALL-NEXT: addi a1, a1, %lo(.Ltmp0) -; RV32I-SMALL-NEXT: addi a2, zero, 101 +; RV32I-SMALL-NEXT: li a2, 101 ; RV32I-SMALL-NEXT: sw a1, 8(sp) ; RV32I-SMALL-NEXT: blt a0, a2, .LBB2_3 ; RV32I-SMALL-NEXT: # %bb.1: # %if.then @@ -68,11 +68,11 @@ ; RV32I-SMALL-NEXT: jr a0 ; RV32I-SMALL-NEXT: .Ltmp0: # Block address taken ; RV32I-SMALL-NEXT: .LBB2_2: # %return -; RV32I-SMALL-NEXT: addi a0, zero, 4 +; RV32I-SMALL-NEXT: li a0, 4 ; RV32I-SMALL-NEXT: addi sp, sp, 16 ; RV32I-SMALL-NEXT: ret ; RV32I-SMALL-NEXT: .LBB2_3: # %return.clone -; RV32I-SMALL-NEXT: addi a0, zero, 3 +; RV32I-SMALL-NEXT: li a0, 3 ; RV32I-SMALL-NEXT: addi sp, sp, 16 ; RV32I-SMALL-NEXT: ret ; @@ -83,7 +83,7 @@ ; RV32I-MEDIUM-NEXT: # Label of block must be emitted ; RV32I-MEDIUM-NEXT: auipc a1, %pcrel_hi(.Ltmp0) ; RV32I-MEDIUM-NEXT: addi a1, a1, %pcrel_lo(.LBB2_4) -; RV32I-MEDIUM-NEXT: addi a2, zero, 101 +; RV32I-MEDIUM-NEXT: li a2, 101 ; RV32I-MEDIUM-NEXT: sw a1, 8(sp) ; RV32I-MEDIUM-NEXT: blt a0, a2, .LBB2_3 ; RV32I-MEDIUM-NEXT: # %bb.1: # %if.then @@ -91,11 +91,11 @@ ; RV32I-MEDIUM-NEXT: jr a0 ; RV32I-MEDIUM-NEXT: .Ltmp0: # Block address taken ; RV32I-MEDIUM-NEXT: .LBB2_2: # %return -; RV32I-MEDIUM-NEXT: addi a0, zero, 4 +; RV32I-MEDIUM-NEXT: li a0, 4 ; RV32I-MEDIUM-NEXT: addi sp, sp, 16 ; RV32I-MEDIUM-NEXT: ret ; RV32I-MEDIUM-NEXT: .LBB2_3: # %return.clone -; RV32I-MEDIUM-NEXT: addi a0, zero, 3 +; RV32I-MEDIUM-NEXT: li a0, 3 ; RV32I-MEDIUM-NEXT: addi sp, sp, 16 ; RV32I-MEDIUM-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll --- a/llvm/test/CodeGen/RISCV/copysign-casts.ll +++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll @@ -38,7 +38,7 @@ ; ; RV64I-LABEL: fold_promote_d_s: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, -1 +; RV64I-NEXT: li a2, -1 ; RV64I-NEXT: srli a2, a2, 1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: lui a2, 524288 @@ -109,7 +109,7 @@ ; ; RV64I-LABEL: fold_promote_d_h: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, -1 +; RV64I-NEXT: li a2, -1 ; RV64I-NEXT: srli a2, a2, 1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: lui a2, 8 @@ -290,7 +290,7 @@ ; RV64I-NEXT: lui a2, 524288 ; RV64I-NEXT: addiw a2, a2, -1 ; RV64I-NEXT: and a0, a0, a2 -; RV64I-NEXT: addi a2, zero, -1 +; RV64I-NEXT: li a2, -1 ; RV64I-NEXT: slli a2, a2, 63 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: srli a1, a1, 32 @@ -354,7 +354,7 @@ ; RV64I-NEXT: lui a2, 8 ; RV64I-NEXT: addiw a2, a2, -1 ; RV64I-NEXT: and a0, a0, a2 -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: slli a2, a2, 31 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: srli a1, a1, 16 @@ -448,7 +448,7 @@ ; RV64I-NEXT: lui a2, 8 ; RV64I-NEXT: addiw a2, a2, -1 ; RV64I-NEXT: and a0, a0, a2 -; RV64I-NEXT: addi a2, zero, -1 +; RV64I-NEXT: li a2, -1 ; RV64I-NEXT: slli a2, a2, 63 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: srli a1, a1, 48 @@ -496,7 +496,7 @@ ; RV64IFD-NEXT: lui a2, 8 ; RV64IFD-NEXT: addiw a2, a2, -1 ; RV64IFD-NEXT: and a1, a1, a2 -; RV64IFD-NEXT: addi a2, zero, -1 
+; RV64IFD-NEXT: li a2, -1 ; RV64IFD-NEXT: slli a2, a2, 63 ; RV64IFD-NEXT: and a0, a0, a2 ; RV64IFD-NEXT: srli a0, a0, 48 diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll --- a/llvm/test/CodeGen/RISCV/div.ll +++ b/llvm/test/CodeGen/RISCV/div.ll @@ -49,7 +49,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -69,7 +69,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -120,7 +120,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: addi a0, zero, 10 +; RV32I-NEXT: li a0, 10 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -128,7 +128,7 @@ ; ; RV32IM-LABEL: udiv_constant_lhs: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a1, zero, 10 +; RV32IM-NEXT: li a1, 10 ; RV32IM-NEXT: divu a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -138,7 +138,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a1, a0, 32 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -146,7 +146,7 @@ ; ; RV64IM-LABEL: udiv_constant_lhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: divuw a0, a1, a0 ; RV64IM-NEXT: ret %1 = udiv i32 10, %a @@ -194,8 +194,8 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a2, 5 +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __udivdi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -205,8 +205,8 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: addi sp, sp, -16 ; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IM-NEXT: addi a2, zero, 5 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a2, 5 +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __udivdi3@plt ; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -216,7 +216,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -246,8 +246,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: addi a0, zero, 10 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a0, 10 +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __udivdi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -259,8 +259,8 @@ ; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IM-NEXT: mv a3, a1 ; RV32IM-NEXT: mv a2, a0 -; RV32IM-NEXT: addi a0, zero, 10 -; RV32IM-NEXT: mv a1, zero +; RV32IM-NEXT: li a0, 10 +; RV32IM-NEXT: li a1, 0 ; RV32IM-NEXT: call __udivdi3@plt ; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -271,7 +271,7 @@ ; RV64I-NEXT: 
addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -279,7 +279,7 @@ ; ; RV64IM-LABEL: udiv64_constant_lhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: divu a0, a1, a0 ; RV64IM-NEXT: ret %1 = udiv i64 10, %a @@ -332,7 +332,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: andi a0, a0, 255 -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -341,7 +341,7 @@ ; RV32IM-LABEL: udiv8_constant: ; RV32IM: # %bb.0: ; RV32IM-NEXT: andi a0, a0, 255 -; RV32IM-NEXT: addi a1, zero, 205 +; RV32IM-NEXT: li a1, 205 ; RV32IM-NEXT: mul a0, a0, a1 ; RV32IM-NEXT: srli a0, a0, 10 ; RV32IM-NEXT: ret @@ -351,7 +351,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: andi a0, a0, 255 -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -360,7 +360,7 @@ ; RV64IM-LABEL: udiv8_constant: ; RV64IM: # %bb.0: ; RV64IM-NEXT: andi a0, a0, 255 -; RV64IM-NEXT: addi a1, zero, 205 +; RV64IM-NEXT: li a1, 205 ; RV64IM-NEXT: mul a0, a0, a1 ; RV64IM-NEXT: srli a0, a0, 10 ; RV64IM-NEXT: ret @@ -402,7 +402,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: andi a1, a0, 255 -; RV32I-NEXT: addi a0, zero, 10 +; RV32I-NEXT: li a0, 10 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -411,7 +411,7 @@ ; RV32IM-LABEL: udiv8_constant_lhs: ; RV32IM: # %bb.0: ; RV32IM-NEXT: andi a0, a0, 255 -; RV32IM-NEXT: addi a1, zero, 10 +; RV32IM-NEXT: li a1, 10 ; RV32IM-NEXT: divu a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -420,7 +420,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: andi a1, a0, 255 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -429,7 +429,7 @@ ; RV64IM-LABEL: udiv8_constant_lhs: ; RV64IM: # %bb.0: ; RV64IM-NEXT: andi a0, a0, 255 -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: divuw a0, a1, a0 ; RV64IM-NEXT: ret %1 = udiv i8 10, %a @@ -492,7 +492,7 @@ ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -516,7 +516,7 @@ ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: and a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -572,7 +572,7 @@ ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a1, a0, a1 -; RV32I-NEXT: addi a0, zero, 10 +; RV32I-NEXT: li a0, 10 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -583,7 +583,7 @@ ; RV32IM-NEXT: lui a1, 16 ; RV32IM-NEXT: addi a1, a1, -1 ; RV32IM-NEXT: and a0, a0, 
a1 -; RV32IM-NEXT: addi a1, zero, 10 +; RV32IM-NEXT: li a1, 10 ; RV32IM-NEXT: divu a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -594,7 +594,7 @@ ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: and a1, a0, a1 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -605,7 +605,7 @@ ; RV64IM-NEXT: lui a1, 16 ; RV64IM-NEXT: addiw a1, a1, -1 ; RV64IM-NEXT: and a0, a0, a1 -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: divuw a0, a1, a0 ; RV64IM-NEXT: ret %1 = udiv i16 10, %a @@ -651,7 +651,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -672,7 +672,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -770,7 +770,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: addi a0, zero, -10 +; RV32I-NEXT: li a0, -10 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -778,7 +778,7 @@ ; ; RV32IM-LABEL: sdiv_constant_lhs: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a1, zero, -10 +; RV32IM-NEXT: li a1, -10 ; RV32IM-NEXT: div a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -787,7 +787,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w a1, a0 -; RV64I-NEXT: addi a0, zero, -10 +; RV64I-NEXT: li a0, -10 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -795,7 +795,7 @@ ; ; RV64IM-LABEL: sdiv_constant_lhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, -10 +; RV64IM-NEXT: li a1, -10 ; RV64IM-NEXT: divw a0, a1, a0 ; RV64IM-NEXT: ret %1 = sdiv i32 -10, %a @@ -843,8 +843,8 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 5 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a2, 5 +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __divdi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -854,8 +854,8 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: addi sp, sp, -16 ; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IM-NEXT: addi a2, zero, 5 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a2, 5 +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __divdi3@plt ; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -865,7 +865,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -897,8 +897,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: addi a0, zero, 10 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a0, 10 +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __divdi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -910,8 +910,8 @@ ; 
RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IM-NEXT: mv a3, a1 ; RV32IM-NEXT: mv a2, a0 -; RV32IM-NEXT: addi a0, zero, 10 -; RV32IM-NEXT: mv a1, zero +; RV32IM-NEXT: li a0, 10 +; RV32IM-NEXT: li a1, 0 ; RV32IM-NEXT: call __divdi3@plt ; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -922,7 +922,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -930,7 +930,7 @@ ; ; RV64IM-LABEL: sdiv64_constant_lhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: div a0, a1, a0 ; RV64IM-NEXT: ret %1 = sdiv i64 10, %a @@ -1043,7 +1043,7 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1053,7 +1053,7 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: slli a0, a0, 24 ; RV32IM-NEXT: srai a0, a0, 24 -; RV32IM-NEXT: addi a1, zero, 103 +; RV32IM-NEXT: li a1, 103 ; RV32IM-NEXT: mul a0, a0, a1 ; RV32IM-NEXT: srai a1, a0, 9 ; RV32IM-NEXT: srli a0, a0, 15 @@ -1067,7 +1067,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1077,7 +1077,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: slli a0, a0, 56 ; RV64IM-NEXT: srai a0, a0, 56 -; RV64IM-NEXT: addi a1, zero, 103 +; RV64IM-NEXT: li a1, 103 ; RV64IM-NEXT: mul a0, a0, a1 ; RV64IM-NEXT: srai a1, a0, 9 ; RV64IM-NEXT: srli a0, a0, 15 @@ -1143,7 +1143,7 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a1, a0, 24 -; RV32I-NEXT: addi a0, zero, -10 +; RV32I-NEXT: li a0, -10 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1153,7 +1153,7 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: slli a0, a0, 24 ; RV32IM-NEXT: srai a0, a0, 24 -; RV32IM-NEXT: addi a1, zero, -10 +; RV32IM-NEXT: li a1, -10 ; RV32IM-NEXT: div a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -1163,7 +1163,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a1, a0, 56 -; RV64I-NEXT: addi a0, zero, -10 +; RV64I-NEXT: li a0, -10 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1173,7 +1173,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: slli a0, a0, 56 ; RV64IM-NEXT: srai a0, a0, 56 -; RV64IM-NEXT: addi a1, zero, -10 +; RV64IM-NEXT: li a1, -10 ; RV64IM-NEXT: divw a0, a1, a0 ; RV64IM-NEXT: ret %1 = sdiv i8 -10, %a @@ -1235,7 +1235,7 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: addi a1, zero, 5 +; RV32I-NEXT: li a1, 5 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1259,7 +1259,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: addi a1, zero, 5 +; RV64I-NEXT: li a1, 5 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 
8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1335,7 +1335,7 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a1, a0, 16 -; RV32I-NEXT: addi a0, zero, -10 +; RV32I-NEXT: li a0, -10 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1345,7 +1345,7 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: slli a0, a0, 16 ; RV32IM-NEXT: srai a0, a0, 16 -; RV32IM-NEXT: addi a1, zero, -10 +; RV32IM-NEXT: li a1, -10 ; RV32IM-NEXT: div a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -1355,7 +1355,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a1, a0, 48 -; RV64I-NEXT: addi a0, zero, -10 +; RV64I-NEXT: li a0, -10 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -1365,7 +1365,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: slli a0, a0, 48 ; RV64IM-NEXT: srai a0, a0, 48 -; RV64IM-NEXT: addi a1, zero, -10 +; RV64IM-NEXT: li a1, -10 ; RV64IM-NEXT: divw a0, a1, a0 ; RV64IM-NEXT: ret %1 = sdiv i16 -10, %a diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll --- a/llvm/test/CodeGen/RISCV/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/double-arith.ll @@ -227,7 +227,7 @@ ; ; RV64IFD-LABEL: fsgnjn_d: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi a2, zero, -1 +; RV64IFD-NEXT: li a2, -1 ; RV64IFD-NEXT: slli a2, a2, 63 ; RV64IFD-NEXT: xor a1, a1, a2 ; RV64IFD-NEXT: fmv.d.x ft0, a1 diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll --- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll @@ -32,14 +32,14 @@ ; ; RV64I-LABEL: fneg: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IFD-LABEL: fneg: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi a1, zero, -1 +; RV64IFD-NEXT: li a1, -1 ; RV64IFD-NEXT: slli a1, a1, 63 ; RV64IFD-NEXT: xor a0, a0, a1 ; RV64IFD-NEXT: ret @@ -66,14 +66,14 @@ ; ; RV64I-LABEL: fabs: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: srli a1, a1, 1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IFD-LABEL: fabs: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi a1, zero, -1 +; RV64IFD-NEXT: li a1, -1 ; RV64IFD-NEXT: srli a1, a1, 1 ; RV64IFD-NEXT: and a0, a0, a1 ; RV64IFD-NEXT: ret @@ -117,7 +117,7 @@ ; RV64I-LABEL: fcopysign_fneg: ; RV64I: # %bb.0: ; RV64I-NEXT: not a1, a1 -; RV64I-NEXT: addi a2, zero, -1 +; RV64I-NEXT: li a2, -1 ; RV64I-NEXT: slli a3, a2, 63 ; RV64I-NEXT: and a1, a1, a3 ; RV64I-NEXT: srli a2, a2, 1 @@ -127,7 +127,7 @@ ; ; RV64IFD-LABEL: fcopysign_fneg: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi a2, zero, -1 +; RV64IFD-NEXT: li a2, -1 ; RV64IFD-NEXT: slli a2, a2, 63 ; RV64IFD-NEXT: xor a1, a1, a2 ; RV64IFD-NEXT: fmv.d.x ft0, a1 diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll @@ -10,7 +10,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind { ; RV32IFD-LABEL: br_fcmp_false: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: addi a0, zero, 1 +; RV32IFD-NEXT: li a0, 1 ; RV32IFD-NEXT: bnez a0, .LBB0_2 ; RV32IFD-NEXT: # %bb.1: # %if.then ; RV32IFD-NEXT: ret @@ -21,7 +21,7 @@ ; ; RV64IFD-LABEL: br_fcmp_false: ; RV64IFD: # 
%bb.0: -; RV64IFD-NEXT: addi a0, zero, 1 +; RV64IFD-NEXT: li a0, 1 ; RV64IFD-NEXT: bnez a0, .LBB0_2 ; RV64IFD-NEXT: # %bb.1: # %if.then ; RV64IFD-NEXT: ret @@ -676,7 +676,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind { ; RV32IFD-LABEL: br_fcmp_true: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: addi a0, zero, 1 +; RV32IFD-NEXT: li a0, 1 ; RV32IFD-NEXT: bnez a0, .LBB16_2 ; RV32IFD-NEXT: # %bb.1: # %if.else ; RV32IFD-NEXT: ret @@ -687,7 +687,7 @@ ; ; RV64IFD-LABEL: br_fcmp_true: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi a0, zero, 1 +; RV64IFD-NEXT: li a0, 1 ; RV64IFD-NEXT: bnez a0, .LBB16_2 ; RV64IFD-NEXT: # %bb.1: # %if.else ; RV64IFD-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll @@ -81,12 +81,12 @@ ; RV32IFD-NEXT: addi a6, a0, 327 ; RV32IFD-NEXT: lui a0, 713032 ; RV32IFD-NEXT: addi a5, a0, -1311 -; RV32IFD-NEXT: addi a0, zero, 1 -; RV32IFD-NEXT: addi a1, zero, 2 -; RV32IFD-NEXT: addi a3, zero, 3 +; RV32IFD-NEXT: li a0, 1 +; RV32IFD-NEXT: li a1, 2 +; RV32IFD-NEXT: li a3, 3 ; RV32IFD-NEXT: sw a2, 0(sp) -; RV32IFD-NEXT: mv a2, zero -; RV32IFD-NEXT: mv a4, zero +; RV32IFD-NEXT: li a2, 0 +; RV32IFD-NEXT: li a4, 0 ; RV32IFD-NEXT: mv a7, a5 ; RV32IFD-NEXT: call callee_double_split_reg_stack@plt ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -126,15 +126,15 @@ ; RV32IFD-NEXT: lui a0, 262574 ; RV32IFD-NEXT: addi a0, a0, 327 ; RV32IFD-NEXT: sw a0, 12(sp) -; RV32IFD-NEXT: addi a0, zero, 1 -; RV32IFD-NEXT: addi a2, zero, 2 -; RV32IFD-NEXT: addi a4, zero, 3 -; RV32IFD-NEXT: addi a6, zero, 4 +; RV32IFD-NEXT: li a0, 1 +; RV32IFD-NEXT: li a2, 2 +; RV32IFD-NEXT: li a4, 3 +; RV32IFD-NEXT: li a6, 4 ; RV32IFD-NEXT: sw a1, 8(sp) -; RV32IFD-NEXT: mv a1, zero -; RV32IFD-NEXT: mv a3, zero -; RV32IFD-NEXT: mv a5, zero -; RV32IFD-NEXT: mv a7, zero +; RV32IFD-NEXT: li a1, 0 +; RV32IFD-NEXT: li a3, 0 +; RV32IFD-NEXT: li a5, 0 +; RV32IFD-NEXT: li a7, 0 ; RV32IFD-NEXT: call callee_double_stack@plt ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -80,7 +80,7 @@ ; RV32IFD-NEXT: feq.d a0, ft0, ft0 ; RV32IFD-NEXT: bnez a0, .LBB3_2 ; RV32IFD-NEXT: # %bb.1: # %start -; RV32IFD-NEXT: mv a0, zero +; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB3_2: @@ -94,7 +94,7 @@ ; RV64IFD-NEXT: feq.d a0, ft0, ft0 ; RV64IFD-NEXT: bnez a0, .LBB3_2 ; RV64IFD-NEXT: # %bb.1: # %start -; RV64IFD-NEXT: mv a0, zero +; RV64IFD-NEXT: li a0, 0 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB3_2: ; RV64IFD-NEXT: fcvt.w.d a0, ft0, rtz @@ -138,7 +138,7 @@ ; RV32IFD-NEXT: sw a1, 12(sp) ; RV32IFD-NEXT: fld ft0, 8(sp) ; RV32IFD-NEXT: fcvt.wu.d a1, ft0, rtz -; RV32IFD-NEXT: addi a0, zero, 1 +; RV32IFD-NEXT: li a0, 1 ; RV32IFD-NEXT: beqz a1, .LBB5_2 ; RV32IFD-NEXT: # %bb.1: ; RV32IFD-NEXT: mv a0, a1 @@ -150,7 +150,7 @@ ; RV64IFD: # %bb.0: ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: fcvt.wu.d a1, ft0, rtz -; RV64IFD-NEXT: addi a0, zero, 1 +; RV64IFD-NEXT: li a0, 1 ; RV64IFD-NEXT: beqz a1, .LBB5_2 ; RV64IFD-NEXT: # %bb.1: ; RV64IFD-NEXT: mv a0, a1 @@ -172,7 +172,7 @@ ; RV32IFD-NEXT: feq.d a0, ft0, ft0 ; RV32IFD-NEXT: bnez a0, .LBB6_2 ; RV32IFD-NEXT: # %bb.1: # %start -; RV32IFD-NEXT: mv a0, 
zero +; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB6_2: @@ -186,7 +186,7 @@ ; RV64IFD-NEXT: feq.d a0, ft0, ft0 ; RV64IFD-NEXT: bnez a0, .LBB6_2 ; RV64IFD-NEXT: # %bb.1: # %start -; RV64IFD-NEXT: mv a0, zero +; RV64IFD-NEXT: li a0, 0 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB6_2: ; RV64IFD-NEXT: fcvt.wu.d a0, ft0, rtz @@ -319,12 +319,12 @@ ; RV32IFD-NEXT: mv a2, a0 ; RV32IFD-NEXT: bnez a3, .LBB12_2 ; RV32IFD-NEXT: # %bb.1: # %start -; RV32IFD-NEXT: mv a2, zero +; RV32IFD-NEXT: li a2, 0 ; RV32IFD-NEXT: .LBB12_2: # %start ; RV32IFD-NEXT: lui a0, %hi(.LCPI12_1) ; RV32IFD-NEXT: fld ft0, %lo(.LCPI12_1)(a0) ; RV32IFD-NEXT: flt.d a4, ft0, ft1 -; RV32IFD-NEXT: addi a0, zero, -1 +; RV32IFD-NEXT: li a0, -1 ; RV32IFD-NEXT: beqz a4, .LBB12_9 ; RV32IFD-NEXT: # %bb.3: # %start ; RV32IFD-NEXT: feq.d a2, ft1, ft1 @@ -337,7 +337,7 @@ ; RV32IFD-NEXT: .LBB12_6: # %start ; RV32IFD-NEXT: bnez a2, .LBB12_8 ; RV32IFD-NEXT: .LBB12_7: # %start -; RV32IFD-NEXT: mv a1, zero +; RV32IFD-NEXT: li a1, 0 ; RV32IFD-NEXT: .LBB12_8: # %start ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 @@ -347,7 +347,7 @@ ; RV32IFD-NEXT: feq.d a2, ft1, ft1 ; RV32IFD-NEXT: bnez a2, .LBB12_4 ; RV32IFD-NEXT: .LBB12_10: # %start -; RV32IFD-NEXT: mv a0, zero +; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: lui a5, 524288 ; RV32IFD-NEXT: bnez a3, .LBB12_5 ; RV32IFD-NEXT: .LBB12_11: # %start @@ -364,7 +364,7 @@ ; RV64IFD-NEXT: feq.d a0, ft0, ft0 ; RV64IFD-NEXT: bnez a0, .LBB12_2 ; RV64IFD-NEXT: # %bb.1: # %start -; RV64IFD-NEXT: mv a0, zero +; RV64IFD-NEXT: li a0, 0 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB12_2: ; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz @@ -410,13 +410,13 @@ ; RV32IFD-NEXT: mv a3, a0 ; RV32IFD-NEXT: bnez a4, .LBB14_2 ; RV32IFD-NEXT: # %bb.1: # %start -; RV32IFD-NEXT: mv a3, zero +; RV32IFD-NEXT: li a3, 0 ; RV32IFD-NEXT: .LBB14_2: # %start ; RV32IFD-NEXT: lui a0, %hi(.LCPI14_0) ; RV32IFD-NEXT: fld ft0, %lo(.LCPI14_0)(a0) ; RV32IFD-NEXT: flt.d a5, ft0, ft1 -; RV32IFD-NEXT: addi a2, zero, -1 -; RV32IFD-NEXT: addi a0, zero, -1 +; RV32IFD-NEXT: li a2, -1 +; RV32IFD-NEXT: li a0, -1 ; RV32IFD-NEXT: beqz a5, .LBB14_7 ; RV32IFD-NEXT: # %bb.3: # %start ; RV32IFD-NEXT: beqz a4, .LBB14_8 @@ -433,7 +433,7 @@ ; RV32IFD-NEXT: mv a0, a3 ; RV32IFD-NEXT: bnez a4, .LBB14_4 ; RV32IFD-NEXT: .LBB14_8: # %start -; RV32IFD-NEXT: mv a1, zero +; RV32IFD-NEXT: li a1, 0 ; RV32IFD-NEXT: beqz a5, .LBB14_5 ; RV32IFD-NEXT: j .LBB14_6 ; @@ -443,7 +443,7 @@ ; RV64IFD-NEXT: feq.d a0, ft0, ft0 ; RV64IFD-NEXT: bnez a0, .LBB14_2 ; RV64IFD-NEXT: # %bb.1: # %start -; RV64IFD-NEXT: mv a0, zero +; RV64IFD-NEXT: li a0, 0 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB14_2: ; RV64IFD-NEXT: fcvt.lu.d a0, ft0, rtz diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll --- a/llvm/test/CodeGen/RISCV/double-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll @@ -7,12 +7,12 @@ define i32 @fcmp_false(double %a, double %b) nounwind { ; RV32IFD-LABEL: fcmp_false: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: mv a0, zero +; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: fcmp_false: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: mv a0, zero +; RV64IFD-NEXT: li a0, 0 ; RV64IFD-NEXT: ret %1 = fcmp false double %a, %b %2 = zext i1 %1 to i32 @@ -402,12 +402,12 @@ define i32 @fcmp_true(double %a, double %b) nounwind { ; RV32IFD-LABEL: fcmp_true: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: addi a0, zero, 1 +; RV32IFD-NEXT: li a0, 1 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: fcmp_true: ; RV64IFD: # 
%bb.0: -; RV64IFD-NEXT: addi a0, zero, 1 +; RV64IFD-NEXT: li a0, 1 ; RV64IFD-NEXT: ret %1 = fcmp true double %a, %b %2 = zext i1 %1 to i32 diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -384,7 +384,7 @@ ; ; RV64IFD-LABEL: fabs_f64: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi a1, zero, -1 +; RV64IFD-NEXT: li a1, -1 ; RV64IFD-NEXT: srli a1, a1, 1 ; RV64IFD-NEXT: and a0, a0, a1 ; RV64IFD-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll --- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll +++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll @@ -18,7 +18,7 @@ ; RV32IFD-NEXT: addi sp, sp, -16 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IFD-NEXT: lui a1, 262144 -; RV32IFD-NEXT: mv a0, zero +; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: call test@plt ; RV32IFD-NEXT: sw a0, 0(sp) ; RV32IFD-NEXT: sw a1, 4(sp) @@ -36,7 +36,7 @@ ; RV32IFD-NEXT: # %bb.1: # %if.then ; RV32IFD-NEXT: call abort@plt ; RV32IFD-NEXT: .LBB1_2: # %if.end -; RV32IFD-NEXT: mv a0, zero +; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: call exit@plt entry: %call = call double @test(double 2.000000e+00) diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll --- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll @@ -110,7 +110,7 @@ ; RV64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; RV64F-NEXT: mv s0, a0 ; RV64F-NEXT: call __adddf3@plt -; RV64F-NEXT: addi a1, zero, -1 +; RV64F-NEXT: li a1, -1 ; RV64F-NEXT: srli a1, a1, 1 ; RV64F-NEXT: and a1, a0, a1 ; RV64F-NEXT: mv a0, s0 @@ -233,7 +233,7 @@ ; RV64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; RV64F-NEXT: mv s0, a0 ; RV64F-NEXT: call __muldf3@plt -; RV64F-NEXT: addi a1, zero, -1 +; RV64F-NEXT: li a1, -1 ; RV64F-NEXT: slli a1, a1, 63 ; RV64F-NEXT: xor a1, a0, a1 ; RV64F-NEXT: mv a0, s0 @@ -360,7 +360,7 @@ ; RV64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; RV64F-NEXT: mv s0, a0 ; RV64F-NEXT: call __muldf3@plt -; RV64F-NEXT: addi a1, zero, -1 +; RV64F-NEXT: li a1, -1 ; RV64F-NEXT: slli a1, a1, 63 ; RV64F-NEXT: or a1, a0, a1 ; RV64F-NEXT: mv a0, s0 diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll @@ -11,7 +11,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_false: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi a0, zero, 1 +; RV32IF-NEXT: li a0, 1 ; RV32IF-NEXT: bnez a0, .LBB0_2 ; RV32IF-NEXT: # %bb.1: # %if.then ; RV32IF-NEXT: ret @@ -22,7 +22,7 @@ ; ; RV64IF-LABEL: br_fcmp_false: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi a0, zero, 1 +; RV64IF-NEXT: li a0, 1 ; RV64IF-NEXT: bnez a0, .LBB0_2 ; RV64IF-NEXT: # %bb.1: # %if.then ; RV64IF-NEXT: ret @@ -587,7 +587,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_true: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi a0, zero, 1 +; RV32IF-NEXT: li a0, 1 ; RV32IF-NEXT: bnez a0, .LBB16_2 ; RV32IF-NEXT: # %bb.1: # %if.else ; RV32IF-NEXT: ret @@ -598,7 +598,7 @@ ; ; RV64IF-LABEL: br_fcmp_true: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi a0, zero, 1 +; RV64IF-NEXT: li a0, 1 ; RV64IF-NEXT: bnez a0, .LBB16_2 ; RV64IF-NEXT: # %bb.1: # %if.else ; RV64IF-NEXT: ret @@ 
-623,7 +623,7 @@ ; RV32IF: # %bb.0: # %entry ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: mv a0, zero +; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: call dummy@plt ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fmv.w.x ft1, zero @@ -631,14 +631,14 @@ ; RV32IF-NEXT: feq.s a0, ft0, ft1 ; RV32IF-NEXT: beqz a0, .LBB17_3 ; RV32IF-NEXT: # %bb.1: # %if.end -; RV32IF-NEXT: mv a0, zero +; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: call dummy@plt ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: flw ft1, 8(sp) # 4-byte Folded Reload ; RV32IF-NEXT: feq.s a0, ft0, ft1 ; RV32IF-NEXT: beqz a0, .LBB17_3 ; RV32IF-NEXT: # %bb.2: # %if.end4 -; RV32IF-NEXT: mv a0, zero +; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret @@ -667,7 +667,7 @@ ; RV64IF-NEXT: feq.s a0, ft0, ft1 ; RV64IF-NEXT: beqz a0, .LBB17_3 ; RV64IF-NEXT: # %bb.2: # %if.end4 -; RV64IF-NEXT: mv a0, zero +; RV64IF-NEXT: li a0, 0 ; RV64IF-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64IF-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64IF-NEXT: addi sp, sp, 32 diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -29,7 +29,7 @@ ; RV32IF-NEXT: feq.s a0, ft0, ft0 ; RV32IF-NEXT: bnez a0, .LBB1_2 ; RV32IF-NEXT: # %bb.1: # %start -; RV32IF-NEXT: mv a0, zero +; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB1_2: ; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz @@ -41,7 +41,7 @@ ; RV64IF-NEXT: feq.s a0, ft0, ft0 ; RV64IF-NEXT: bnez a0, .LBB1_2 ; RV64IF-NEXT: # %bb.1: # %start -; RV64IF-NEXT: mv a0, zero +; RV64IF-NEXT: li a0, 0 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB1_2: ; RV64IF-NEXT: fcvt.w.s a0, ft0, rtz @@ -77,7 +77,7 @@ ; RV32IF: # %bb.0: ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fcvt.wu.s a1, ft0, rtz -; RV32IF-NEXT: addi a0, zero, 1 +; RV32IF-NEXT: li a0, 1 ; RV32IF-NEXT: beqz a1, .LBB3_2 ; RV32IF-NEXT: # %bb.1: ; RV32IF-NEXT: mv a0, a1 @@ -88,7 +88,7 @@ ; RV64IF: # %bb.0: ; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fcvt.wu.s a1, ft0, rtz -; RV64IF-NEXT: addi a0, zero, 1 +; RV64IF-NEXT: li a0, 1 ; RV64IF-NEXT: beqz a1, .LBB3_2 ; RV64IF-NEXT: # %bb.1: ; RV64IF-NEXT: mv a0, a1 @@ -107,7 +107,7 @@ ; RV32IF-NEXT: feq.s a0, ft0, ft0 ; RV32IF-NEXT: bnez a0, .LBB4_2 ; RV32IF-NEXT: # %bb.1: # %start -; RV32IF-NEXT: mv a0, zero +; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB4_2: ; RV32IF-NEXT: fcvt.wu.s a0, ft0, rtz @@ -119,7 +119,7 @@ ; RV64IF-NEXT: feq.s a0, ft0, ft0 ; RV64IF-NEXT: bnez a0, .LBB4_2 ; RV64IF-NEXT: # %bb.1: # %start -; RV64IF-NEXT: mv a0, zero +; RV64IF-NEXT: li a0, 0 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB4_2: ; RV64IF-NEXT: fcvt.wu.s a0, ft0, rtz @@ -279,14 +279,14 @@ ; RV32IF-NEXT: mv a2, a0 ; RV32IF-NEXT: bnez s0, .LBB12_2 ; RV32IF-NEXT: # %bb.1: # %start -; RV32IF-NEXT: mv a2, zero +; RV32IF-NEXT: li a2, 0 ; RV32IF-NEXT: .LBB12_2: # %start ; RV32IF-NEXT: lui a0, %hi(.LCPI12_1) ; RV32IF-NEXT: flw ft0, %lo(.LCPI12_1)(a0) ; RV32IF-NEXT: flw ft1, 4(sp) # 4-byte Folded Reload ; RV32IF-NEXT: flt.s a3, ft0, ft1 ; RV32IF-NEXT: fmv.s ft0, ft1 -; RV32IF-NEXT: addi a0, zero, -1 +; RV32IF-NEXT: li a0, -1 ; RV32IF-NEXT: beqz a3, .LBB12_9 ; RV32IF-NEXT: # %bb.3: # %start ; RV32IF-NEXT: feq.s a2, ft0, ft0 @@ -299,7 +299,7 @@ ; RV32IF-NEXT: .LBB12_6: # %start ; RV32IF-NEXT: bnez a2, .LBB12_8 ; RV32IF-NEXT: .LBB12_7: # %start -; RV32IF-NEXT: mv a1, zero +; RV32IF-NEXT: li a1, 0 ; 
RV32IF-NEXT: .LBB12_8: # %start ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -310,7 +310,7 @@ ; RV32IF-NEXT: feq.s a2, ft0, ft0 ; RV32IF-NEXT: bnez a2, .LBB12_4 ; RV32IF-NEXT: .LBB12_10: # %start -; RV32IF-NEXT: mv a0, zero +; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: lui a4, 524288 ; RV32IF-NEXT: bnez s0, .LBB12_5 ; RV32IF-NEXT: .LBB12_11: # %start @@ -327,7 +327,7 @@ ; RV64IF-NEXT: feq.s a0, ft0, ft0 ; RV64IF-NEXT: bnez a0, .LBB12_2 ; RV64IF-NEXT: # %bb.1: # %start -; RV64IF-NEXT: mv a0, zero +; RV64IF-NEXT: li a0, 0 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB12_2: ; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz @@ -371,14 +371,14 @@ ; RV32IF-NEXT: mv a3, a0 ; RV32IF-NEXT: bnez s0, .LBB14_2 ; RV32IF-NEXT: # %bb.1: # %start -; RV32IF-NEXT: mv a3, zero +; RV32IF-NEXT: li a3, 0 ; RV32IF-NEXT: .LBB14_2: # %start ; RV32IF-NEXT: lui a0, %hi(.LCPI14_0) ; RV32IF-NEXT: flw ft0, %lo(.LCPI14_0)(a0) ; RV32IF-NEXT: flw ft1, 4(sp) # 4-byte Folded Reload ; RV32IF-NEXT: flt.s a4, ft0, ft1 -; RV32IF-NEXT: addi a2, zero, -1 -; RV32IF-NEXT: addi a0, zero, -1 +; RV32IF-NEXT: li a2, -1 +; RV32IF-NEXT: li a0, -1 ; RV32IF-NEXT: beqz a4, .LBB14_7 ; RV32IF-NEXT: # %bb.3: # %start ; RV32IF-NEXT: beqz s0, .LBB14_8 @@ -396,7 +396,7 @@ ; RV32IF-NEXT: mv a0, a3 ; RV32IF-NEXT: bnez s0, .LBB14_4 ; RV32IF-NEXT: .LBB14_8: # %start -; RV32IF-NEXT: mv a1, zero +; RV32IF-NEXT: li a1, 0 ; RV32IF-NEXT: beqz a4, .LBB14_5 ; RV32IF-NEXT: j .LBB14_6 ; @@ -406,7 +406,7 @@ ; RV64IF-NEXT: feq.s a0, ft0, ft0 ; RV64IF-NEXT: bnez a0, .LBB14_2 ; RV64IF-NEXT: # %bb.1: # %start -; RV64IF-NEXT: mv a0, zero +; RV64IF-NEXT: li a0, 0 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB14_2: ; RV64IF-NEXT: fcvt.lu.s a0, ft0, rtz diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll --- a/llvm/test/CodeGen/RISCV/float-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll @@ -7,12 +7,12 @@ define i32 @fcmp_false(float %a, float %b) nounwind { ; RV32IF-LABEL: fcmp_false: ; RV32IF: # %bb.0: -; RV32IF-NEXT: mv a0, zero +; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcmp_false: ; RV64IF: # %bb.0: -; RV64IF-NEXT: mv a0, zero +; RV64IF-NEXT: li a0, 0 ; RV64IF-NEXT: ret %1 = fcmp false float %a, %b %2 = zext i1 %1 to i32 @@ -318,12 +318,12 @@ define i32 @fcmp_true(float %a, float %b) nounwind { ; RV32IF-LABEL: fcmp_true: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi a0, zero, 1 +; RV32IF-NEXT: li a0, 1 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcmp_true: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi a0, zero, 1 +; RV64IF-NEXT: li a0, 1 ; RV64IF-NEXT: ret %1 = fcmp true float %a, %b %2 = zext i1 %1 to i32 diff --git a/llvm/test/CodeGen/RISCV/flt-rounds.ll b/llvm/test/CodeGen/RISCV/flt-rounds.ll --- a/llvm/test/CodeGen/RISCV/flt-rounds.ll +++ b/llvm/test/CodeGen/RISCV/flt-rounds.ll @@ -9,12 +9,12 @@ define i32 @test_flt_rounds() nounwind { ; RV32I-LABEL: test_flt_rounds: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a0, zero, 1 +; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_flt_rounds: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: ret %1 = call i32 @llvm.flt.rounds() ret i32 %1 diff --git a/llvm/test/CodeGen/RISCV/fp-imm.ll b/llvm/test/CodeGen/RISCV/fp-imm.ll --- a/llvm/test/CodeGen/RISCV/fp-imm.ll +++ b/llvm/test/CodeGen/RISCV/fp-imm.ll @@ -61,8 +61,8 @@ define double @f64_positive_zero(double *%pd) nounwind { ; RV32F-LABEL: f64_positive_zero: ; RV32F: # %bb.0: -; RV32F-NEXT: mv a0, zero -; RV32F-NEXT: mv a1, zero +; RV32F-NEXT: li a0, 0 +; 
RV32F-NEXT: li a1, 0 ; RV32F-NEXT: ret ; ; RV32D-LABEL: f64_positive_zero: @@ -72,7 +72,7 @@ ; ; RV64F-LABEL: f64_positive_zero: ; RV64F: # %bb.0: -; RV64F-NEXT: mv a0, zero +; RV64F-NEXT: li a0, 0 ; RV64F-NEXT: ret ; ; RV64D-LABEL: f64_positive_zero: @@ -86,7 +86,7 @@ ; RV32F-LABEL: f64_negative_zero: ; RV32F: # %bb.0: ; RV32F-NEXT: lui a1, 524288 -; RV32F-NEXT: mv a0, zero +; RV32F-NEXT: li a0, 0 ; RV32F-NEXT: ret ; ; RV32D-LABEL: f64_negative_zero: @@ -97,7 +97,7 @@ ; ; RV64F-LABEL: f64_negative_zero: ; RV64F: # %bb.0: -; RV64F-NEXT: addi a0, zero, -1 +; RV64F-NEXT: li a0, -1 ; RV64F-NEXT: slli a0, a0, 63 ; RV64F-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll --- a/llvm/test/CodeGen/RISCV/frame.ll +++ b/llvm/test/CodeGen/RISCV/frame.ll @@ -18,7 +18,7 @@ ; RV32I-FPELIM-NEXT: sw zero, 8(sp) ; RV32I-FPELIM-NEXT: addi a0, sp, 12 ; RV32I-FPELIM-NEXT: call test1@plt -; RV32I-FPELIM-NEXT: mv a0, zero +; RV32I-FPELIM-NEXT: li a0, 0 ; RV32I-FPELIM-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-FPELIM-NEXT: addi sp, sp, 32 ; RV32I-FPELIM-NEXT: ret @@ -36,7 +36,7 @@ ; RV32I-WITHFP-NEXT: sw zero, -32(s0) ; RV32I-WITHFP-NEXT: addi a0, s0, -28 ; RV32I-WITHFP-NEXT: call test1@plt -; RV32I-WITHFP-NEXT: mv a0, zero +; RV32I-WITHFP-NEXT: li a0, 0 ; RV32I-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: addi sp, sp, 32 diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll @@ -11,7 +11,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_false: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi a0, zero, 1 +; RV32IZFH-NEXT: li a0, 1 ; RV32IZFH-NEXT: bnez a0, .LBB0_2 ; RV32IZFH-NEXT: # %bb.1: # %if.then ; RV32IZFH-NEXT: ret @@ -22,7 +22,7 @@ ; ; RV64IZFH-LABEL: br_fcmp_false: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi a0, zero, 1 +; RV64IZFH-NEXT: li a0, 1 ; RV64IZFH-NEXT: bnez a0, .LBB0_2 ; RV64IZFH-NEXT: # %bb.1: # %if.then ; RV64IZFH-NEXT: ret @@ -527,7 +527,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_true: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi a0, zero, 1 +; RV32IZFH-NEXT: li a0, 1 ; RV32IZFH-NEXT: bnez a0, .LBB16_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else ; RV32IZFH-NEXT: ret @@ -538,7 +538,7 @@ ; ; RV64IZFH-LABEL: br_fcmp_true: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi a0, zero, 1 +; RV64IZFH-NEXT: li a0, 1 ; RV64IZFH-NEXT: bnez a0, .LBB16_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else ; RV64IZFH-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -39,7 +39,7 @@ ; RV32IZFH-NEXT: feq.s a0, ft0, ft0 ; RV32IZFH-NEXT: bnez a0, .LBB1_2 ; RV32IZFH-NEXT: # %bb.1: # %start -; RV32IZFH-NEXT: mv a0, zero +; RV32IZFH-NEXT: li a0, 0 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB1_2: ; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_0) @@ -57,7 +57,7 @@ ; RV32IDZFH-NEXT: feq.s a0, ft0, ft0 ; RV32IDZFH-NEXT: bnez a0, .LBB1_2 ; RV32IDZFH-NEXT: # %bb.1: # %start -; RV32IDZFH-NEXT: mv a0, zero +; RV32IDZFH-NEXT: li a0, 0 ; RV32IDZFH-NEXT: ret ; RV32IDZFH-NEXT: .LBB1_2: ; RV32IDZFH-NEXT: lui a0, %hi(.LCPI1_0) @@ -75,7 +75,7 @@ ; RV64IZFH-NEXT: feq.s a0, ft0, ft0 ; RV64IZFH-NEXT: bnez a0, .LBB1_2 ; RV64IZFH-NEXT: # %bb.1: # %start -; RV64IZFH-NEXT: mv a0, zero +; 
RV64IZFH-NEXT: li a0, 0 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB1_2: ; RV64IZFH-NEXT: lui a0, %hi(.LCPI1_0) @@ -93,7 +93,7 @@ ; RV64IDZFH-NEXT: feq.s a0, ft0, ft0 ; RV64IDZFH-NEXT: bnez a0, .LBB1_2 ; RV64IDZFH-NEXT: # %bb.1: # %start -; RV64IDZFH-NEXT: mv a0, zero +; RV64IDZFH-NEXT: li a0, 0 ; RV64IDZFH-NEXT: ret ; RV64IDZFH-NEXT: .LBB1_2: ; RV64IDZFH-NEXT: lui a0, %hi(.LCPI1_0) @@ -141,7 +141,7 @@ ; RV32IZFH-LABEL: fcvt_ui_h_multiple_use: ; RV32IZFH: # %bb.0: ; RV32IZFH-NEXT: fcvt.wu.h a1, fa0, rtz -; RV32IZFH-NEXT: addi a0, zero, 1 +; RV32IZFH-NEXT: li a0, 1 ; RV32IZFH-NEXT: beqz a1, .LBB3_2 ; RV32IZFH-NEXT: # %bb.1: ; RV32IZFH-NEXT: mv a0, a1 @@ -151,7 +151,7 @@ ; RV32IDZFH-LABEL: fcvt_ui_h_multiple_use: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.wu.h a1, fa0, rtz -; RV32IDZFH-NEXT: addi a0, zero, 1 +; RV32IDZFH-NEXT: li a0, 1 ; RV32IDZFH-NEXT: beqz a1, .LBB3_2 ; RV32IDZFH-NEXT: # %bb.1: ; RV32IDZFH-NEXT: mv a0, a1 @@ -161,7 +161,7 @@ ; RV64IZFH-LABEL: fcvt_ui_h_multiple_use: ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.wu.h a1, fa0, rtz -; RV64IZFH-NEXT: addi a0, zero, 1 +; RV64IZFH-NEXT: li a0, 1 ; RV64IZFH-NEXT: beqz a1, .LBB3_2 ; RV64IZFH-NEXT: # %bb.1: ; RV64IZFH-NEXT: mv a0, a1 @@ -171,7 +171,7 @@ ; RV64IDZFH-LABEL: fcvt_ui_h_multiple_use: ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: fcvt.wu.h a1, fa0, rtz -; RV64IDZFH-NEXT: addi a0, zero, 1 +; RV64IDZFH-NEXT: li a0, 1 ; RV64IDZFH-NEXT: beqz a1, .LBB3_2 ; RV64IDZFH-NEXT: # %bb.1: ; RV64IDZFH-NEXT: mv a0, a1 @@ -263,7 +263,7 @@ ; RV32IZFH-NEXT: feq.h a0, fa0, fa0 ; RV32IZFH-NEXT: bnez a0, .LBB6_2 ; RV32IZFH-NEXT: # %bb.1: # %start -; RV32IZFH-NEXT: mv a0, zero +; RV32IZFH-NEXT: li a0, 0 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB6_2: ; RV32IZFH-NEXT: fcvt.w.h a0, fa0, rtz @@ -274,7 +274,7 @@ ; RV32IDZFH-NEXT: feq.h a0, fa0, fa0 ; RV32IDZFH-NEXT: bnez a0, .LBB6_2 ; RV32IDZFH-NEXT: # %bb.1: # %start -; RV32IDZFH-NEXT: mv a0, zero +; RV32IDZFH-NEXT: li a0, 0 ; RV32IDZFH-NEXT: ret ; RV32IDZFH-NEXT: .LBB6_2: ; RV32IDZFH-NEXT: fcvt.w.h a0, fa0, rtz @@ -285,7 +285,7 @@ ; RV64IZFH-NEXT: feq.h a0, fa0, fa0 ; RV64IZFH-NEXT: bnez a0, .LBB6_2 ; RV64IZFH-NEXT: # %bb.1: # %start -; RV64IZFH-NEXT: mv a0, zero +; RV64IZFH-NEXT: li a0, 0 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB6_2: ; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rtz @@ -296,7 +296,7 @@ ; RV64IDZFH-NEXT: feq.h a0, fa0, fa0 ; RV64IDZFH-NEXT: bnez a0, .LBB6_2 ; RV64IDZFH-NEXT: # %bb.1: # %start -; RV64IDZFH-NEXT: mv a0, zero +; RV64IDZFH-NEXT: li a0, 0 ; RV64IDZFH-NEXT: ret ; RV64IDZFH-NEXT: .LBB6_2: ; RV64IDZFH-NEXT: fcvt.w.h a0, fa0, rtz @@ -337,7 +337,7 @@ ; RV32IZFH-NEXT: feq.h a0, fa0, fa0 ; RV32IZFH-NEXT: bnez a0, .LBB8_2 ; RV32IZFH-NEXT: # %bb.1: # %start -; RV32IZFH-NEXT: mv a0, zero +; RV32IZFH-NEXT: li a0, 0 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB8_2: ; RV32IZFH-NEXT: fcvt.wu.h a0, fa0, rtz @@ -348,7 +348,7 @@ ; RV32IDZFH-NEXT: feq.h a0, fa0, fa0 ; RV32IDZFH-NEXT: bnez a0, .LBB8_2 ; RV32IDZFH-NEXT: # %bb.1: # %start -; RV32IDZFH-NEXT: mv a0, zero +; RV32IDZFH-NEXT: li a0, 0 ; RV32IDZFH-NEXT: ret ; RV32IDZFH-NEXT: .LBB8_2: ; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz @@ -359,7 +359,7 @@ ; RV64IZFH-NEXT: feq.h a0, fa0, fa0 ; RV64IZFH-NEXT: bnez a0, .LBB8_2 ; RV64IZFH-NEXT: # %bb.1: # %start -; RV64IZFH-NEXT: mv a0, zero +; RV64IZFH-NEXT: li a0, 0 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB8_2: ; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz @@ -370,7 +370,7 @@ ; RV64IDZFH-NEXT: feq.h a0, fa0, fa0 ; RV64IDZFH-NEXT: bnez a0, .LBB8_2 ; RV64IDZFH-NEXT: # %bb.1: # %start -; RV64IDZFH-NEXT: mv a0, zero +; 
RV64IDZFH-NEXT: li a0, 0 ; RV64IDZFH-NEXT: ret ; RV64IDZFH-NEXT: .LBB8_2: ; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz @@ -429,12 +429,12 @@ ; RV32IZFH-NEXT: mv a2, a0 ; RV32IZFH-NEXT: bnez s0, .LBB10_2 ; RV32IZFH-NEXT: # %bb.1: # %start -; RV32IZFH-NEXT: mv a2, zero +; RV32IZFH-NEXT: li a2, 0 ; RV32IZFH-NEXT: .LBB10_2: # %start ; RV32IZFH-NEXT: lui a0, %hi(.LCPI10_1) ; RV32IZFH-NEXT: flw ft0, %lo(.LCPI10_1)(a0) ; RV32IZFH-NEXT: flt.s a3, ft0, fs0 -; RV32IZFH-NEXT: addi a0, zero, -1 +; RV32IZFH-NEXT: li a0, -1 ; RV32IZFH-NEXT: beqz a3, .LBB10_9 ; RV32IZFH-NEXT: # %bb.3: # %start ; RV32IZFH-NEXT: feq.s a2, fs0, fs0 @@ -447,7 +447,7 @@ ; RV32IZFH-NEXT: .LBB10_6: # %start ; RV32IZFH-NEXT: bnez a2, .LBB10_8 ; RV32IZFH-NEXT: .LBB10_7: # %start -; RV32IZFH-NEXT: mv a1, zero +; RV32IZFH-NEXT: li a1, 0 ; RV32IZFH-NEXT: .LBB10_8: # %start ; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -459,7 +459,7 @@ ; RV32IZFH-NEXT: feq.s a2, fs0, fs0 ; RV32IZFH-NEXT: bnez a2, .LBB10_4 ; RV32IZFH-NEXT: .LBB10_10: # %start -; RV32IZFH-NEXT: mv a0, zero +; RV32IZFH-NEXT: li a0, 0 ; RV32IZFH-NEXT: lui a4, 524288 ; RV32IZFH-NEXT: bnez s0, .LBB10_5 ; RV32IZFH-NEXT: .LBB10_11: # %start @@ -485,12 +485,12 @@ ; RV32IDZFH-NEXT: mv a2, a0 ; RV32IDZFH-NEXT: bnez s0, .LBB10_2 ; RV32IDZFH-NEXT: # %bb.1: # %start -; RV32IDZFH-NEXT: mv a2, zero +; RV32IDZFH-NEXT: li a2, 0 ; RV32IDZFH-NEXT: .LBB10_2: # %start ; RV32IDZFH-NEXT: lui a0, %hi(.LCPI10_1) ; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI10_1)(a0) ; RV32IDZFH-NEXT: flt.s a3, ft0, fs0 -; RV32IDZFH-NEXT: addi a0, zero, -1 +; RV32IDZFH-NEXT: li a0, -1 ; RV32IDZFH-NEXT: beqz a3, .LBB10_9 ; RV32IDZFH-NEXT: # %bb.3: # %start ; RV32IDZFH-NEXT: feq.s a2, fs0, fs0 @@ -503,7 +503,7 @@ ; RV32IDZFH-NEXT: .LBB10_6: # %start ; RV32IDZFH-NEXT: bnez a2, .LBB10_8 ; RV32IDZFH-NEXT: .LBB10_7: # %start -; RV32IDZFH-NEXT: mv a1, zero +; RV32IDZFH-NEXT: li a1, 0 ; RV32IDZFH-NEXT: .LBB10_8: # %start ; RV32IDZFH-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; RV32IDZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -515,7 +515,7 @@ ; RV32IDZFH-NEXT: feq.s a2, fs0, fs0 ; RV32IDZFH-NEXT: bnez a2, .LBB10_4 ; RV32IDZFH-NEXT: .LBB10_10: # %start -; RV32IDZFH-NEXT: mv a0, zero +; RV32IDZFH-NEXT: li a0, 0 ; RV32IDZFH-NEXT: lui a4, 524288 ; RV32IDZFH-NEXT: bnez s0, .LBB10_5 ; RV32IDZFH-NEXT: .LBB10_11: # %start @@ -531,7 +531,7 @@ ; RV64IZFH-NEXT: feq.h a0, fa0, fa0 ; RV64IZFH-NEXT: bnez a0, .LBB10_2 ; RV64IZFH-NEXT: # %bb.1: # %start -; RV64IZFH-NEXT: mv a0, zero +; RV64IZFH-NEXT: li a0, 0 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB10_2: ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz @@ -542,7 +542,7 @@ ; RV64IDZFH-NEXT: feq.h a0, fa0, fa0 ; RV64IDZFH-NEXT: bnez a0, .LBB10_2 ; RV64IDZFH-NEXT: # %bb.1: # %start -; RV64IDZFH-NEXT: mv a0, zero +; RV64IDZFH-NEXT: li a0, 0 ; RV64IDZFH-NEXT: ret ; RV64IDZFH-NEXT: .LBB10_2: ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz @@ -600,13 +600,13 @@ ; RV32IZFH-NEXT: mv a3, a0 ; RV32IZFH-NEXT: bnez s0, .LBB12_2 ; RV32IZFH-NEXT: # %bb.1: # %start -; RV32IZFH-NEXT: mv a3, zero +; RV32IZFH-NEXT: li a3, 0 ; RV32IZFH-NEXT: .LBB12_2: # %start ; RV32IZFH-NEXT: lui a0, %hi(.LCPI12_0) ; RV32IZFH-NEXT: flw ft0, %lo(.LCPI12_0)(a0) ; RV32IZFH-NEXT: flt.s a4, ft0, fs0 -; RV32IZFH-NEXT: addi a2, zero, -1 -; RV32IZFH-NEXT: addi a0, zero, -1 +; RV32IZFH-NEXT: li a2, -1 +; RV32IZFH-NEXT: li a0, -1 ; RV32IZFH-NEXT: beqz a4, .LBB12_7 ; RV32IZFH-NEXT: # %bb.3: # %start ; RV32IZFH-NEXT: beqz s0, .LBB12_8 @@ -625,7 +625,7 @@ ; RV32IZFH-NEXT: mv a0, a3 ; 
RV32IZFH-NEXT: bnez s0, .LBB12_4 ; RV32IZFH-NEXT: .LBB12_8: # %start -; RV32IZFH-NEXT: mv a1, zero +; RV32IZFH-NEXT: li a1, 0 ; RV32IZFH-NEXT: beqz a4, .LBB12_5 ; RV32IZFH-NEXT: j .LBB12_6 ; @@ -643,13 +643,13 @@ ; RV32IDZFH-NEXT: mv a3, a0 ; RV32IDZFH-NEXT: bnez s0, .LBB12_2 ; RV32IDZFH-NEXT: # %bb.1: # %start -; RV32IDZFH-NEXT: mv a3, zero +; RV32IDZFH-NEXT: li a3, 0 ; RV32IDZFH-NEXT: .LBB12_2: # %start ; RV32IDZFH-NEXT: lui a0, %hi(.LCPI12_0) ; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI12_0)(a0) ; RV32IDZFH-NEXT: flt.s a4, ft0, fs0 -; RV32IDZFH-NEXT: addi a2, zero, -1 -; RV32IDZFH-NEXT: addi a0, zero, -1 +; RV32IDZFH-NEXT: li a2, -1 +; RV32IDZFH-NEXT: li a0, -1 ; RV32IDZFH-NEXT: beqz a4, .LBB12_7 ; RV32IDZFH-NEXT: # %bb.3: # %start ; RV32IDZFH-NEXT: beqz s0, .LBB12_8 @@ -668,7 +668,7 @@ ; RV32IDZFH-NEXT: mv a0, a3 ; RV32IDZFH-NEXT: bnez s0, .LBB12_4 ; RV32IDZFH-NEXT: .LBB12_8: # %start -; RV32IDZFH-NEXT: mv a1, zero +; RV32IDZFH-NEXT: li a1, 0 ; RV32IDZFH-NEXT: beqz a4, .LBB12_5 ; RV32IDZFH-NEXT: j .LBB12_6 ; @@ -677,7 +677,7 @@ ; RV64IZFH-NEXT: feq.h a0, fa0, fa0 ; RV64IZFH-NEXT: bnez a0, .LBB12_2 ; RV64IZFH-NEXT: # %bb.1: # %start -; RV64IZFH-NEXT: mv a0, zero +; RV64IZFH-NEXT: li a0, 0 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB12_2: ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz @@ -688,7 +688,7 @@ ; RV64IDZFH-NEXT: feq.h a0, fa0, fa0 ; RV64IDZFH-NEXT: bnez a0, .LBB12_2 ; RV64IDZFH-NEXT: # %bb.1: # %start -; RV64IDZFH-NEXT: mv a0, zero +; RV64IDZFH-NEXT: li a0, 0 ; RV64IDZFH-NEXT: ret ; RV64IDZFH-NEXT: .LBB12_2: ; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz diff --git a/llvm/test/CodeGen/RISCV/half-fcmp.ll b/llvm/test/CodeGen/RISCV/half-fcmp.ll --- a/llvm/test/CodeGen/RISCV/half-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/half-fcmp.ll @@ -7,12 +7,12 @@ define i32 @fcmp_false(half %a, half %b) nounwind { ; RV32IZFH-LABEL: fcmp_false: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: mv a0, zero +; RV32IZFH-NEXT: li a0, 0 ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcmp_false: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: mv a0, zero +; RV64IZFH-NEXT: li a0, 0 ; RV64IZFH-NEXT: ret %1 = fcmp false half %a, %b %2 = zext i1 %1 to i32 @@ -262,12 +262,12 @@ define i32 @fcmp_true(half %a, half %b) nounwind { ; RV32IZFH-LABEL: fcmp_true: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi a0, zero, 1 +; RV32IZFH-NEXT: li a0, 1 ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcmp_true: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi a0, zero, 1 +; RV64IZFH-NEXT: li a0, 1 ; RV64IZFH-NEXT: ret %1 = fcmp true half %a, %b %2 = zext i1 %1 to i32 diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll --- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll +++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll @@ -13,9 +13,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a0, %hi(s) ; CHECK-NEXT: addi a0, a0, %lo(s) -; CHECK-NEXT: addi a1, zero, 10 +; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: sw a1, 160(a0) -; CHECK-NEXT: addi a1, zero, 20 +; CHECK-NEXT: li a1, 20 ; CHECK-NEXT: sw a1, 164(a0) ; CHECK-NEXT: ret entry: @@ -32,7 +32,7 @@ ; CHECK-NEXT: lw a1, 164(a0) ; CHECK-NEXT: blez a1, .LBB1_2 ; CHECK-NEXT: # %bb.1: # %if.then -; CHECK-NEXT: addi a1, zero, 10 +; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: sw a1, 160(a0) ; CHECK-NEXT: .LBB1_2: # %if.end ; CHECK-NEXT: ret @@ -149,10 +149,10 @@ ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32-NEXT: lui a0, %hi(foo+8) ; RV32-NEXT: lhu a0, %lo(foo+8)(a0) -; RV32-NEXT: addi a1, zero, 140 +; RV32-NEXT: li a1, 140 ; RV32-NEXT: bne a0, a1, .LBB7_2 ; RV32-NEXT: # %bb.1: # 
%if.end -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -165,10 +165,10 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: lui a0, %hi(foo+8) ; RV64-NEXT: lhu a0, %lo(foo+8)(a0) -; RV64-NEXT: addi a1, zero, 140 +; RV64-NEXT: li a1, 140 ; RV64-NEXT: bne a0, a1, .LBB7_2 ; RV64-NEXT: # %bb.1: # %if.end -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -193,7 +193,7 @@ ; CHECK-LABEL: one_store: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a0, %hi(s+160) -; CHECK-NEXT: addi a1, zero, 10 +; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: sw a1, %lo(s+160)(a0) ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/i32-icmp.ll b/llvm/test/CodeGen/RISCV/i32-icmp.ll --- a/llvm/test/CodeGen/RISCV/i32-icmp.ll +++ b/llvm/test/CodeGen/RISCV/i32-icmp.ll @@ -41,7 +41,7 @@ define i32 @icmp_eq_constant_neg_2048(i32 %a) nounwind { ; RV32I-LABEL: icmp_eq_constant_neg_2048: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, -2048 +; RV32I-NEXT: li a1, -2048 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: ret @@ -107,7 +107,7 @@ define i32 @icmp_ne_constant_neg_2048(i32 %a) nounwind { ; RV32I-LABEL: icmp_ne_constant_neg_2048: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, -2048 +; RV32I-NEXT: li a1, -2048 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll --- a/llvm/test/CodeGen/RISCV/imm.ll +++ b/llvm/test/CodeGen/RISCV/imm.ll @@ -17,22 +17,22 @@ define signext i32 @zero() nounwind { ; RV32I-LABEL: zero: ; RV32I: # %bb.0: -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero: ; RV64I: # %bb.0: -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: zero: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: mv a0, zero +; RV64IZBA-NEXT: li a0, 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: zero: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: mv a0, zero +; RV64IZBS-NEXT: li a0, 0 ; RV64IZBS-NEXT: ret ret i32 0 } @@ -40,22 +40,22 @@ define signext i32 @pos_small() nounwind { ; RV32I-LABEL: pos_small: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a0, zero, 2047 +; RV32I-NEXT: li a0, 2047 ; RV32I-NEXT: ret ; ; RV64I-LABEL: pos_small: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 2047 +; RV64I-NEXT: li a0, 2047 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: pos_small: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, 2047 +; RV64IZBA-NEXT: li a0, 2047 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: pos_small: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, 2047 +; RV64IZBS-NEXT: li a0, 2047 ; RV64IZBS-NEXT: ret ret i32 2047 } @@ -63,22 +63,22 @@ define signext i32 @neg_small() nounwind { ; RV32I-LABEL: neg_small: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a0, zero, -2048 +; RV32I-NEXT: li a0, -2048 ; RV32I-NEXT: ret ; ; RV64I-LABEL: neg_small: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -2048 +; RV64I-NEXT: li a0, -2048 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: neg_small: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -2048 +; RV64IZBA-NEXT: li a0, -2048 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: neg_small: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, -2048 +; RV64IZBS-NEXT: li a0, -2048 ; RV64IZBS-NEXT: ret ret i32 -2048 } @@ -274,24 +274,24 @@ ; RV32I-LABEL: imm64_1: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: 
li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_1: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: slli a0, a0, 31 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm64_1: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, 1 +; RV64IZBA-NEXT: li a0, 1 ; RV64IZBA-NEXT: slli a0, a0, 31 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm64_1: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, 1 +; RV64IZBS-NEXT: li a0, 1 ; RV64IZBS-NEXT: slli a0, a0, 31 ; RV64IZBS-NEXT: ret ret i64 2147483648 ; 0x8000_0000 @@ -300,25 +300,25 @@ define i64 @imm64_2() nounwind { ; RV32I-LABEL: imm64_2: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a0, zero, -1 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a0, -1 +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_2: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm64_2: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -1 +; RV64IZBA-NEXT: li a0, -1 ; RV64IZBA-NEXT: srli a0, a0, 32 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm64_2: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, -1 +; RV64IZBS-NEXT: li a0, -1 ; RV64IZBS-NEXT: srli a0, a0, 32 ; RV64IZBS-NEXT: ret ret i64 4294967295 ; 0xFFFF_FFFF @@ -327,25 +327,25 @@ define i64 @imm64_3() nounwind { ; RV32I-LABEL: imm64_3: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 1 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a1, 1 +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_3: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm64_3: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, 1 +; RV64IZBA-NEXT: li a0, 1 ; RV64IZBA-NEXT: slli a0, a0, 32 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm64_3: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, 1 +; RV64IZBS-NEXT: li a0, 1 ; RV64IZBS-NEXT: slli a0, a0, 32 ; RV64IZBS-NEXT: ret ret i64 4294967296 ; 0x1_0000_0000 @@ -355,24 +355,24 @@ ; RV32I-LABEL: imm64_4: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_4: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: slli a0, a0, 63 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm64_4: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -1 +; RV64IZBA-NEXT: li a0, -1 ; RV64IZBA-NEXT: slli a0, a0, 63 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm64_4: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, -1 +; RV64IZBS-NEXT: li a0, -1 ; RV64IZBS-NEXT: slli a0, a0, 63 ; RV64IZBS-NEXT: ret ret i64 9223372036854775808 ; 0x8000_0000_0000_0000 @@ -382,24 +382,24 @@ ; RV32I-LABEL: imm64_5: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_5: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: slli a0, a0, 63 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm64_5: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -1 +; RV64IZBA-NEXT: li a0, -1 ; RV64IZBA-NEXT: slli a0, a0, 63 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm64_5: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, -1 +; RV64IZBS-NEXT: li a0, -1 ; RV64IZBS-NEXT: slli a0, a0, 63 ; RV64IZBS-NEXT: ret ret i64 -9223372036854775808 ; 0x8000_0000_0000_0000 @@ -410,7 +410,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 74565 ; RV32I-NEXT: addi a1, a0, 1656 -; RV32I-NEXT: mv a0, zero +; 
RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_6: @@ -446,7 +446,7 @@ ; ; RV64I-LABEL: imm64_7: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 7 +; RV64I-NEXT: li a0, 7 ; RV64I-NEXT: slli a0, a0, 36 ; RV64I-NEXT: addi a0, a0, 11 ; RV64I-NEXT: slli a0, a0, 24 @@ -455,7 +455,7 @@ ; ; RV64IZBA-LABEL: imm64_7: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, 7 +; RV64IZBA-NEXT: li a0, 7 ; RV64IZBA-NEXT: slli a0, a0, 36 ; RV64IZBA-NEXT: addi a0, a0, 11 ; RV64IZBA-NEXT: slli a0, a0, 24 @@ -464,7 +464,7 @@ ; ; RV64IZBS-LABEL: imm64_7: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, 7 +; RV64IZBS-NEXT: li a0, 7 ; RV64IZBS-NEXT: slli a0, a0, 36 ; RV64IZBS-NEXT: addi a0, a0, 11 ; RV64IZBS-NEXT: slli a0, a0, 24 @@ -525,23 +525,23 @@ define i64 @imm64_9() nounwind { ; RV32I-LABEL: imm64_9: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a0, zero, -1 -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a0, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_9: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm64_9: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -1 +; RV64IZBA-NEXT: li a0, -1 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm64_9: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, -1 +; RV64IZBS-NEXT: li a0, -1 ; RV64IZBS-NEXT: ret ret i64 -1 } @@ -553,7 +553,7 @@ ; RV32I-LABEL: imm_left_shifted_lui_1: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 524290 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_left_shifted_lui_1: @@ -580,7 +580,7 @@ ; RV32I-LABEL: imm_left_shifted_lui_2: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 4 -; RV32I-NEXT: addi a1, zero, 1 +; RV32I-NEXT: li a1, 1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_left_shifted_lui_2: @@ -608,7 +608,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 1 ; RV32I-NEXT: addi a1, a0, 1 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_left_shifted_lui_3: @@ -668,7 +668,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 1048575 ; RV32I-NEXT: addi a0, a0, 1 -; RV32I-NEXT: addi a1, zero, 255 +; RV32I-NEXT: li a1, 255 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_right_shifted_lui_2: @@ -700,7 +700,7 @@ define i64 @imm_decoupled_lui_addi() nounwind { ; RV32I-LABEL: imm_decoupled_lui_addi: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a0, zero, -3 +; RV32I-NEXT: li a0, -3 ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: ret ; @@ -739,7 +739,7 @@ ; ; RV64I-LABEL: imm_end_xori_1: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: slli a0, a0, 36 ; RV64I-NEXT: addi a0, a0, 1 ; RV64I-NEXT: slli a0, a0, 25 @@ -748,7 +748,7 @@ ; ; RV64IZBA-LABEL: imm_end_xori_1: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -1 +; RV64IZBA-NEXT: li a0, -1 ; RV64IZBA-NEXT: slli a0, a0, 36 ; RV64IZBA-NEXT: addi a0, a0, 1 ; RV64IZBA-NEXT: slli a0, a0, 25 @@ -757,7 +757,7 @@ ; ; RV64IZBS-LABEL: imm_end_xori_1: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, -1 +; RV64IZBS-NEXT: li a0, -1 ; RV64IZBS-NEXT: slli a0, a0, 36 ; RV64IZBS-NEXT: addi a0, a0, 1 ; RV64IZBS-NEXT: slli a0, a0, 25 @@ -779,7 +779,7 @@ ; ; RV64I-LABEL: imm_end_2addi_1: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -2047 +; RV64I-NEXT: li a0, -2047 ; RV64I-NEXT: slli a0, a0, 27 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: slli a0, a0, 12 @@ -788,7 +788,7 @@ ; ; RV64IZBA-LABEL: imm_end_2addi_1: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -2047 +; RV64IZBA-NEXT: li a0, -2047 ; RV64IZBA-NEXT: slli a0, a0, 27 ; 
RV64IZBA-NEXT: addi a0, a0, -1 ; RV64IZBA-NEXT: slli a0, a0, 12 @@ -797,7 +797,7 @@ ; ; RV64IZBS-LABEL: imm_end_2addi_1: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, -2047 +; RV64IZBS-NEXT: li a0, -2047 ; RV64IZBS-NEXT: slli a0, a0, 27 ; RV64IZBS-NEXT: addi a0, a0, -1 ; RV64IZBS-NEXT: slli a0, a0, 12 @@ -819,7 +819,7 @@ ; ; RV64I-LABEL: imm_2reg_1: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: slli a0, a0, 35 ; RV64I-NEXT: addi a0, a0, 9 ; RV64I-NEXT: slli a0, a0, 13 @@ -830,7 +830,7 @@ ; ; RV64IZBA-LABEL: imm_2reg_1: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -1 +; RV64IZBA-NEXT: li a0, -1 ; RV64IZBA-NEXT: slli a0, a0, 35 ; RV64IZBA-NEXT: addi a0, a0, 9 ; RV64IZBA-NEXT: slli a0, a0, 13 @@ -855,25 +855,25 @@ define void @imm_store_i16_neg1(i16* %p) nounwind { ; RV32I-LABEL: imm_store_i16_neg1: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: sh a1, 0(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_store_i16_neg1: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: sh a1, 0(a0) ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_store_i16_neg1: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a1, zero, -1 +; RV64IZBA-NEXT: li a1, -1 ; RV64IZBA-NEXT: sh a1, 0(a0) ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_store_i16_neg1: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a1, zero, -1 +; RV64IZBS-NEXT: li a1, -1 ; RV64IZBS-NEXT: sh a1, 0(a0) ; RV64IZBS-NEXT: ret store i16 -1, i16* %p @@ -884,25 +884,25 @@ define void @imm_store_i32_neg1(i32* %p) nounwind { ; RV32I-LABEL: imm_store_i32_neg1: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_store_i32_neg1: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: sw a1, 0(a0) ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_store_i32_neg1: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a1, zero, -1 +; RV64IZBA-NEXT: li a1, -1 ; RV64IZBA-NEXT: sw a1, 0(a0) ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_store_i32_neg1: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a1, zero, -1 +; RV64IZBS-NEXT: li a1, -1 ; RV64IZBS-NEXT: sw a1, 0(a0) ; RV64IZBS-NEXT: ret store i32 -1, i32* %p @@ -914,7 +914,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 263018 ; RV32I-NEXT: addi a0, a0, -795 -; RV32I-NEXT: addi a1, zero, 1 +; RV32I-NEXT: li a1, 1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_5372288229: @@ -946,7 +946,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 785558 ; RV32I-NEXT: addi a0, a0, 795 -; RV32I-NEXT: addi a1, zero, -2 +; RV32I-NEXT: li a1, -2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_5372288229: @@ -978,7 +978,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 88838 ; RV32I-NEXT: addi a0, a0, -1325 -; RV32I-NEXT: addi a1, zero, 2 +; RV32I-NEXT: li a1, 2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_8953813715: @@ -1010,7 +1010,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 959738 ; RV32I-NEXT: addi a0, a0, 1325 -; RV32I-NEXT: addi a1, zero, -3 +; RV32I-NEXT: li a1, -3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_8953813715: @@ -1042,7 +1042,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 789053 ; RV32I-NEXT: addi a0, a0, 1711 -; RV32I-NEXT: addi a1, zero, 3 +; RV32I-NEXT: li a1, 3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_16116864687: @@ -1075,7 +1075,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 259523 ; RV32I-NEXT: addi a0, a0, -1711 -; RV32I-NEXT: addi a1, zero, -4 +; RV32I-NEXT: li a1, -4 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_16116864687: @@ 
-1108,7 +1108,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 572348 ; RV32I-NEXT: addi a0, a0, -1093 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_2344336315: @@ -1181,7 +1181,7 @@ ; ; RV64I-LABEL: imm_neg_9223372034778874949: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: slli a0, a0, 37 ; RV64I-NEXT: addi a0, a0, 31 ; RV64I-NEXT: slli a0, a0, 12 @@ -1192,7 +1192,7 @@ ; ; RV64IZBA-LABEL: imm_neg_9223372034778874949: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -1 +; RV64IZBA-NEXT: li a0, -1 ; RV64IZBA-NEXT: slli a0, a0, 37 ; RV64IZBA-NEXT: addi a0, a0, 31 ; RV64IZBA-NEXT: slli a0, a0, 12 @@ -1255,7 +1255,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 506812 ; RV32I-NEXT: addi a0, a0, -1093 -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_2219066437: @@ -1397,7 +1397,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 699051 ; RV32I-NEXT: addi a0, a0, -1366 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_2863311530: @@ -1429,7 +1429,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 349525 ; RV32I-NEXT: addi a0, a0, 1366 -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_2863311530: @@ -1461,26 +1461,26 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 524288 ; RV32I-NEXT: addi a0, a0, 1365 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_2147486378: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: slli a0, a0, 31 ; RV64I-NEXT: addi a0, a0, 1365 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_2147486378: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, 1 +; RV64IZBA-NEXT: li a0, 1 ; RV64IZBA-NEXT: slli a0, a0, 31 ; RV64IZBA-NEXT: addi a0, a0, 1365 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_2147486378: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, 1365 +; RV64IZBS-NEXT: li a0, 1365 ; RV64IZBS-NEXT: bseti a0, a0, 31 ; RV64IZBS-NEXT: ret ret i64 2147485013 @@ -1491,26 +1491,26 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 524288 ; RV32I-NEXT: addi a0, a0, -1365 -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_2147485013: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: slli a0, a0, 31 ; RV64I-NEXT: addi a0, a0, -1365 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_2147485013: ; RV64IZBA: # %bb.0: -; RV64IZBA-NEXT: addi a0, zero, -1 +; RV64IZBA-NEXT: li a0, -1 ; RV64IZBA-NEXT: slli a0, a0, 31 ; RV64IZBA-NEXT: addi a0, a0, -1365 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_2147485013: ; RV64IZBS: # %bb.0: -; RV64IZBS-NEXT: addi a0, zero, -1365 +; RV64IZBS-NEXT: li a0, -1365 ; RV64IZBS-NEXT: bclri a0, a0, 31 ; RV64IZBS-NEXT: ret ret i64 -2147485013 @@ -1554,7 +1554,7 @@ ; RV32I-LABEL: imm_50394234880: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 768944 -; RV32I-NEXT: addi a1, zero, 11 +; RV32I-NEXT: li a1, 11 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_50394234880: diff --git a/llvm/test/CodeGen/RISCV/indirectbr.ll b/llvm/test/CodeGen/RISCV/indirectbr.ll --- a/llvm/test/CodeGen/RISCV/indirectbr.ll +++ b/llvm/test/CodeGen/RISCV/indirectbr.ll @@ -7,7 +7,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: jr a0 ; RV32I-NEXT: .LBB0_1: # %test_label -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret indirectbr i8* %target, [label %test_label] test_label: @@ -21,7 +21,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: jr 1380(a0) ; 
RV32I-NEXT: .LBB1_1: # %test_label -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret %target = getelementptr inbounds i8, i8* %a, i32 1380 indirectbr i8* %target, [label %test_label] diff --git a/llvm/test/CodeGen/RISCV/jumptable.ll b/llvm/test/CodeGen/RISCV/jumptable.ll --- a/llvm/test/CodeGen/RISCV/jumptable.ll +++ b/llvm/test/CodeGen/RISCV/jumptable.ll @@ -11,31 +11,31 @@ define void @below_threshold(i32 %in, i32* %out) nounwind { ; RV32I-SMALL-LABEL: below_threshold: ; RV32I-SMALL: # %bb.0: # %entry -; RV32I-SMALL-NEXT: addi a2, zero, 2 +; RV32I-SMALL-NEXT: li a2, 2 ; RV32I-SMALL-NEXT: blt a2, a0, .LBB0_4 ; RV32I-SMALL-NEXT: # %bb.1: # %entry -; RV32I-SMALL-NEXT: addi a2, zero, 1 +; RV32I-SMALL-NEXT: li a2, 1 ; RV32I-SMALL-NEXT: beq a0, a2, .LBB0_7 ; RV32I-SMALL-NEXT: # %bb.2: # %entry -; RV32I-SMALL-NEXT: addi a2, zero, 2 +; RV32I-SMALL-NEXT: li a2, 2 ; RV32I-SMALL-NEXT: bne a0, a2, .LBB0_10 ; RV32I-SMALL-NEXT: # %bb.3: # %bb2 -; RV32I-SMALL-NEXT: addi a0, zero, 3 +; RV32I-SMALL-NEXT: li a0, 3 ; RV32I-SMALL-NEXT: j .LBB0_9 ; RV32I-SMALL-NEXT: .LBB0_4: # %entry -; RV32I-SMALL-NEXT: addi a2, zero, 3 +; RV32I-SMALL-NEXT: li a2, 3 ; RV32I-SMALL-NEXT: beq a0, a2, .LBB0_8 ; RV32I-SMALL-NEXT: # %bb.5: # %entry -; RV32I-SMALL-NEXT: addi a2, zero, 4 +; RV32I-SMALL-NEXT: li a2, 4 ; RV32I-SMALL-NEXT: bne a0, a2, .LBB0_10 ; RV32I-SMALL-NEXT: # %bb.6: # %bb4 -; RV32I-SMALL-NEXT: addi a0, zero, 1 +; RV32I-SMALL-NEXT: li a0, 1 ; RV32I-SMALL-NEXT: j .LBB0_9 ; RV32I-SMALL-NEXT: .LBB0_7: # %bb1 -; RV32I-SMALL-NEXT: addi a0, zero, 4 +; RV32I-SMALL-NEXT: li a0, 4 ; RV32I-SMALL-NEXT: j .LBB0_9 ; RV32I-SMALL-NEXT: .LBB0_8: # %bb3 -; RV32I-SMALL-NEXT: addi a0, zero, 2 +; RV32I-SMALL-NEXT: li a0, 2 ; RV32I-SMALL-NEXT: .LBB0_9: # %exit ; RV32I-SMALL-NEXT: sw a0, 0(a1) ; RV32I-SMALL-NEXT: .LBB0_10: # %exit @@ -43,31 +43,31 @@ ; ; RV32I-MEDIUM-LABEL: below_threshold: ; RV32I-MEDIUM: # %bb.0: # %entry -; RV32I-MEDIUM-NEXT: addi a2, zero, 2 +; RV32I-MEDIUM-NEXT: li a2, 2 ; RV32I-MEDIUM-NEXT: blt a2, a0, .LBB0_4 ; RV32I-MEDIUM-NEXT: # %bb.1: # %entry -; RV32I-MEDIUM-NEXT: addi a2, zero, 1 +; RV32I-MEDIUM-NEXT: li a2, 1 ; RV32I-MEDIUM-NEXT: beq a0, a2, .LBB0_7 ; RV32I-MEDIUM-NEXT: # %bb.2: # %entry -; RV32I-MEDIUM-NEXT: addi a2, zero, 2 +; RV32I-MEDIUM-NEXT: li a2, 2 ; RV32I-MEDIUM-NEXT: bne a0, a2, .LBB0_10 ; RV32I-MEDIUM-NEXT: # %bb.3: # %bb2 -; RV32I-MEDIUM-NEXT: addi a0, zero, 3 +; RV32I-MEDIUM-NEXT: li a0, 3 ; RV32I-MEDIUM-NEXT: j .LBB0_9 ; RV32I-MEDIUM-NEXT: .LBB0_4: # %entry -; RV32I-MEDIUM-NEXT: addi a2, zero, 3 +; RV32I-MEDIUM-NEXT: li a2, 3 ; RV32I-MEDIUM-NEXT: beq a0, a2, .LBB0_8 ; RV32I-MEDIUM-NEXT: # %bb.5: # %entry -; RV32I-MEDIUM-NEXT: addi a2, zero, 4 +; RV32I-MEDIUM-NEXT: li a2, 4 ; RV32I-MEDIUM-NEXT: bne a0, a2, .LBB0_10 ; RV32I-MEDIUM-NEXT: # %bb.6: # %bb4 -; RV32I-MEDIUM-NEXT: addi a0, zero, 1 +; RV32I-MEDIUM-NEXT: li a0, 1 ; RV32I-MEDIUM-NEXT: j .LBB0_9 ; RV32I-MEDIUM-NEXT: .LBB0_7: # %bb1 -; RV32I-MEDIUM-NEXT: addi a0, zero, 4 +; RV32I-MEDIUM-NEXT: li a0, 4 ; RV32I-MEDIUM-NEXT: j .LBB0_9 ; RV32I-MEDIUM-NEXT: .LBB0_8: # %bb3 -; RV32I-MEDIUM-NEXT: addi a0, zero, 2 +; RV32I-MEDIUM-NEXT: li a0, 2 ; RV32I-MEDIUM-NEXT: .LBB0_9: # %exit ; RV32I-MEDIUM-NEXT: sw a0, 0(a1) ; RV32I-MEDIUM-NEXT: .LBB0_10: # %exit @@ -76,31 +76,31 @@ ; RV64I-SMALL-LABEL: below_threshold: ; RV64I-SMALL: # %bb.0: # %entry ; RV64I-SMALL-NEXT: sext.w a0, a0 -; RV64I-SMALL-NEXT: addi a2, zero, 2 +; RV64I-SMALL-NEXT: li a2, 2 ; RV64I-SMALL-NEXT: blt a2, a0, .LBB0_4 ; RV64I-SMALL-NEXT: # %bb.1: # %entry -; 
RV64I-SMALL-NEXT: addi a2, zero, 1 +; RV64I-SMALL-NEXT: li a2, 1 ; RV64I-SMALL-NEXT: beq a0, a2, .LBB0_7 ; RV64I-SMALL-NEXT: # %bb.2: # %entry -; RV64I-SMALL-NEXT: addi a2, zero, 2 +; RV64I-SMALL-NEXT: li a2, 2 ; RV64I-SMALL-NEXT: bne a0, a2, .LBB0_10 ; RV64I-SMALL-NEXT: # %bb.3: # %bb2 -; RV64I-SMALL-NEXT: addi a0, zero, 3 +; RV64I-SMALL-NEXT: li a0, 3 ; RV64I-SMALL-NEXT: j .LBB0_9 ; RV64I-SMALL-NEXT: .LBB0_4: # %entry -; RV64I-SMALL-NEXT: addi a2, zero, 3 +; RV64I-SMALL-NEXT: li a2, 3 ; RV64I-SMALL-NEXT: beq a0, a2, .LBB0_8 ; RV64I-SMALL-NEXT: # %bb.5: # %entry -; RV64I-SMALL-NEXT: addi a2, zero, 4 +; RV64I-SMALL-NEXT: li a2, 4 ; RV64I-SMALL-NEXT: bne a0, a2, .LBB0_10 ; RV64I-SMALL-NEXT: # %bb.6: # %bb4 -; RV64I-SMALL-NEXT: addi a0, zero, 1 +; RV64I-SMALL-NEXT: li a0, 1 ; RV64I-SMALL-NEXT: j .LBB0_9 ; RV64I-SMALL-NEXT: .LBB0_7: # %bb1 -; RV64I-SMALL-NEXT: addi a0, zero, 4 +; RV64I-SMALL-NEXT: li a0, 4 ; RV64I-SMALL-NEXT: j .LBB0_9 ; RV64I-SMALL-NEXT: .LBB0_8: # %bb3 -; RV64I-SMALL-NEXT: addi a0, zero, 2 +; RV64I-SMALL-NEXT: li a0, 2 ; RV64I-SMALL-NEXT: .LBB0_9: # %exit ; RV64I-SMALL-NEXT: sw a0, 0(a1) ; RV64I-SMALL-NEXT: .LBB0_10: # %exit @@ -109,31 +109,31 @@ ; RV64I-MEDIUM-LABEL: below_threshold: ; RV64I-MEDIUM: # %bb.0: # %entry ; RV64I-MEDIUM-NEXT: sext.w a0, a0 -; RV64I-MEDIUM-NEXT: addi a2, zero, 2 +; RV64I-MEDIUM-NEXT: li a2, 2 ; RV64I-MEDIUM-NEXT: blt a2, a0, .LBB0_4 ; RV64I-MEDIUM-NEXT: # %bb.1: # %entry -; RV64I-MEDIUM-NEXT: addi a2, zero, 1 +; RV64I-MEDIUM-NEXT: li a2, 1 ; RV64I-MEDIUM-NEXT: beq a0, a2, .LBB0_7 ; RV64I-MEDIUM-NEXT: # %bb.2: # %entry -; RV64I-MEDIUM-NEXT: addi a2, zero, 2 +; RV64I-MEDIUM-NEXT: li a2, 2 ; RV64I-MEDIUM-NEXT: bne a0, a2, .LBB0_10 ; RV64I-MEDIUM-NEXT: # %bb.3: # %bb2 -; RV64I-MEDIUM-NEXT: addi a0, zero, 3 +; RV64I-MEDIUM-NEXT: li a0, 3 ; RV64I-MEDIUM-NEXT: j .LBB0_9 ; RV64I-MEDIUM-NEXT: .LBB0_4: # %entry -; RV64I-MEDIUM-NEXT: addi a2, zero, 3 +; RV64I-MEDIUM-NEXT: li a2, 3 ; RV64I-MEDIUM-NEXT: beq a0, a2, .LBB0_8 ; RV64I-MEDIUM-NEXT: # %bb.5: # %entry -; RV64I-MEDIUM-NEXT: addi a2, zero, 4 +; RV64I-MEDIUM-NEXT: li a2, 4 ; RV64I-MEDIUM-NEXT: bne a0, a2, .LBB0_10 ; RV64I-MEDIUM-NEXT: # %bb.6: # %bb4 -; RV64I-MEDIUM-NEXT: addi a0, zero, 1 +; RV64I-MEDIUM-NEXT: li a0, 1 ; RV64I-MEDIUM-NEXT: j .LBB0_9 ; RV64I-MEDIUM-NEXT: .LBB0_7: # %bb1 -; RV64I-MEDIUM-NEXT: addi a0, zero, 4 +; RV64I-MEDIUM-NEXT: li a0, 4 ; RV64I-MEDIUM-NEXT: j .LBB0_9 ; RV64I-MEDIUM-NEXT: .LBB0_8: # %bb3 -; RV64I-MEDIUM-NEXT: addi a0, zero, 2 +; RV64I-MEDIUM-NEXT: li a0, 2 ; RV64I-MEDIUM-NEXT: .LBB0_9: # %exit ; RV64I-MEDIUM-NEXT: sw a0, 0(a1) ; RV64I-MEDIUM-NEXT: .LBB0_10: # %exit @@ -165,7 +165,7 @@ ; RV32I-SMALL-LABEL: above_threshold: ; RV32I-SMALL: # %bb.0: # %entry ; RV32I-SMALL-NEXT: addi a0, a0, -1 -; RV32I-SMALL-NEXT: addi a2, zero, 5 +; RV32I-SMALL-NEXT: li a2, 5 ; RV32I-SMALL-NEXT: bltu a2, a0, .LBB1_9 ; RV32I-SMALL-NEXT: # %bb.1: # %entry ; RV32I-SMALL-NEXT: slli a0, a0, 2 @@ -175,22 +175,22 @@ ; RV32I-SMALL-NEXT: lw a0, 0(a0) ; RV32I-SMALL-NEXT: jr a0 ; RV32I-SMALL-NEXT: .LBB1_2: # %bb1 -; RV32I-SMALL-NEXT: addi a0, zero, 4 +; RV32I-SMALL-NEXT: li a0, 4 ; RV32I-SMALL-NEXT: j .LBB1_8 ; RV32I-SMALL-NEXT: .LBB1_3: # %bb2 -; RV32I-SMALL-NEXT: addi a0, zero, 3 +; RV32I-SMALL-NEXT: li a0, 3 ; RV32I-SMALL-NEXT: j .LBB1_8 ; RV32I-SMALL-NEXT: .LBB1_4: # %bb3 -; RV32I-SMALL-NEXT: addi a0, zero, 2 +; RV32I-SMALL-NEXT: li a0, 2 ; RV32I-SMALL-NEXT: j .LBB1_8 ; RV32I-SMALL-NEXT: .LBB1_5: # %bb4 -; RV32I-SMALL-NEXT: addi a0, zero, 1 +; RV32I-SMALL-NEXT: li a0, 1 ; RV32I-SMALL-NEXT: j 
.LBB1_8 ; RV32I-SMALL-NEXT: .LBB1_6: # %bb5 -; RV32I-SMALL-NEXT: addi a0, zero, 100 +; RV32I-SMALL-NEXT: li a0, 100 ; RV32I-SMALL-NEXT: j .LBB1_8 ; RV32I-SMALL-NEXT: .LBB1_7: # %bb6 -; RV32I-SMALL-NEXT: addi a0, zero, 200 +; RV32I-SMALL-NEXT: li a0, 200 ; RV32I-SMALL-NEXT: .LBB1_8: # %exit ; RV32I-SMALL-NEXT: sw a0, 0(a1) ; RV32I-SMALL-NEXT: .LBB1_9: # %exit @@ -199,7 +199,7 @@ ; RV32I-MEDIUM-LABEL: above_threshold: ; RV32I-MEDIUM: # %bb.0: # %entry ; RV32I-MEDIUM-NEXT: addi a0, a0, -1 -; RV32I-MEDIUM-NEXT: addi a2, zero, 5 +; RV32I-MEDIUM-NEXT: li a2, 5 ; RV32I-MEDIUM-NEXT: bltu a2, a0, .LBB1_9 ; RV32I-MEDIUM-NEXT: # %bb.1: # %entry ; RV32I-MEDIUM-NEXT: slli a0, a0, 2 @@ -211,22 +211,22 @@ ; RV32I-MEDIUM-NEXT: lw a0, 0(a0) ; RV32I-MEDIUM-NEXT: jr a0 ; RV32I-MEDIUM-NEXT: .LBB1_2: # %bb1 -; RV32I-MEDIUM-NEXT: addi a0, zero, 4 +; RV32I-MEDIUM-NEXT: li a0, 4 ; RV32I-MEDIUM-NEXT: j .LBB1_8 ; RV32I-MEDIUM-NEXT: .LBB1_3: # %bb2 -; RV32I-MEDIUM-NEXT: addi a0, zero, 3 +; RV32I-MEDIUM-NEXT: li a0, 3 ; RV32I-MEDIUM-NEXT: j .LBB1_8 ; RV32I-MEDIUM-NEXT: .LBB1_4: # %bb3 -; RV32I-MEDIUM-NEXT: addi a0, zero, 2 +; RV32I-MEDIUM-NEXT: li a0, 2 ; RV32I-MEDIUM-NEXT: j .LBB1_8 ; RV32I-MEDIUM-NEXT: .LBB1_5: # %bb4 -; RV32I-MEDIUM-NEXT: addi a0, zero, 1 +; RV32I-MEDIUM-NEXT: li a0, 1 ; RV32I-MEDIUM-NEXT: j .LBB1_8 ; RV32I-MEDIUM-NEXT: .LBB1_6: # %bb5 -; RV32I-MEDIUM-NEXT: addi a0, zero, 100 +; RV32I-MEDIUM-NEXT: li a0, 100 ; RV32I-MEDIUM-NEXT: j .LBB1_8 ; RV32I-MEDIUM-NEXT: .LBB1_7: # %bb6 -; RV32I-MEDIUM-NEXT: addi a0, zero, 200 +; RV32I-MEDIUM-NEXT: li a0, 200 ; RV32I-MEDIUM-NEXT: .LBB1_8: # %exit ; RV32I-MEDIUM-NEXT: sw a0, 0(a1) ; RV32I-MEDIUM-NEXT: .LBB1_9: # %exit @@ -236,7 +236,7 @@ ; RV64I-SMALL: # %bb.0: # %entry ; RV64I-SMALL-NEXT: sext.w a0, a0 ; RV64I-SMALL-NEXT: addi a0, a0, -1 -; RV64I-SMALL-NEXT: addi a2, zero, 5 +; RV64I-SMALL-NEXT: li a2, 5 ; RV64I-SMALL-NEXT: bltu a2, a0, .LBB1_9 ; RV64I-SMALL-NEXT: # %bb.1: # %entry ; RV64I-SMALL-NEXT: slli a0, a0, 3 @@ -246,22 +246,22 @@ ; RV64I-SMALL-NEXT: ld a0, 0(a0) ; RV64I-SMALL-NEXT: jr a0 ; RV64I-SMALL-NEXT: .LBB1_2: # %bb1 -; RV64I-SMALL-NEXT: addi a0, zero, 4 +; RV64I-SMALL-NEXT: li a0, 4 ; RV64I-SMALL-NEXT: j .LBB1_8 ; RV64I-SMALL-NEXT: .LBB1_3: # %bb2 -; RV64I-SMALL-NEXT: addi a0, zero, 3 +; RV64I-SMALL-NEXT: li a0, 3 ; RV64I-SMALL-NEXT: j .LBB1_8 ; RV64I-SMALL-NEXT: .LBB1_4: # %bb3 -; RV64I-SMALL-NEXT: addi a0, zero, 2 +; RV64I-SMALL-NEXT: li a0, 2 ; RV64I-SMALL-NEXT: j .LBB1_8 ; RV64I-SMALL-NEXT: .LBB1_5: # %bb4 -; RV64I-SMALL-NEXT: addi a0, zero, 1 +; RV64I-SMALL-NEXT: li a0, 1 ; RV64I-SMALL-NEXT: j .LBB1_8 ; RV64I-SMALL-NEXT: .LBB1_6: # %bb5 -; RV64I-SMALL-NEXT: addi a0, zero, 100 +; RV64I-SMALL-NEXT: li a0, 100 ; RV64I-SMALL-NEXT: j .LBB1_8 ; RV64I-SMALL-NEXT: .LBB1_7: # %bb6 -; RV64I-SMALL-NEXT: addi a0, zero, 200 +; RV64I-SMALL-NEXT: li a0, 200 ; RV64I-SMALL-NEXT: .LBB1_8: # %exit ; RV64I-SMALL-NEXT: sw a0, 0(a1) ; RV64I-SMALL-NEXT: .LBB1_9: # %exit @@ -271,7 +271,7 @@ ; RV64I-MEDIUM: # %bb.0: # %entry ; RV64I-MEDIUM-NEXT: sext.w a0, a0 ; RV64I-MEDIUM-NEXT: addi a0, a0, -1 -; RV64I-MEDIUM-NEXT: addi a2, zero, 5 +; RV64I-MEDIUM-NEXT: li a2, 5 ; RV64I-MEDIUM-NEXT: bltu a2, a0, .LBB1_9 ; RV64I-MEDIUM-NEXT: # %bb.1: # %entry ; RV64I-MEDIUM-NEXT: slli a0, a0, 3 @@ -283,22 +283,22 @@ ; RV64I-MEDIUM-NEXT: ld a0, 0(a0) ; RV64I-MEDIUM-NEXT: jr a0 ; RV64I-MEDIUM-NEXT: .LBB1_2: # %bb1 -; RV64I-MEDIUM-NEXT: addi a0, zero, 4 +; RV64I-MEDIUM-NEXT: li a0, 4 ; RV64I-MEDIUM-NEXT: j .LBB1_8 ; RV64I-MEDIUM-NEXT: .LBB1_3: # %bb2 -; RV64I-MEDIUM-NEXT: addi a0, 
zero, 3 +; RV64I-MEDIUM-NEXT: li a0, 3 ; RV64I-MEDIUM-NEXT: j .LBB1_8 ; RV64I-MEDIUM-NEXT: .LBB1_4: # %bb3 -; RV64I-MEDIUM-NEXT: addi a0, zero, 2 +; RV64I-MEDIUM-NEXT: li a0, 2 ; RV64I-MEDIUM-NEXT: j .LBB1_8 ; RV64I-MEDIUM-NEXT: .LBB1_5: # %bb4 -; RV64I-MEDIUM-NEXT: addi a0, zero, 1 +; RV64I-MEDIUM-NEXT: li a0, 1 ; RV64I-MEDIUM-NEXT: j .LBB1_8 ; RV64I-MEDIUM-NEXT: .LBB1_6: # %bb5 -; RV64I-MEDIUM-NEXT: addi a0, zero, 100 +; RV64I-MEDIUM-NEXT: li a0, 100 ; RV64I-MEDIUM-NEXT: j .LBB1_8 ; RV64I-MEDIUM-NEXT: .LBB1_7: # %bb6 -; RV64I-MEDIUM-NEXT: addi a0, zero, 200 +; RV64I-MEDIUM-NEXT: li a0, 200 ; RV64I-MEDIUM-NEXT: .LBB1_8: # %exit ; RV64I-MEDIUM-NEXT: sw a0, 0(a1) ; RV64I-MEDIUM-NEXT: .LBB1_9: # %exit diff --git a/llvm/test/CodeGen/RISCV/legalize-fneg.ll b/llvm/test/CodeGen/RISCV/legalize-fneg.ll --- a/llvm/test/CodeGen/RISCV/legalize-fneg.ll +++ b/llvm/test/CodeGen/RISCV/legalize-fneg.ll @@ -16,7 +16,7 @@ ; RV64-LABEL: test1: ; RV64: # %bb.0: # %entry ; RV64-NEXT: lw a1, 0(a1) -; RV64-NEXT: addi a2, zero, 1 +; RV64-NEXT: li a2, 1 ; RV64-NEXT: slli a2, a2, 31 ; RV64-NEXT: xor a1, a1, a2 ; RV64-NEXT: sw a1, 0(a0) @@ -42,7 +42,7 @@ ; RV64-LABEL: test2: ; RV64: # %bb.0: # %entry ; RV64-NEXT: ld a1, 0(a1) -; RV64-NEXT: addi a2, zero, -1 +; RV64-NEXT: li a2, -1 ; RV64-NEXT: slli a2, a2, 63 ; RV64-NEXT: xor a1, a1, a2 ; RV64-NEXT: sd a1, 0(a0) @@ -73,7 +73,7 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: ld a2, 8(a1) ; RV64-NEXT: ld a1, 0(a1) -; RV64-NEXT: addi a3, zero, -1 +; RV64-NEXT: li a3, -1 ; RV64-NEXT: slli a3, a3, 63 ; RV64-NEXT: xor a2, a2, a3 ; RV64-NEXT: sd a1, 0(a0) diff --git a/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll b/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll --- a/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll +++ b/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll @@ -11,7 +11,7 @@ define i32 @main() nounwind { ; RV32I-LABEL: main: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: lui a1, %hi(b) ; RV32I-NEXT: addi a1, a1, %lo(b) ; RV32I-NEXT: lui a2, %hi(a) @@ -27,7 +27,7 @@ ; RV32I-NEXT: addi a2, a2, 4 ; RV32I-NEXT: bne a0, a3, .LBB0_1 ; RV32I-NEXT: # %bb.2: # %for.end -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret entry: br label %for.body diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll --- a/llvm/test/CodeGen/RISCV/mul.ll +++ b/llvm/test/CodeGen/RISCV/mul.ll @@ -181,7 +181,7 @@ ; ; RV32IM-LABEL: mul64_constant: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a2, zero, 5 +; RV32IM-NEXT: li a2, 5 ; RV32IM-NEXT: mulhu a2, a0, a2 ; RV32IM-NEXT: slli a3, a1, 2 ; RV32IM-NEXT: add a1, a3, a1 @@ -267,7 +267,7 @@ ; ; RV32IM-LABEL: mulhs_positive_constant: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a1, zero, 5 +; RV32IM-NEXT: li a1, 5 ; RV32IM-NEXT: mulh a0, a0, a1 ; RV32IM-NEXT: ret ; @@ -312,7 +312,7 @@ ; ; RV32IM-LABEL: mulhs_negative_constant: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a1, zero, -5 +; RV32IM-NEXT: li a1, -5 ; RV32IM-NEXT: mulh a0, a0, a1 ; RV32IM-NEXT: ret ; @@ -346,8 +346,8 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: mv a1, zero -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __muldi3@plt ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -389,7 +389,7 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a2, a1 ; RV32I-NEXT: srai a3, a1, 31 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __muldi3@plt ; 
RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -442,7 +442,7 @@ ; ; RV32IM-LABEL: mulhu_constant: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a1, zero, 5 +; RV32IM-NEXT: li a1, 5 ; RV32IM-NEXT: mulhu a0, a0, a1 ; RV32IM-NEXT: ret ; @@ -542,7 +542,7 @@ ; ; RV32IM-LABEL: muli64_p65: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a2, zero, 65 +; RV32IM-NEXT: li a2, 65 ; RV32IM-NEXT: mulhu a2, a0, a2 ; RV32IM-NEXT: slli a3, a1, 6 ; RV32IM-NEXT: add a1, a3, a1 @@ -581,7 +581,7 @@ ; ; RV32IM-LABEL: muli64_p63: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a2, zero, 63 +; RV32IM-NEXT: li a2, 63 ; RV32IM-NEXT: mulhu a2, a0, a2 ; RV32IM-NEXT: slli a3, a1, 6 ; RV32IM-NEXT: sub a1, a3, a1 @@ -682,7 +682,7 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: slli a2, a1, 6 ; RV32IM-NEXT: sub a1, a1, a2 -; RV32IM-NEXT: addi a2, zero, -63 +; RV32IM-NEXT: li a2, -63 ; RV32IM-NEXT: mulhu a2, a0, a2 ; RV32IM-NEXT: sub a2, a2, a0 ; RV32IM-NEXT: add a1, a2, a1 @@ -726,7 +726,7 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: slli a2, a1, 6 ; RV32IM-NEXT: add a1, a2, a1 -; RV32IM-NEXT: addi a2, zero, -65 +; RV32IM-NEXT: li a2, -65 ; RV32IM-NEXT: mulhu a2, a0, a2 ; RV32IM-NEXT: sub a2, a2, a0 ; RV32IM-NEXT: sub a1, a2, a1 @@ -757,7 +757,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 384 +; RV32I-NEXT: li a1, 384 ; RV32I-NEXT: call __mulsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -765,7 +765,7 @@ ; ; RV32IM-LABEL: muli32_p384: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a1, zero, 384 +; RV32IM-NEXT: li a1, 384 ; RV32IM-NEXT: mul a0, a0, a1 ; RV32IM-NEXT: ret ; @@ -773,7 +773,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 384 +; RV64I-NEXT: li a1, 384 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -781,7 +781,7 @@ ; ; RV64IM-LABEL: muli32_p384: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, 384 +; RV64IM-NEXT: li a1, 384 ; RV64IM-NEXT: mulw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, 384 @@ -1055,7 +1055,7 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: lui a2, 1048575 ; RV32I-NEXT: addi a2, a2, -256 -; RV32I-NEXT: addi a3, zero, -1 +; RV32I-NEXT: li a3, -1 ; RV32I-NEXT: call __muldi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -1208,7 +1208,7 @@ ; RV32IM-NEXT: sub t0, a2, a4 ; RV32IM-NEXT: neg t4, a4 ; RV32IM-NEXT: sltu t1, t0, t4 -; RV32IM-NEXT: addi t2, zero, -1 +; RV32IM-NEXT: li t2, -1 ; RV32IM-NEXT: mulhu t3, a4, t2 ; RV32IM-NEXT: add a2, t3, t1 ; RV32IM-NEXT: add t1, t5, a2 @@ -1323,7 +1323,7 @@ ; RV32IM-NEXT: lw a3, 0(a1) ; RV32IM-NEXT: lw a4, 4(a1) ; RV32IM-NEXT: lw t5, 8(a1) -; RV32IM-NEXT: addi a6, zero, -63 +; RV32IM-NEXT: li a6, -63 ; RV32IM-NEXT: mulhu a5, a3, a6 ; RV32IM-NEXT: slli a2, a4, 6 ; RV32IM-NEXT: sub a2, a2, a4 @@ -1335,7 +1335,7 @@ ; RV32IM-NEXT: sub t0, a5, a3 ; RV32IM-NEXT: neg t1, a3 ; RV32IM-NEXT: sltu a5, t0, t1 -; RV32IM-NEXT: addi t2, zero, -1 +; RV32IM-NEXT: li t2, -1 ; RV32IM-NEXT: mulhu t3, a3, t2 ; RV32IM-NEXT: add a5, t3, a5 ; RV32IM-NEXT: add a5, t4, a5 @@ -1391,7 +1391,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: slli a2, a1, 6 ; RV64IM-NEXT: sub a1, a1, a2 -; RV64IM-NEXT: addi a2, zero, -63 +; RV64IM-NEXT: li a2, -63 ; RV64IM-NEXT: mulhu a2, a0, a2 ; RV64IM-NEXT: sub a2, a2, a0 ; RV64IM-NEXT: add a1, a2, a1 @@ -1422,39 +1422,39 @@ ; RV32I-NEXT: 
mv s2, a1 ; RV32I-NEXT: mv s3, a0 ; RV32I-NEXT: srai s4, a3, 31 -; RV32I-NEXT: mv a1, zero -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __muldi3@plt ; RV32I-NEXT: mv s1, a1 ; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: mv a2, s5 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __muldi3@plt ; RV32I-NEXT: add s1, a0, s1 ; RV32I-NEXT: sltu a0, s1, a0 ; RV32I-NEXT: add s7, a1, a0 ; RV32I-NEXT: mv a0, s3 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: mv a2, s0 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __muldi3@plt ; RV32I-NEXT: add a2, a0, s1 ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: add s8, s7, a0 ; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: mv a2, s0 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __muldi3@plt ; RV32I-NEXT: mv s9, a0 ; RV32I-NEXT: mv s6, a1 ; RV32I-NEXT: add s1, a0, s8 ; RV32I-NEXT: mv a0, s5 ; RV32I-NEXT: mv a1, s0 -; RV32I-NEXT: mv a2, zero -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __muldi3@plt ; RV32I-NEXT: mv s0, a0 ; RV32I-NEXT: mv s5, a1 @@ -1530,7 +1530,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv a2, a1 ; RV64I-NEXT: srai a3, a1, 63 -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __multi3@plt ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/pr51206.ll b/llvm/test/CodeGen/RISCV/pr51206.ll --- a/llvm/test/CodeGen/RISCV/pr51206.ll +++ b/llvm/test/CodeGen/RISCV/pr51206.ll @@ -29,13 +29,13 @@ ; CHECK-NEXT: mul a1, a1, a2 ; CHECK-NEXT: srli a1, a1, 18 ; CHECK-NEXT: lui a2, %hi(global.3) -; CHECK-NEXT: addi a3, zero, 5 +; CHECK-NEXT: li a3, 5 ; CHECK-NEXT: sw a1, %lo(global.3)(a2) ; CHECK-NEXT: bltu a0, a3, .LBB0_2 ; CHECK-NEXT: # %bb.1: # %bb10 ; CHECK-NEXT: call quux@plt ; CHECK-NEXT: .LBB0_2: # %bb12 -; CHECK-NEXT: mv a0, zero +; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll --- a/llvm/test/CodeGen/RISCV/rem.ll +++ b/llvm/test/CodeGen/RISCV/rem.ll @@ -50,7 +50,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: addi a0, zero, 10 +; RV32I-NEXT: li a0, 10 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -58,7 +58,7 @@ ; ; RV32IM-LABEL: urem_constant_lhs: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a1, zero, 10 +; RV32IM-NEXT: li a1, 10 ; RV32IM-NEXT: remu a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -68,7 +68,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a1, a0, 32 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -76,7 +76,7 @@ ; ; RV64IM-LABEL: urem_constant_lhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: remuw a0, a1, a0 ; RV64IM-NEXT: ret %1 = urem i32 10, %a @@ -207,7 +207,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: addi a0, zero, -10 +; RV32I-NEXT: li a0, -10 ; RV32I-NEXT: call 
__modsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -215,7 +215,7 @@ ; ; RV32IM-LABEL: srem_constant_lhs: ; RV32IM: # %bb.0: -; RV32IM-NEXT: addi a1, zero, -10 +; RV32IM-NEXT: li a1, -10 ; RV32IM-NEXT: rem a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -224,7 +224,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w a1, a0 -; RV64I-NEXT: addi a0, zero, -10 +; RV64I-NEXT: li a0, -10 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -232,7 +232,7 @@ ; ; RV64IM-LABEL: srem_constant_lhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, -10 +; RV64IM-NEXT: li a1, -10 ; RV64IM-NEXT: remw a0, a1, a0 ; RV64IM-NEXT: ret %1 = srem i32 -10, %a @@ -282,8 +282,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: addi a0, zero, 10 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a0, 10 +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __umoddi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -295,8 +295,8 @@ ; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IM-NEXT: mv a3, a1 ; RV32IM-NEXT: mv a2, a0 -; RV32IM-NEXT: addi a0, zero, 10 -; RV32IM-NEXT: mv a1, zero +; RV32IM-NEXT: li a0, 10 +; RV32IM-NEXT: li a1, 0 ; RV32IM-NEXT: call __umoddi3@plt ; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -307,7 +307,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -315,7 +315,7 @@ ; ; RV64IM-LABEL: urem64_constant_lhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: remu a0, a1, a0 ; RV64IM-NEXT: ret %1 = urem i64 10, %a @@ -365,8 +365,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: addi a0, zero, -10 -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a0, -10 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: call __moddi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -378,8 +378,8 @@ ; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IM-NEXT: mv a3, a1 ; RV32IM-NEXT: mv a2, a0 -; RV32IM-NEXT: addi a0, zero, -10 -; RV32IM-NEXT: addi a1, zero, -1 +; RV32IM-NEXT: li a0, -10 +; RV32IM-NEXT: li a1, -1 ; RV32IM-NEXT: call __moddi3@plt ; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -390,7 +390,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: addi a0, zero, -10 +; RV64I-NEXT: li a0, -10 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -398,7 +398,7 @@ ; ; RV64IM-LABEL: srem64_constant_lhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: addi a1, zero, -10 +; RV64IM-NEXT: li a1, -10 ; RV64IM-NEXT: rem a0, a1, a0 ; RV64IM-NEXT: ret %1 = srem i64 -10, %a @@ -451,7 +451,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: andi a1, a0, 255 -; RV32I-NEXT: addi a0, zero, 10 +; RV32I-NEXT: li a0, 10 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -460,7 +460,7 @@ ; RV32IM-LABEL: 
urem8_constant_lhs: ; RV32IM: # %bb.0: ; RV32IM-NEXT: andi a0, a0, 255 -; RV32IM-NEXT: addi a1, zero, 10 +; RV32IM-NEXT: li a1, 10 ; RV32IM-NEXT: remu a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -469,7 +469,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: andi a1, a0, 255 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -478,7 +478,7 @@ ; RV64IM-LABEL: urem8_constant_lhs: ; RV64IM: # %bb.0: ; RV64IM-NEXT: andi a0, a0, 255 -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: remuw a0, a1, a0 ; RV64IM-NEXT: ret %1 = urem i8 10, %a @@ -541,7 +541,7 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a1, a0, 24 -; RV32I-NEXT: addi a0, zero, -10 +; RV32I-NEXT: li a0, -10 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -551,7 +551,7 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: slli a0, a0, 24 ; RV32IM-NEXT: srai a0, a0, 24 -; RV32IM-NEXT: addi a1, zero, -10 +; RV32IM-NEXT: li a1, -10 ; RV32IM-NEXT: rem a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -561,7 +561,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a1, a0, 56 -; RV64I-NEXT: addi a0, zero, -10 +; RV64I-NEXT: li a0, -10 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -571,7 +571,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: slli a0, a0, 56 ; RV64IM-NEXT: srai a0, a0, 56 -; RV64IM-NEXT: addi a1, zero, -10 +; RV64IM-NEXT: li a1, -10 ; RV64IM-NEXT: remw a0, a1, a0 ; RV64IM-NEXT: ret %1 = srem i8 -10, %a @@ -635,7 +635,7 @@ ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a1, a0, a1 -; RV32I-NEXT: addi a0, zero, 10 +; RV32I-NEXT: li a0, 10 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -646,7 +646,7 @@ ; RV32IM-NEXT: lui a1, 16 ; RV32IM-NEXT: addi a1, a1, -1 ; RV32IM-NEXT: and a0, a0, a1 -; RV32IM-NEXT: addi a1, zero, 10 +; RV32IM-NEXT: li a1, 10 ; RV32IM-NEXT: remu a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -657,7 +657,7 @@ ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: and a1, a0, a1 -; RV64I-NEXT: addi a0, zero, 10 +; RV64I-NEXT: li a0, 10 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -668,7 +668,7 @@ ; RV64IM-NEXT: lui a1, 16 ; RV64IM-NEXT: addiw a1, a1, -1 ; RV64IM-NEXT: and a0, a0, a1 -; RV64IM-NEXT: addi a1, zero, 10 +; RV64IM-NEXT: li a1, 10 ; RV64IM-NEXT: remuw a0, a1, a0 ; RV64IM-NEXT: ret %1 = urem i16 10, %a @@ -730,7 +730,7 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a1, a0, 16 -; RV32I-NEXT: addi a0, zero, -10 +; RV32I-NEXT: li a0, -10 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -740,7 +740,7 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: slli a0, a0, 16 ; RV32IM-NEXT: srai a0, a0, 16 -; RV32IM-NEXT: addi a1, zero, -10 +; RV32IM-NEXT: li a1, -10 ; RV32IM-NEXT: rem a0, a1, a0 ; RV32IM-NEXT: ret ; @@ -750,7 +750,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a1, a0, 48 -; RV64I-NEXT: addi a0, zero, -10 +; RV64I-NEXT: li a0, -10 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 
8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -760,7 +760,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: slli a0, a0, 48 ; RV64IM-NEXT: srai a0, a0, 48 -; RV64IM-NEXT: addi a1, zero, -10 +; RV64IM-NEXT: li a1, -10 ; RV64IM-NEXT: remw a0, a1, a0 ; RV64IM-NEXT: ret %1 = srem i16 -10, %a diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll --- a/llvm/test/CodeGen/RISCV/remat.ll +++ b/llvm/test/CodeGen/RISCV/remat.ll @@ -69,7 +69,7 @@ ; RV32I-NEXT: lw a2, %lo(c)(s10) ; RV32I-NEXT: lw a3, %lo(d)(s1) ; RV32I-NEXT: lw a4, %lo(e)(s0) -; RV32I-NEXT: addi a5, zero, 32 +; RV32I-NEXT: li a5, 32 ; RV32I-NEXT: call foo@plt ; RV32I-NEXT: .LBB0_5: # %if.end ; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 @@ -82,7 +82,7 @@ ; RV32I-NEXT: lw a2, %lo(d)(s1) ; RV32I-NEXT: lw a3, %lo(e)(s0) ; RV32I-NEXT: lw a4, %lo(f)(s7) -; RV32I-NEXT: addi a5, zero, 64 +; RV32I-NEXT: li a5, 64 ; RV32I-NEXT: call foo@plt ; RV32I-NEXT: .LBB0_7: # %if.end5 ; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 @@ -95,7 +95,7 @@ ; RV32I-NEXT: lw a2, %lo(e)(s0) ; RV32I-NEXT: lw a3, %lo(f)(s7) ; RV32I-NEXT: lw a4, %lo(g)(s8) -; RV32I-NEXT: addi a5, zero, 32 +; RV32I-NEXT: li a5, 32 ; RV32I-NEXT: call foo@plt ; RV32I-NEXT: .LBB0_9: # %if.end9 ; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 @@ -108,11 +108,11 @@ ; RV32I-NEXT: lw a2, %lo(f)(s7) ; RV32I-NEXT: lw a3, %lo(g)(s8) ; RV32I-NEXT: lw a4, %lo(h)(s9) -; RV32I-NEXT: addi a5, zero, 32 +; RV32I-NEXT: li a5, 32 ; RV32I-NEXT: call foo@plt ; RV32I-NEXT: j .LBB0_2 ; RV32I-NEXT: .LBB0_11: # %for.end -; RV32I-NEXT: addi a0, zero, 1 +; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: lw s11, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s10, 16(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s9, 20(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll --- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll +++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll @@ -8,7 +8,7 @@ define i32 @rotl(i32 %x, i32 %y) nounwind { ; RV32I-LABEL: rotl: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 32 +; RV32I-NEXT: li a2, 32 ; RV32I-NEXT: sub a2, a2, a1 ; RV32I-NEXT: sll a1, a0, a1 ; RV32I-NEXT: srl a0, a0, a2 @@ -24,7 +24,7 @@ define i32 @rotr(i32 %x, i32 %y) nounwind { ; RV32I-LABEL: rotr: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 32 +; RV32I-NEXT: li a2, 32 ; RV32I-NEXT: sub a2, a2, a1 ; RV32I-NEXT: srl a1, a0, a1 ; RV32I-NEXT: sll a0, a0, a2 diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll --- a/llvm/test/CodeGen/RISCV/rv32zba.ll +++ b/llvm/test/CodeGen/RISCV/rv32zba.ll @@ -63,7 +63,7 @@ define i32 @addmul6(i32 %a, i32 %b) { ; RV32I-LABEL: addmul6: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 6 +; RV32I-NEXT: li a2, 6 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -81,7 +81,7 @@ define i32 @addmul10(i32 %a, i32 %b) { ; RV32I-LABEL: addmul10: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 10 +; RV32I-NEXT: li a2, 10 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -99,7 +99,7 @@ define i32 @addmul12(i32 %a, i32 %b) { ; RV32I-LABEL: addmul12: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 12 +; RV32I-NEXT: li a2, 12 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -117,7 +117,7 @@ define i32 @addmul18(i32 %a, i32 %b) { ; RV32I-LABEL: addmul18: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 18 +; RV32I-NEXT: li a2, 18 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -135,7 +135,7 @@ define i32 
@addmul20(i32 %a, i32 %b) { ; RV32I-LABEL: addmul20: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 20 +; RV32I-NEXT: li a2, 20 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -153,7 +153,7 @@ define i32 @addmul24(i32 %a, i32 %b) { ; RV32I-LABEL: addmul24: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 24 +; RV32I-NEXT: li a2, 24 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -171,7 +171,7 @@ define i32 @addmul36(i32 %a, i32 %b) { ; RV32I-LABEL: addmul36: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 36 +; RV32I-NEXT: li a2, 36 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -189,7 +189,7 @@ define i32 @addmul40(i32 %a, i32 %b) { ; RV32I-LABEL: addmul40: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 40 +; RV32I-NEXT: li a2, 40 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -207,7 +207,7 @@ define i32 @addmul72(i32 %a, i32 %b) { ; RV32I-LABEL: addmul72: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 72 +; RV32I-NEXT: li a2, 72 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret @@ -225,7 +225,7 @@ define i32 @mul96(i32 %a) { ; RV32I-LABEL: mul96: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 96 +; RV32I-NEXT: li a1, 96 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -241,7 +241,7 @@ define i32 @mul160(i32 %a) { ; RV32I-LABEL: mul160: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 160 +; RV32I-NEXT: li a1, 160 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -257,7 +257,7 @@ define i32 @mul288(i32 %a) { ; RV32I-LABEL: mul288: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 288 +; RV32I-NEXT: li a1, 288 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -273,13 +273,13 @@ define i32 @mul258(i32 %a) { ; RV32I-LABEL: mul258: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 258 +; RV32I-NEXT: li a1, 258 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul258: ; RV32ZBA: # %bb.0: -; RV32ZBA-NEXT: addi a1, zero, 258 +; RV32ZBA-NEXT: li a1, 258 ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 258 @@ -289,13 +289,13 @@ define i32 @mul260(i32 %a) { ; RV32I-LABEL: mul260: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 260 +; RV32I-NEXT: li a1, 260 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul260: ; RV32ZBA: # %bb.0: -; RV32ZBA-NEXT: addi a1, zero, 260 +; RV32ZBA-NEXT: li a1, 260 ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 260 @@ -305,13 +305,13 @@ define i32 @mul264(i32 %a) { ; RV32I-LABEL: mul264: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 264 +; RV32I-NEXT: li a1, 264 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul264: ; RV32ZBA: # %bb.0: -; RV32ZBA-NEXT: addi a1, zero, 264 +; RV32ZBA-NEXT: li a1, 264 ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 264 @@ -321,7 +321,7 @@ define i32 @mul11(i32 %a) { ; RV32I-LABEL: mul11: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 11 +; RV32I-NEXT: li a1, 11 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -337,7 +337,7 @@ define i32 @mul19(i32 %a) { ; RV32I-LABEL: mul19: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 19 +; RV32I-NEXT: li a1, 19 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -353,7 +353,7 @@ define i32 @mul13(i32 %a) { ; RV32I-LABEL: mul13: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 13 +; RV32I-NEXT: li a1, 13 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -369,7 +369,7 @@ define i32 @mul21(i32 %a) { ; RV32I-LABEL: mul21: ; RV32I: # 
%bb.0: -; RV32I-NEXT: addi a1, zero, 21 +; RV32I-NEXT: li a1, 21 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -385,7 +385,7 @@ define i32 @mul37(i32 %a) { ; RV32I-LABEL: mul37: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 37 +; RV32I-NEXT: li a1, 37 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -401,7 +401,7 @@ define i32 @mul25(i32 %a) { ; RV32I-LABEL: mul25: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 25 +; RV32I-NEXT: li a1, 25 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -417,7 +417,7 @@ define i32 @mul41(i32 %a) { ; RV32I-LABEL: mul41: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 41 +; RV32I-NEXT: li a1, 41 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -433,7 +433,7 @@ define i32 @mul73(i32 %a) { ; RV32I-LABEL: mul73: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 73 +; RV32I-NEXT: li a1, 73 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -449,7 +449,7 @@ define i32 @mul27(i32 %a) { ; RV32I-LABEL: mul27: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 27 +; RV32I-NEXT: li a1, 27 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -465,7 +465,7 @@ define i32 @mul45(i32 %a) { ; RV32I-LABEL: mul45: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 45 +; RV32I-NEXT: li a1, 45 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -481,7 +481,7 @@ define i32 @mul81(i32 %a) { ; RV32I-LABEL: mul81: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 81 +; RV32I-NEXT: li a1, 81 ; RV32I-NEXT: mul a0, a0, a1 ; RV32I-NEXT: ret ; @@ -555,7 +555,7 @@ ; ; RV32ZBA-LABEL: add4104: ; RV32ZBA: # %bb.0: -; RV32ZBA-NEXT: addi a1, zero, 1026 +; RV32ZBA-NEXT: li a1, 1026 ; RV32ZBA-NEXT: sh2add a0, a1, a0 ; RV32ZBA-NEXT: ret %c = add i32 %a, 4104 @@ -572,7 +572,7 @@ ; ; RV32ZBA-LABEL: add8208: ; RV32ZBA: # %bb.0: -; RV32ZBA-NEXT: addi a1, zero, 1026 +; RV32ZBA-NEXT: li a1, 1026 ; RV32ZBA-NEXT: sh3add a0, a1, a0 ; RV32ZBA-NEXT: ret %c = add i32 %a, 8208 diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll --- a/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll @@ -179,7 +179,7 @@ ; RV32I-NEXT: mv a7, a1 ; RV32I-NEXT: andi a1, a2, 63 ; RV32I-NEXT: addi t0, a1, -32 -; RV32I-NEXT: addi a6, zero, 31 +; RV32I-NEXT: li a6, 31 ; RV32I-NEXT: bltz t0, .LBB7_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sll a1, a0, t0 @@ -220,7 +220,7 @@ ; RV32ZBB-NEXT: mv a7, a1 ; RV32ZBB-NEXT: andi a1, a2, 63 ; RV32ZBB-NEXT: addi t0, a1, -32 -; RV32ZBB-NEXT: addi a6, zero, 31 +; RV32ZBB-NEXT: li a6, 31 ; RV32ZBB-NEXT: bltz t0, .LBB7_2 ; RV32ZBB-NEXT: # %bb.1: ; RV32ZBB-NEXT: sll a1, a0, t0 @@ -261,7 +261,7 @@ ; RV32ZBP-NEXT: mv a7, a1 ; RV32ZBP-NEXT: andi a1, a2, 63 ; RV32ZBP-NEXT: addi t0, a1, -32 -; RV32ZBP-NEXT: addi a6, zero, 31 +; RV32ZBP-NEXT: li a6, 31 ; RV32ZBP-NEXT: bltz t0, .LBB7_2 ; RV32ZBP-NEXT: # %bb.1: ; RV32ZBP-NEXT: sll a1, a0, t0 @@ -335,7 +335,7 @@ ; RV32I-NEXT: mv t0, a0 ; RV32I-NEXT: andi a0, a2, 63 ; RV32I-NEXT: addi a7, a0, -32 -; RV32I-NEXT: addi a6, zero, 31 +; RV32I-NEXT: li a6, 31 ; RV32I-NEXT: bltz a7, .LBB9_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srl a0, a1, a7 @@ -376,7 +376,7 @@ ; RV32ZBB-NEXT: mv t0, a0 ; RV32ZBB-NEXT: andi a0, a2, 63 ; RV32ZBB-NEXT: addi a7, a0, -32 -; RV32ZBB-NEXT: addi a6, zero, 31 +; RV32ZBB-NEXT: li a6, 31 ; RV32ZBB-NEXT: bltz a7, .LBB9_2 ; RV32ZBB-NEXT: # %bb.1: ; RV32ZBB-NEXT: srl a0, a1, a7 @@ -417,7 +417,7 @@ ; RV32ZBP-NEXT: mv t0, a0 ; RV32ZBP-NEXT: andi a0, a2, 63 ; RV32ZBP-NEXT: addi a7, a0, -32 -; RV32ZBP-NEXT: addi a6, zero, 31 +; RV32ZBP-NEXT: li a6, 31 ; RV32ZBP-NEXT: bltz 
a7, .LBB9_2 ; RV32ZBP-NEXT: # %bb.1: ; RV32ZBP-NEXT: srl a0, a1, a7 diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll --- a/llvm/test/CodeGen/RISCV/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll @@ -48,7 +48,7 @@ ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB0_2: -; RV32I-NEXT: addi a0, zero, 32 +; RV32I-NEXT: li a0, 32 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: ctlz_i32: @@ -138,7 +138,7 @@ ; RV32I-NEXT: .LBB1_2: ; RV32I-NEXT: srli a0, s2, 24 ; RV32I-NEXT: .LBB1_3: -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload @@ -156,11 +156,11 @@ ; RV32ZBB-NEXT: # %bb.1: ; RV32ZBB-NEXT: clz a0, a0 ; RV32ZBB-NEXT: addi a0, a0, 32 -; RV32ZBB-NEXT: mv a1, zero +; RV32ZBB-NEXT: li a1, 0 ; RV32ZBB-NEXT: ret ; RV32ZBB-NEXT: .LBB1_2: ; RV32ZBB-NEXT: clz a0, a1 -; RV32ZBB-NEXT: mv a1, zero +; RV32ZBB-NEXT: li a1, 0 ; RV32ZBB-NEXT: ret %1 = call i64 @llvm.ctlz.i64(i64 %a, i1 false) ret i64 %1 @@ -202,7 +202,7 @@ ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB2_2: -; RV32I-NEXT: addi a0, zero, 32 +; RV32I-NEXT: li a0, 32 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: cttz_i32: @@ -276,7 +276,7 @@ ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: srli a0, s2, 24 ; RV32I-NEXT: .LBB3_3: -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload @@ -294,11 +294,11 @@ ; RV32ZBB-NEXT: # %bb.1: ; RV32ZBB-NEXT: ctz a0, a1 ; RV32ZBB-NEXT: addi a0, a0, 32 -; RV32ZBB-NEXT: mv a1, zero +; RV32ZBB-NEXT: li a1, 0 ; RV32ZBB-NEXT: ret ; RV32ZBB-NEXT: .LBB3_2: ; RV32ZBB-NEXT: ctz a0, a0 -; RV32ZBB-NEXT: mv a1, zero +; RV32ZBB-NEXT: li a1, 0 ; RV32ZBB-NEXT: ret %1 = call i64 @llvm.cttz.i64(i64 %a, i1 false) ret i64 %1 @@ -392,7 +392,7 @@ ; RV32I-NEXT: call __mulsi3@plt ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: add a0, a0, s5 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload @@ -408,7 +408,7 @@ ; RV32ZBB-NEXT: cpop a1, a1 ; RV32ZBB-NEXT: cpop a0, a0 ; RV32ZBB-NEXT: add a0, a0, a1 -; RV32ZBB-NEXT: mv a1, zero +; RV32ZBB-NEXT: li a1, 0 ; RV32ZBB-NEXT: ret %1 = call i64 @llvm.ctpop.i64(i64 %a) ret i64 %1 @@ -795,13 +795,13 @@ ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: zexth_i64: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: zext.h a0, a0 -; RV32ZBB-NEXT: mv a1, zero +; RV32ZBB-NEXT: li a1, 0 ; RV32ZBB-NEXT: ret %and = and i64 %a, 65535 ret i64 %and diff --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll --- a/llvm/test/CodeGen/RISCV/rv32zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbp.ll @@ -2944,13 +2944,13 @@ ; RV32I-NEXT: slli a1, a2, 24 ; RV32I-NEXT: srli a1, a1, 16 ; RV32I-NEXT: or a0, a1, a0 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: packh_i64: ; RV32ZBP: # %bb.0: ; RV32ZBP-NEXT: packh a0, a0, a2 -; RV32ZBP-NEXT: mv a1, zero +; RV32ZBP-NEXT: li a1, 0 ; RV32ZBP-NEXT: ret %and = and i64 %a, 255 %and1 = shl i64 %b, 8 @@ -2981,13 +2981,13 @@ ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: mv a1, zero +; 
RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: zexth_i64: ; RV32ZBP: # %bb.0: ; RV32ZBP-NEXT: zext.h a0, a0 -; RV32ZBP-NEXT: mv a1, zero +; RV32ZBP-NEXT: li a1, 0 ; RV32ZBP-NEXT: ret %and = and i64 %a, 65535 ret i64 %and diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll --- a/llvm/test/CodeGen/RISCV/rv32zbs.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll @@ -7,7 +7,7 @@ define i32 @sbclr_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sbclr_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 1 +; RV32I-NEXT: li a2, 1 ; RV32I-NEXT: sll a1, a2, a1 ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: and a0, a1, a0 @@ -27,7 +27,7 @@ define i32 @sbclr_i32_no_mask(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sbclr_i32_no_mask: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 1 +; RV32I-NEXT: li a2, 1 ; RV32I-NEXT: sll a1, a2, a1 ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: and a0, a1, a0 @@ -48,7 +48,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a3, a2, 63 ; RV32I-NEXT: addi a4, a3, -32 -; RV32I-NEXT: addi a3, zero, 1 +; RV32I-NEXT: li a3, 1 ; RV32I-NEXT: bltz a4, .LBB2_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sll a2, a3, a4 @@ -82,7 +82,7 @@ define i32 @sbset_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sbset_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 1 +; RV32I-NEXT: li a2, 1 ; RV32I-NEXT: sll a1, a2, a1 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: ret @@ -100,7 +100,7 @@ define i32 @sbset_i32_no_mask(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sbset_i32_no_mask: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 1 +; RV32I-NEXT: li a2, 1 ; RV32I-NEXT: sll a1, a2, a1 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: ret @@ -118,7 +118,7 @@ define signext i32 @sbset_i32_zero(i32 signext %a) nounwind { ; RV32I-LABEL: sbset_i32_zero: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 1 +; RV32I-NEXT: li a1, 1 ; RV32I-NEXT: sll a0, a1, a0 ; RV32I-NEXT: ret ; @@ -138,7 +138,7 @@ define i64 @sbset_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: sbset_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a3, zero, 1 +; RV32I-NEXT: li a3, 1 ; RV32I-NEXT: sll a2, a3, a2 ; RV32I-NEXT: srai a3, a2, 31 ; RV32I-NEXT: or a0, a2, a0 @@ -164,14 +164,14 @@ ; RV32I-LABEL: sbset_i64_zero: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, a0, -32 -; RV32I-NEXT: addi a2, zero, 1 +; RV32I-NEXT: li a2, 1 ; RV32I-NEXT: bltz a1, .LBB7_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: sll a1, a2, a1 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB7_2: -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: sll a0, a2, a0 ; RV32I-NEXT: ret ; @@ -180,11 +180,11 @@ ; RV32ZBS-NEXT: addi a1, a0, -32 ; RV32ZBS-NEXT: bltz a1, .LBB7_2 ; RV32ZBS-NEXT: # %bb.1: -; RV32ZBS-NEXT: mv a0, zero +; RV32ZBS-NEXT: li a0, 0 ; RV32ZBS-NEXT: bset a1, zero, a1 ; RV32ZBS-NEXT: ret ; RV32ZBS-NEXT: .LBB7_2: -; RV32ZBS-NEXT: mv a1, zero +; RV32ZBS-NEXT: li a1, 0 ; RV32ZBS-NEXT: bset a0, zero, a0 ; RV32ZBS-NEXT: ret %shl = shl i64 1, %a @@ -194,7 +194,7 @@ define i32 @sbinv_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sbinv_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a2, zero, 1 +; RV32I-NEXT: li a2, 1 ; RV32I-NEXT: sll a1, a2, a1 ; RV32I-NEXT: xor a0, a1, a0 ; RV32I-NEXT: ret @@ -217,7 +217,7 @@ define i64 @sbinv_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: sbinv_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a3, zero, 1 +; RV32I-NEXT: li a3, 1 ; RV32I-NEXT: sll a2, a3, a2 ; RV32I-NEXT: srai a3, a2, 31 ; RV32I-NEXT: xor a0, a2, a0 @@ -288,14 +288,14 @@ ; RV32I-NEXT: j .LBB12_3 ; RV32I-NEXT: .LBB12_2: ; RV32I-NEXT: srl a0, a0, a2 
-; RV32I-NEXT: addi a2, zero, 31 +; RV32I-NEXT: li a2, 31 ; RV32I-NEXT: sub a2, a2, a3 ; RV32I-NEXT: slli a1, a1, 1 ; RV32I-NEXT: sll a1, a1, a2 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: .LBB12_3: ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: sbext_i64: @@ -308,14 +308,14 @@ ; RV32ZBS-NEXT: j .LBB12_3 ; RV32ZBS-NEXT: .LBB12_2: ; RV32ZBS-NEXT: srl a0, a0, a2 -; RV32ZBS-NEXT: addi a2, zero, 31 +; RV32ZBS-NEXT: li a2, 31 ; RV32ZBS-NEXT: sub a2, a2, a3 ; RV32ZBS-NEXT: slli a1, a1, 1 ; RV32ZBS-NEXT: sll a1, a1, a2 ; RV32ZBS-NEXT: or a0, a0, a1 ; RV32ZBS-NEXT: .LBB12_3: ; RV32ZBS-NEXT: andi a0, a0, 1 -; RV32ZBS-NEXT: mv a1, zero +; RV32ZBS-NEXT: li a1, 0 ; RV32ZBS-NEXT: ret %conv = and i64 %b, 63 %shr = lshr i64 %a, %conv @@ -344,13 +344,13 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 5 ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: sbexti_i64: ; RV32ZBS: # %bb.0: ; RV32ZBS-NEXT: bexti a0, a0, 5 -; RV32ZBS-NEXT: mv a1, zero +; RV32ZBS-NEXT: li a1, 0 ; RV32ZBS-NEXT: ret %shr = lshr i64 %a, 5 %and = and i64 %shr, 1 diff --git a/llvm/test/CodeGen/RISCV/rv32zbt.ll b/llvm/test/CodeGen/RISCV/rv32zbt.ll --- a/llvm/test/CodeGen/RISCV/rv32zbt.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbt.ll @@ -342,7 +342,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a5, a4, 63 ; RV32I-NEXT: addi a7, a5, -32 -; RV32I-NEXT: addi a6, zero, 31 +; RV32I-NEXT: li a6, 31 ; RV32I-NEXT: bltz a7, .LBB13_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sll a1, a0, a7 @@ -386,7 +386,7 @@ ; RV32ZBT: # %bb.0: ; RV32ZBT-NEXT: sll a7, a1, a4 ; RV32ZBT-NEXT: andi a5, a4, 63 -; RV32ZBT-NEXT: addi a6, zero, 31 +; RV32ZBT-NEXT: li a6, 31 ; RV32ZBT-NEXT: sub t0, a6, a5 ; RV32ZBT-NEXT: srli a1, a0, 1 ; RV32ZBT-NEXT: srl a1, a1, t0 @@ -456,7 +456,7 @@ ; RV32I-NEXT: mv t0, a0 ; RV32I-NEXT: andi a0, a4, 63 ; RV32I-NEXT: addi a6, a0, -32 -; RV32I-NEXT: addi a7, zero, 31 +; RV32I-NEXT: li a7, 31 ; RV32I-NEXT: bltz a6, .LBB15_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srl a0, a3, a6 @@ -501,7 +501,7 @@ ; RV32ZBT: # %bb.0: ; RV32ZBT-NEXT: srl a7, a2, a4 ; RV32ZBT-NEXT: andi a5, a4, 63 -; RV32ZBT-NEXT: addi a6, zero, 31 +; RV32ZBT-NEXT: li a6, 31 ; RV32ZBT-NEXT: sub t0, a6, a5 ; RV32ZBT-NEXT: slli a2, a3, 1 ; RV32ZBT-NEXT: sll a2, a2, t0 diff --git a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll --- a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll @@ -125,7 +125,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: call __gesf2@plt -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slt a0, a1, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -357,7 +357,7 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: mv a0, a2 -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3@plt ; RV64I-NEXT: lui a1, 524288 ; RV64I-NEXT: xor a2, a0, a1 @@ -385,11 +385,11 @@ ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a2 ; RV64I-NEXT: mv s2, a1 -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3@plt ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3@plt ; RV64I-NEXT: lui a2, 524288 ; RV64I-NEXT: xor a1, s1, a2 @@ -420,7 +420,7 @@ ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded 
Spill ; RV64I-NEXT: mv s0, a2 ; RV64I-NEXT: mv s1, a1 -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3@plt ; RV64I-NEXT: lui a1, 524288 ; RV64I-NEXT: xor a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll --- a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll @@ -20,7 +20,7 @@ ; CHECK-NEXT: addw a0, a0, a1 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB0_2: -; CHECK-NEXT: mv a0, zero +; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: ret entry: %cmp6 = icmp slt i32 %s, %n @@ -65,7 +65,7 @@ ; CHECK-NEXT: subw a0, a0, a1 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: -; CHECK-NEXT: mv a0, zero +; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: ret entry: %cmp6 = icmp slt i32 %s, %n diff --git a/llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll --- a/llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll +++ b/llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll @@ -4,10 +4,10 @@ define signext i32 @mulw(i32 signext %s, i32 signext %n, i32 signext %k) nounwind { ; CHECK-LABEL: mulw: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1 +; CHECK-NEXT: li a2, 1 ; CHECK-NEXT: bge a0, a1, .LBB0_3 ; CHECK-NEXT: # %bb.1: # %for.body.preheader -; CHECK-NEXT: addi a2, zero, 1 +; CHECK-NEXT: li a2, 1 ; CHECK-NEXT: .LBB0_2: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: mulw a2, a0, a2 diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll --- a/llvm/test/CodeGen/RISCV/rv64zba.ll +++ b/llvm/test/CodeGen/RISCV/rv64zba.ll @@ -318,7 +318,7 @@ define i64 @addmul6(i64 %a, i64 %b) { ; RV64I-LABEL: addmul6: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 6 +; RV64I-NEXT: li a2, 6 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret @@ -336,7 +336,7 @@ define i64 @addmul10(i64 %a, i64 %b) { ; RV64I-LABEL: addmul10: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 10 +; RV64I-NEXT: li a2, 10 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret @@ -354,7 +354,7 @@ define i64 @addmul12(i64 %a, i64 %b) { ; RV64I-LABEL: addmul12: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 12 +; RV64I-NEXT: li a2, 12 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret @@ -372,7 +372,7 @@ define i64 @addmul18(i64 %a, i64 %b) { ; RV64I-LABEL: addmul18: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 18 +; RV64I-NEXT: li a2, 18 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret @@ -390,7 +390,7 @@ define i64 @addmul20(i64 %a, i64 %b) { ; RV64I-LABEL: addmul20: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 20 +; RV64I-NEXT: li a2, 20 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret @@ -408,7 +408,7 @@ define i64 @addmul24(i64 %a, i64 %b) { ; RV64I-LABEL: addmul24: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 24 +; RV64I-NEXT: li a2, 24 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret @@ -426,7 +426,7 @@ define i64 @addmul36(i64 %a, i64 %b) { ; RV64I-LABEL: addmul36: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 36 +; RV64I-NEXT: li a2, 36 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret @@ -444,7 +444,7 @@ define i64 @addmul40(i64 %a, i64 %b) { ; RV64I-LABEL: addmul40: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 40 +; RV64I-NEXT: li a2, 40 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, 
a1 ; RV64I-NEXT: ret @@ -462,7 +462,7 @@ define i64 @addmul72(i64 %a, i64 %b) { ; RV64I-LABEL: addmul72: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 72 +; RV64I-NEXT: li a2, 72 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret @@ -480,7 +480,7 @@ define i64 @mul96(i64 %a) { ; RV64I-LABEL: mul96: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 96 +; RV64I-NEXT: li a1, 96 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -496,7 +496,7 @@ define i64 @mul160(i64 %a) { ; RV64I-LABEL: mul160: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 160 +; RV64I-NEXT: li a1, 160 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -512,7 +512,7 @@ define i64 @mul288(i64 %a) { ; RV64I-LABEL: mul288: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 288 +; RV64I-NEXT: li a1, 288 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -654,13 +654,13 @@ define i64 @mul258(i64 %a) { ; RV64I-LABEL: mul258: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 258 +; RV64I-NEXT: li a1, 258 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul258: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addi a1, zero, 258 +; RV64ZBA-NEXT: li a1, 258 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 258 @@ -670,13 +670,13 @@ define i64 @mul260(i64 %a) { ; RV64I-LABEL: mul260: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 260 +; RV64I-NEXT: li a1, 260 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul260: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addi a1, zero, 260 +; RV64ZBA-NEXT: li a1, 260 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 260 @@ -686,13 +686,13 @@ define i64 @mul264(i64 %a) { ; RV64I-LABEL: mul264: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 264 +; RV64I-NEXT: li a1, 264 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul264: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addi a1, zero, 264 +; RV64ZBA-NEXT: li a1, 264 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 264 @@ -702,14 +702,14 @@ define i64 @imm_zextw() nounwind { ; RV64I-LABEL: imm_zextw: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, zero, 1 +; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: addi a0, a0, -2 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: imm_zextw: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addi a0, zero, -2 +; RV64ZBA-NEXT: li a0, -2 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret ret i64 4294967294 ; -2 in 32 bits. 
@@ -736,7 +736,7 @@ define i64 @mul11(i64 %a) { ; RV64I-LABEL: mul11: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 11 +; RV64I-NEXT: li a1, 11 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -752,7 +752,7 @@ define i64 @mul19(i64 %a) { ; RV64I-LABEL: mul19: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 19 +; RV64I-NEXT: li a1, 19 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -768,7 +768,7 @@ define i64 @mul13(i64 %a) { ; RV64I-LABEL: mul13: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 13 +; RV64I-NEXT: li a1, 13 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -784,7 +784,7 @@ define i64 @mul21(i64 %a) { ; RV64I-LABEL: mul21: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 21 +; RV64I-NEXT: li a1, 21 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -800,7 +800,7 @@ define i64 @mul37(i64 %a) { ; RV64I-LABEL: mul37: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 37 +; RV64I-NEXT: li a1, 37 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -816,7 +816,7 @@ define i64 @mul25(i64 %a) { ; RV64I-LABEL: mul25: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 25 +; RV64I-NEXT: li a1, 25 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -832,7 +832,7 @@ define i64 @mul41(i64 %a) { ; RV64I-LABEL: mul41: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 41 +; RV64I-NEXT: li a1, 41 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -848,7 +848,7 @@ define i64 @mul73(i64 %a) { ; RV64I-LABEL: mul73: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 73 +; RV64I-NEXT: li a1, 73 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -864,7 +864,7 @@ define i64 @mul27(i64 %a) { ; RV64I-LABEL: mul27: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 27 +; RV64I-NEXT: li a1, 27 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -880,7 +880,7 @@ define i64 @mul45(i64 %a) { ; RV64I-LABEL: mul45: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 45 +; RV64I-NEXT: li a1, 45 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -896,7 +896,7 @@ define i64 @mul81(i64 %a) { ; RV64I-LABEL: mul81: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 81 +; RV64I-NEXT: li a1, 81 ; RV64I-NEXT: mul a0, a0, a1 ; RV64I-NEXT: ret ; @@ -963,7 +963,7 @@ define signext i32 @mulw192(i32 signext %a) { ; RV64I-LABEL: mulw192: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 192 +; RV64I-NEXT: li a1, 192 ; RV64I-NEXT: mulw a0, a0, a1 ; RV64I-NEXT: ret ; @@ -979,7 +979,7 @@ define signext i32 @mulw320(i32 signext %a) { ; RV64I-LABEL: mulw320: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 320 +; RV64I-NEXT: li a1, 320 ; RV64I-NEXT: mulw a0, a0, a1 ; RV64I-NEXT: ret ; @@ -995,7 +995,7 @@ define signext i32 @mulw576(i32 signext %a) { ; RV64I-LABEL: mulw576: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 576 +; RV64I-NEXT: li a1, 576 ; RV64I-NEXT: mulw a0, a0, a1 ; RV64I-NEXT: ret ; @@ -1018,7 +1018,7 @@ ; ; RV64ZBA-LABEL: add4104: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addi a1, zero, 1026 +; RV64ZBA-NEXT: li a1, 1026 ; RV64ZBA-NEXT: sh2add a0, a1, a0 ; RV64ZBA-NEXT: ret %c = add i64 %a, 4104 @@ -1035,7 +1035,7 @@ ; ; RV64ZBA-LABEL: add8208: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addi a1, zero, 1026 +; RV64ZBA-NEXT: li a1, 1026 ; RV64ZBA-NEXT: sh3add a0, a1, a0 ; RV64ZBA-NEXT: ret %c = add i64 %a, 8208 diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll @@ -186,7 +186,7 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind { ; RV64I-LABEL: rol_i32_neg_constant_rhs: ; RV64I: # 
%bb.0: -; RV64I-NEXT: addi a1, zero, -2 +; RV64I-NEXT: li a1, -2 ; RV64I-NEXT: sllw a2, a1, a0 ; RV64I-NEXT: negw a0, a0 ; RV64I-NEXT: srlw a0, a1, a0 @@ -195,13 +195,13 @@ ; ; RV64ZBB-LABEL: rol_i32_neg_constant_rhs: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: addi a1, zero, -2 +; RV64ZBB-NEXT: li a1, -2 ; RV64ZBB-NEXT: rolw a0, a1, a0 ; RV64ZBB-NEXT: ret ; ; RV64ZBP-LABEL: rol_i32_neg_constant_rhs: ; RV64ZBP: # %bb.0: -; RV64ZBP-NEXT: addi a1, zero, -2 +; RV64ZBP-NEXT: li a1, -2 ; RV64ZBP-NEXT: rolw a0, a1, a0 ; RV64ZBP-NEXT: ret %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a) @@ -286,7 +286,7 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind { ; RV64I-LABEL: ror_i32_neg_constant_rhs: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -2 +; RV64I-NEXT: li a1, -2 ; RV64I-NEXT: srlw a2, a1, a0 ; RV64I-NEXT: negw a0, a0 ; RV64I-NEXT: sllw a0, a1, a0 @@ -295,13 +295,13 @@ ; ; RV64ZBB-LABEL: ror_i32_neg_constant_rhs: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: addi a1, zero, -2 +; RV64ZBB-NEXT: li a1, -2 ; RV64ZBB-NEXT: rorw a0, a1, a0 ; RV64ZBB-NEXT: ret ; ; RV64ZBP-LABEL: ror_i32_neg_constant_rhs: ; RV64ZBP: # %bb.0: -; RV64ZBP-NEXT: addi a1, zero, -2 +; RV64ZBP-NEXT: li a1, -2 ; RV64ZBP-NEXT: rorw a0, a1, a0 ; RV64ZBP-NEXT: ret %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a) diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -48,7 +48,7 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB0_2: -; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: ctlz_i32: @@ -101,16 +101,16 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: j .LBB1_3 ; RV64I-NEXT: .LBB1_2: -; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: .LBB1_3: # %cond.end -; RV64I-NEXT: addi a1, zero, 31 +; RV64I-NEXT: li a1, 31 ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: log2_i32: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: clzw a0, a0 -; RV64ZBB-NEXT: addi a1, zero, 31 +; RV64ZBB-NEXT: li a1, 31 ; RV64ZBB-NEXT: sub a0, a1, a0 ; RV64ZBB-NEXT: ret %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false) @@ -125,8 +125,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; RV64I-NEXT: addiw a0, a0, -1 -; RV64I-NEXT: addi s0, zero, 32 -; RV64I-NEXT: addi a1, zero, 32 +; RV64I-NEXT: li s0, 32 +; RV64I-NEXT: li a1, 32 ; RV64I-NEXT: beqz a0, .LBB2_2 ; RV64I-NEXT: # %bb.1: # %cond.false ; RV64I-NEXT: srliw a1, a0, 1 @@ -171,7 +171,7 @@ ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: addiw a0, a0, -1 ; RV64ZBB-NEXT: clzw a0, a0 -; RV64ZBB-NEXT: addi a1, zero, 32 +; RV64ZBB-NEXT: li a1, 32 ; RV64ZBB-NEXT: sub a0, a1, a0 ; RV64ZBB-NEXT: ret %1 = sub i32 %a, 1 @@ -218,7 +218,7 @@ ; RV64I-NEXT: addiw a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: beqz s0, .LBB3_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: srliw a0, a1, 24 @@ -232,7 +232,7 @@ ; RV64ZBB-LABEL: findLastSet_i32: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: mv a1, a0 -; RV64ZBB-NEXT: addi a0, zero, -1 +; RV64ZBB-NEXT: li a0, -1 ; RV64ZBB-NEXT: beqz a1, .LBB3_2 ; RV64ZBB-NEXT: # %bb.1: ; RV64ZBB-NEXT: clzw a0, a1 @@ -293,7 +293,7 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB4_2: -; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: ctlz_lshr_i32: @@ -374,7 +374,7 @@ ; RV64I-NEXT: addi 
sp, sp, 16 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB5_2: -; RV64I-NEXT: addi a0, zero, 64 +; RV64I-NEXT: li a0, 64 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: ctlz_i64: @@ -421,7 +421,7 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB6_2: -; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: cttz_i32: @@ -502,7 +502,7 @@ ; RV64I-NEXT: addiw a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: beqz s0, .LBB8_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: srliw a0, a1, 24 @@ -515,7 +515,7 @@ ; RV64ZBB-LABEL: findFirstSet_i32: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: mv a1, a0 -; RV64ZBB-NEXT: addi a0, zero, -1 +; RV64ZBB-NEXT: li a0, -1 ; RV64ZBB-NEXT: beqz a1, .LBB8_2 ; RV64ZBB-NEXT: # %bb.1: ; RV64ZBB-NEXT: ctzw a0, a1 @@ -557,7 +557,7 @@ ; RV64I-NEXT: addiw a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: beqz s0, .LBB9_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: srliw a0, a1, 24 @@ -571,7 +571,7 @@ ; RV64ZBB-LABEL: ffs_i32: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: mv a1, a0 -; RV64ZBB-NEXT: mv a0, zero +; RV64ZBB-NEXT: li a0, 0 ; RV64ZBB-NEXT: beqz a1, .LBB9_2 ; RV64ZBB-NEXT: # %bb.1: ; RV64ZBB-NEXT: ctzw a0, a1 @@ -643,7 +643,7 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB10_2: -; RV64I-NEXT: addi a0, zero, 64 +; RV64I-NEXT: li a0, 64 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: cttz_i64: @@ -1141,7 +1141,7 @@ ; RV64I-NEXT: lui a2, 4080 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: srli a2, a0, 8 -; RV64I-NEXT: addi a3, zero, 255 +; RV64I-NEXT: li a3, 255 ; RV64I-NEXT: slli a4, a3, 24 ; RV64I-NEXT: and a2, a2, a4 ; RV64I-NEXT: or a1, a2, a1 diff --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll @@ -2445,7 +2445,7 @@ ; RV64I-NEXT: lui a2, 4080 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: srli a2, a0, 8 -; RV64I-NEXT: addi a3, zero, 255 +; RV64I-NEXT: li a3, 255 ; RV64I-NEXT: slli a4, a3, 24 ; RV64I-NEXT: and a2, a2, a4 ; RV64I-NEXT: or a1, a2, a1 @@ -2654,7 +2654,7 @@ ; RV64I-NEXT: lui a2, 4080 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: srli a2, a0, 8 -; RV64I-NEXT: addi a3, zero, 255 +; RV64I-NEXT: li a3, 255 ; RV64I-NEXT: slli a4, a3, 24 ; RV64I-NEXT: and a2, a2, a4 ; RV64I-NEXT: or a1, a2, a1 @@ -2842,7 +2842,7 @@ ; RV64I-NEXT: lui a6, 4080 ; RV64I-NEXT: and a3, a2, a6 ; RV64I-NEXT: srli a4, a0, 8 -; RV64I-NEXT: addi a1, zero, 255 +; RV64I-NEXT: li a1, 255 ; RV64I-NEXT: slli a7, a1, 24 ; RV64I-NEXT: and a4, a4, a7 ; RV64I-NEXT: or a3, a4, a3 @@ -3209,7 +3209,7 @@ ; RV64I-NEXT: addi a1, a1, 255 ; RV64I-NEXT: and a1, a0, a1 ; RV64I-NEXT: slli a2, a0, 8 -; RV64I-NEXT: addi a3, zero, 255 +; RV64I-NEXT: li a3, 255 ; RV64I-NEXT: slli a4, a3, 32 ; RV64I-NEXT: addi a4, a4, 255 ; RV64I-NEXT: slli a4, a4, 16 @@ -3241,7 +3241,7 @@ define i64 @shfl16(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: shfl16: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 32 ; RV64I-NEXT: addi a1, a1, 1 ; RV64I-NEXT: slli a1, a1, 16 @@ -3333,7 +3333,7 @@ ; RV64I-LABEL: packu_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: srli a0, a0, 32 -; RV64I-NEXT: addi a2, zero, -1 +; RV64I-NEXT: li a2, -1 ; RV64I-NEXT: slli a2, a2, 32 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: or a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll 
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll @@ -7,7 +7,7 @@ define signext i32 @sbclr_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: sbclr_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: not a1, a1 ; RV64I-NEXT: and a0, a1, a0 @@ -15,7 +15,7 @@ ; ; RV64ZBS-LABEL: sbclr_i32: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: not a1, a1 ; RV64ZBS-NEXT: and a0, a1, a0 @@ -30,7 +30,7 @@ define signext i32 @sbclr_i32_no_mask(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: sbclr_i32_no_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: not a1, a1 ; RV64I-NEXT: and a0, a1, a0 @@ -38,7 +38,7 @@ ; ; RV64ZBS-LABEL: sbclr_i32_no_mask: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: not a1, a1 ; RV64ZBS-NEXT: and a0, a1, a0 @@ -53,7 +53,7 @@ ; RV64I-LABEL: sbclr_i32_load: ; RV64I: # %bb.0: ; RV64I-NEXT: lw a0, 0(a0) -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: not a1, a1 ; RV64I-NEXT: and a0, a1, a0 @@ -63,7 +63,7 @@ ; RV64ZBS-LABEL: sbclr_i32_load: ; RV64ZBS: # %bb.0: ; RV64ZBS-NEXT: lw a0, 0(a0) -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: not a1, a1 ; RV64ZBS-NEXT: and a0, a1, a0 @@ -79,7 +79,7 @@ define i64 @sbclr_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: sbclr_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sll a1, a2, a1 ; RV64I-NEXT: not a1, a1 ; RV64I-NEXT: and a0, a1, a0 @@ -99,7 +99,7 @@ define i64 @sbclr_i64_no_mask(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: sbclr_i64_no_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sll a1, a2, a1 ; RV64I-NEXT: not a1, a1 ; RV64I-NEXT: and a0, a1, a0 @@ -118,14 +118,14 @@ define signext i32 @sbset_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: sbset_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: sbset_i32: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: or a0, a1, a0 ; RV64ZBS-NEXT: ret @@ -138,14 +138,14 @@ define signext i32 @sbset_i32_no_mask(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: sbset_i32_no_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: sbset_i32_no_mask: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: or a0, a1, a0 ; RV64ZBS-NEXT: ret @@ -158,7 +158,7 @@ ; RV64I-LABEL: sbset_i32_load: ; RV64I: # %bb.0: ; RV64I-NEXT: lw a0, 0(a0) -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: sext.w a0, a0 @@ -167,7 +167,7 @@ ; RV64ZBS-LABEL: sbset_i32_load: ; RV64ZBS: # %bb.0: ; RV64ZBS-NEXT: lw a0, 0(a0) -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: or a0, a1, a0 ; RV64ZBS-NEXT: sext.w a0, a0 @@ -182,13 +182,13 @@ 
define signext i32 @sbset_i32_zero(i32 signext %a) nounwind { ; RV64I-LABEL: sbset_i32_zero: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: sllw a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: sbset_i32_zero: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: addi a1, zero, 1 +; RV64ZBS-NEXT: li a1, 1 ; RV64ZBS-NEXT: sllw a0, a1, a0 ; RV64ZBS-NEXT: ret %shl = shl i32 1, %a @@ -198,7 +198,7 @@ define i64 @sbset_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: sbset_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sll a1, a2, a1 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: ret @@ -216,7 +216,7 @@ define i64 @sbset_i64_no_mask(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: sbset_i64_no_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sll a1, a2, a1 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: ret @@ -234,7 +234,7 @@ define signext i64 @sbset_i64_zero(i64 signext %a) nounwind { ; RV64I-LABEL: sbset_i64_zero: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: sll a0, a1, a0 ; RV64I-NEXT: ret ; @@ -249,14 +249,14 @@ define signext i32 @sbinv_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: sbinv_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: xor a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: sbinv_i32: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: xor a0, a1, a0 ; RV64ZBS-NEXT: ret @@ -269,14 +269,14 @@ define signext i32 @sbinv_i32_no_mask(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: sbinv_i32_no_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: xor a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: sbinv_i32_no_mask: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: xor a0, a1, a0 ; RV64ZBS-NEXT: ret @@ -289,7 +289,7 @@ ; RV64I-LABEL: sbinv_i32_load: ; RV64I: # %bb.0: ; RV64I-NEXT: lw a0, 0(a0) -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sllw a1, a2, a1 ; RV64I-NEXT: xor a0, a1, a0 ; RV64I-NEXT: sext.w a0, a0 @@ -298,7 +298,7 @@ ; RV64ZBS-LABEL: sbinv_i32_load: ; RV64ZBS: # %bb.0: ; RV64ZBS-NEXT: lw a0, 0(a0) -; RV64ZBS-NEXT: addi a2, zero, 1 +; RV64ZBS-NEXT: li a2, 1 ; RV64ZBS-NEXT: sllw a1, a2, a1 ; RV64ZBS-NEXT: xor a0, a1, a0 ; RV64ZBS-NEXT: sext.w a0, a0 @@ -312,7 +312,7 @@ define i64 @sbinv_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: sbinv_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sll a1, a2, a1 ; RV64I-NEXT: xor a0, a1, a0 ; RV64I-NEXT: ret @@ -330,7 +330,7 @@ define i64 @sbinv_i64_no_mask(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: sbinv_i64_no_mask: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a2, zero, 1 +; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: sll a1, a2, a1 ; RV64I-NEXT: xor a0, a1, a0 ; RV64I-NEXT: ret @@ -557,7 +557,7 @@ define i64 @sbclri_i64_31(i64 %a) nounwind { ; RV64I-LABEL: sbclri_i64_31: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 31 ; RV64I-NEXT: addi a1, a1, -1 ; RV64I-NEXT: and a0, a0, a1 @@ -574,7 +574,7 @@ define i64 @sbclri_i64_62(i64 %a) nounwind { ; RV64I-LABEL: sbclri_i64_62: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 
62 ; RV64I-NEXT: addi a1, a1, -1 ; RV64I-NEXT: and a0, a0, a1 @@ -591,7 +591,7 @@ define i64 @sbclri_i64_63(i64 %a) nounwind { ; RV64I-LABEL: sbclri_i64_63: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: srli a1, a1, 1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret @@ -747,7 +747,7 @@ define i64 @sbseti_i64_31(i64 %a) nounwind { ; RV64I-LABEL: sbseti_i64_31: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: slli a1, a1, 31 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret @@ -763,7 +763,7 @@ define i64 @sbseti_i64_62(i64 %a) nounwind { ; RV64I-LABEL: sbseti_i64_62: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: slli a1, a1, 62 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret @@ -779,7 +779,7 @@ define i64 @sbseti_i64_63(i64 %a) nounwind { ; RV64I-LABEL: sbseti_i64_63: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 63 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret @@ -901,7 +901,7 @@ define i64 @sbinvi_i64_31(i64 %a) nounwind { ; RV64I-LABEL: sbinvi_i64_31: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: slli a1, a1, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: ret @@ -917,7 +917,7 @@ define i64 @sbinvi_i64_62(i64 %a) nounwind { ; RV64I-LABEL: sbinvi_i64_62: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: slli a1, a1, 62 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: ret @@ -933,7 +933,7 @@ define i64 @sbinvi_i64_63(i64 %a) nounwind { ; RV64I-LABEL: sbinvi_i64_63: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: ret @@ -949,7 +949,7 @@ define i64 @xor_i64_large(i64 %a) nounwind { ; RV64I-LABEL: xor_i64_large: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: slli a1, a1, 32 ; RV64I-NEXT: addi a1, a1, 1 ; RV64I-NEXT: xor a0, a0, a1 @@ -998,7 +998,7 @@ define i64 @or_i64_large(i64 %a) nounwind { ; RV64I-LABEL: or_i64_large: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 1 +; RV64I-NEXT: li a1, 1 ; RV64I-NEXT: slli a1, a1, 32 ; RV64I-NEXT: addi a1, a1, 1 ; RV64I-NEXT: or a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll b/llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll --- a/llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll @@ -93,7 +93,7 @@ ; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 32 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a1, zero, 6 +; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: andi sp, sp, -32 @@ -176,7 +176,7 @@ ; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 32 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a1, zero, 6 +; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: andi sp, sp, -32 @@ -198,7 +198,7 @@ ; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 32 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a1, zero, 12 +; CHECK-NEXT: li a1, 12 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: andi sp, sp, -32 @@ -222,7 +222,7 @@ ; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 32 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a1, zero, 12 +; CHECK-NEXT: li a1, 12 ; CHECK-NEXT: mul 
a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: andi sp, sp, -32 @@ -247,7 +247,7 @@ ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: sd a0, 8(sp) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 @@ -274,7 +274,7 @@ ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: andi sp, sp, -32 -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: sd a0, 8(sp) ; CHECK-NEXT: addi sp, s0, -32 ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -320,7 +320,7 @@ ; CHECK-NEXT: sd s0, 48(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 64 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a1, zero, 30 +; CHECK-NEXT: li a1, 30 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: andi sp, sp, -64 @@ -348,7 +348,7 @@ ; CHECK-NEXT: sd s0, 48(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 64 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: addi a1, zero, 30 +; CHECK-NEXT: li a1, 30 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: andi sp, sp, -64 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll @@ -74,7 +74,7 @@ ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: vl8re32.v v8, (a4) ; CHECK-NEXT: slli a4, a2, 4 -; CHECK-NEXT: addi a5, zero, 24 +; CHECK-NEXT: li a5, 24 ; CHECK-NEXT: mul a2, a2, a5 ; CHECK-NEXT: add a5, a1, a4 ; CHECK-NEXT: vl8re32.v v16, (a1) @@ -107,7 +107,7 @@ ; CHECK-NEXT: add a4, a1, a6 ; CHECK-NEXT: vl8re32.v v8, (a4) ; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: addi a4, zero, 24 +; CHECK-NEXT: li a4, 24 ; CHECK-NEXT: mul a3, a3, a4 ; CHECK-NEXT: add a3, sp, a3 ; CHECK-NEXT: addi a3, a3, 16 @@ -120,7 +120,7 @@ ; CHECK-NEXT: add a3, sp, a3 ; CHECK-NEXT: addi a3, a3, 16 ; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: addi a5, zero, 24 +; CHECK-NEXT: li a5, 24 ; CHECK-NEXT: mul t1, a2, a5 ; CHECK-NEXT: add a3, a1, t1 ; CHECK-NEXT: vl8re32.v v8, (a3) @@ -134,13 +134,13 @@ ; CHECK-NEXT: vl8re32.v v8, (a4) ; CHECK-NEXT: addi a3, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: addi a4, zero, 40 +; CHECK-NEXT: li a4, 40 ; CHECK-NEXT: mul a4, a2, a4 ; CHECK-NEXT: add t0, a1, a4 -; CHECK-NEXT: addi a5, zero, 48 +; CHECK-NEXT: li a5, 48 ; CHECK-NEXT: mul a5, a2, a5 ; CHECK-NEXT: add t2, a1, a5 -; CHECK-NEXT: addi a3, zero, 56 +; CHECK-NEXT: li a3, 56 ; CHECK-NEXT: mul a2, a2, a3 ; CHECK-NEXT: add a3, a1, a2 ; CHECK-NEXT: vl8re32.v v8, (a1) @@ -174,7 +174,7 @@ ; CHECK-NEXT: vs8r.v v8, (a1) ; CHECK-NEXT: add a0, a0, a6 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: addi a2, zero, 24 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: mul a1, a1, a2 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 @@ -302,7 +302,7 @@ ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: vs8r.v v16, (a0) ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: addi a3, zero, 2 +; RV32-NEXT: li a3, 2 ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vs8r.v v8, (a1) ; RV32-NEXT: vmv8r.v v8, v0 @@ -333,7 +333,7 @@ ; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: vs8r.v v16, (a0) ; RV64-NEXT: addi a0, sp, 24 -; RV64-NEXT: addi a3, zero, 2 +; RV64-NEXT: li a3, 2 ; RV64-NEXT: addi a1, sp, 24 ; RV64-NEXT: vs8r.v v8, (a1) ; RV64-NEXT: vmv8r.v v8, v0 @@ -357,7 +357,7 @@ ; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill ; RV32-NEXT: .cfi_offset ra, -4 ; 
RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: addi a3, zero, 48 +; RV32-NEXT: li a3, 48 ; RV32-NEXT: mul a1, a1, a3 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: csrr a1, vlenb @@ -400,7 +400,7 @@ ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a2, a1, 16 -; RV32-NEXT: addi a5, zero, 42 +; RV32-NEXT: li a5, 42 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 5 ; RV32-NEXT: add a1, sp, a1 @@ -415,7 +415,7 @@ ; RV32-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: call ext3@plt ; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: addi a1, zero, 48 +; RV32-NEXT: li a1, 48 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload @@ -429,7 +429,7 @@ ; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: addi a3, zero, 48 +; RV64-NEXT: li a3, 48 ; RV64-NEXT: mul a1, a1, a3 ; RV64-NEXT: sub sp, sp, a1 ; RV64-NEXT: csrr a1, vlenb @@ -472,7 +472,7 @@ ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a2, a1, 24 -; RV64-NEXT: addi a5, zero, 42 +; RV64-NEXT: li a5, 42 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 @@ -487,7 +487,7 @@ ; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: call ext3@plt ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: addi a1, zero, 48 +; RV64-NEXT: li a1, 48 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -542,25 +542,25 @@ ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vs8r.v v8, (a0) -; RV32-NEXT: addi a1, zero, 1 -; RV32-NEXT: addi a2, zero, 2 -; RV32-NEXT: addi a3, zero, 3 -; RV32-NEXT: addi a4, zero, 4 -; RV32-NEXT: addi a5, zero, 5 -; RV32-NEXT: addi a6, zero, 6 -; RV32-NEXT: addi a7, zero, 7 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: li a2, 2 +; RV32-NEXT: li a3, 3 +; RV32-NEXT: li a4, 4 +; RV32-NEXT: li a5, 5 +; RV32-NEXT: li a6, 6 +; RV32-NEXT: li a7, 7 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi t2, a0, 16 ; RV32-NEXT: addi t4, sp, 16 -; RV32-NEXT: addi t6, zero, 8 +; RV32-NEXT: li t6, 8 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vs8r.v v8, (a0) -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: vmv8r.v v16, v8 ; RV32-NEXT: call vector_arg_indirect_stack@plt ; RV32-NEXT: csrr a0, vlenb @@ -594,25 +594,25 @@ ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: addi a0, sp, 24 ; RV64-NEXT: vs8r.v v8, (a0) -; RV64-NEXT: addi a1, zero, 1 -; RV64-NEXT: addi a2, zero, 2 -; RV64-NEXT: addi a3, zero, 3 -; RV64-NEXT: addi a4, zero, 4 -; RV64-NEXT: addi a5, zero, 5 -; RV64-NEXT: addi a6, zero, 6 -; RV64-NEXT: addi a7, zero, 7 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: li a2, 2 +; RV64-NEXT: li a3, 3 +; RV64-NEXT: li a4, 4 +; RV64-NEXT: li a5, 5 +; RV64-NEXT: li a6, 6 +; RV64-NEXT: li a7, 7 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi t2, a0, 24 ; RV64-NEXT: addi t4, sp, 24 -; RV64-NEXT: addi t6, zero, 8 +; RV64-NEXT: li t6, 8 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 24 ; RV64-NEXT: vs8r.v v8, (a0) -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: vmv8r.v v16, v8 ; RV64-NEXT: call vector_arg_indirect_stack@plt ; RV64-NEXT: csrr a0, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll 
b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll --- a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll @@ -9,7 +9,7 @@ define <2 x i64> @add_umax_v2i64(<2 x i64> %a0) { ; CHECK-LABEL: add_umax_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 7 +; CHECK-NEXT: li a0, 7 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -21,7 +21,7 @@ define @add_umax_nxv2i64( %a0) { ; CHECK-LABEL: add_umax_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 7 +; CHECK-NEXT: li a0, 7 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -145,7 +145,7 @@ define <2 x i64> @vselect_add_const_v2i64(<2 x i64> %a0) { ; CHECK-LABEL: vselect_add_const_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 6 +; CHECK-NEXT: li a0, 6 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -158,7 +158,7 @@ define @vselect_add_const_nxv2i64( %a0) { ; CHECK-LABEL: vselect_add_const_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 6 +; CHECK-NEXT: li a0, 6 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll --- a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll @@ -59,10 +59,10 @@ define @combine_vec_shl_shl( %x) { ; CHECK-LABEL: combine_vec_shl_shl: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: addi a0, zero, 4 +; CHECK-NEXT: li a0, 4 ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: vsll.vv v8, v8, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll @@ -14,10 +14,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -43,10 +43,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -72,10 +72,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -101,10 +101,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; 
CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -130,10 +130,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v10, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v10, v10, a0 ; CHECK-NEXT: vsub.vv v8, v8, v10 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v10, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -159,10 +159,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v12, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v12, v12, a0 ; CHECK-NEXT: vsub.vv v8, v8, v12 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v12, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -188,10 +188,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v16, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v16, v16, a0 ; CHECK-NEXT: vsub.vv v8, v8, v16 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v16, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -234,7 +234,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -267,7 +267,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -305,7 +305,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -338,7 +338,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -376,7 +376,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -409,7 +409,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -447,7 +447,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -480,7 +480,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -518,7 +518,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -551,7 +551,7 @@ ; RV64-NEXT: lui a0, 1 
; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -589,7 +589,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -622,7 +622,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1048,7 +1048,7 @@ ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: vsrl.vi v9, v8, 16 ; RV32-NEXT: vor.vv v8, v8, v9 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi a0, sp, 8 @@ -1071,7 +1071,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v11 ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: vmul.vv v8, v8, v10 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1089,7 +1089,7 @@ ; RV64-NEXT: vor.vv v8, v8, v9 ; RV64-NEXT: vsrl.vi v9, v8, 16 ; RV64-NEXT: vor.vv v8, v8, v9 -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsrl.vx v9, v8, a0 ; RV64-NEXT: vor.vv v8, v8, v9 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1134,7 +1134,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv1i64( %va, i1 false) @@ -1174,7 +1174,7 @@ ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vsrl.vi v10, v8, 16 ; RV32-NEXT: vor.vv v8, v8, v10 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v10, v8, a0 ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi a0, sp, 8 @@ -1197,7 +1197,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v14 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vmul.vv v8, v8, v12 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1215,7 +1215,7 @@ ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vsrl.vi v10, v8, 16 ; RV64-NEXT: vor.vv v8, v8, v10 -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsrl.vx v10, v8, a0 ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1260,7 +1260,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv2i64( %va, i1 false) @@ -1300,7 +1300,7 @@ ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: vsrl.vi v12, v8, 16 ; RV32-NEXT: vor.vv v8, v8, v12 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v12, v8, a0 ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi a0, sp, 8 @@ -1323,7 +1323,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v20 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vmul.vv v8, v8, v16 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1341,7 +1341,7 @@ ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: vsrl.vi v12, v8, 16 ; RV64-NEXT: vor.vv v8, v8, v12 -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsrl.vx v12, v8, a0 ; RV64-NEXT: vor.vv v8, v8, 
v12 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1386,7 +1386,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv4i64( %va, i1 false) @@ -1426,7 +1426,7 @@ ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 16 ; RV32-NEXT: vor.vv v8, v8, v16 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v16, v8, a0 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi a0, sp, 8 @@ -1449,7 +1449,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v0 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vmul.vv v8, v8, v24 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1467,7 +1467,7 @@ ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 16 ; RV64-NEXT: vor.vv v8, v8, v16 -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsrl.vx v16, v8, a0 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1512,7 +1512,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv8i64( %va, i1 false) @@ -1532,10 +1532,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1560,10 +1560,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1588,10 +1588,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1616,10 +1616,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1644,10 +1644,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v10, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v10, v10, a0 ; CHECK-NEXT: vsub.vv v8, v8, v10 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v10, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1672,10 +1672,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v12, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; 
CHECK-NEXT: vand.vx v12, v12, a0 ; CHECK-NEXT: vsub.vv v8, v8, v12 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v12, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1700,10 +1700,10 @@ ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vsrl.vi v16, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v16, v16, a0 ; CHECK-NEXT: vsub.vv v8, v8, v16 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v16, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1745,7 +1745,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -1778,7 +1778,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1815,7 +1815,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -1848,7 +1848,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1885,7 +1885,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -1918,7 +1918,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1955,7 +1955,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -1988,7 +1988,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -2025,7 +2025,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -2058,7 +2058,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -2095,7 +2095,7 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret @@ -2128,7 +2128,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: 
vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -2548,7 +2548,7 @@ ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: vsrl.vi v9, v8, 16 ; RV32-NEXT: vor.vv v8, v8, v9 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi a0, sp, 8 @@ -2571,7 +2571,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v11 ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: vmul.vv v8, v8, v10 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2589,7 +2589,7 @@ ; RV64-NEXT: vor.vv v8, v8, v9 ; RV64-NEXT: vsrl.vi v9, v8, 16 ; RV64-NEXT: vor.vv v8, v8, v9 -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsrl.vx v9, v8, a0 ; RV64-NEXT: vor.vv v8, v8, v9 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2634,7 +2634,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv1i64( %va, i1 true) @@ -2673,7 +2673,7 @@ ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vsrl.vi v10, v8, 16 ; RV32-NEXT: vor.vv v8, v8, v10 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v10, v8, a0 ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi a0, sp, 8 @@ -2696,7 +2696,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v14 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vmul.vv v8, v8, v12 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2714,7 +2714,7 @@ ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vsrl.vi v10, v8, 16 ; RV64-NEXT: vor.vv v8, v8, v10 -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsrl.vx v10, v8, a0 ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2759,7 +2759,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv2i64( %va, i1 true) @@ -2798,7 +2798,7 @@ ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: vsrl.vi v12, v8, 16 ; RV32-NEXT: vor.vv v8, v8, v12 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v12, v8, a0 ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi a0, sp, 8 @@ -2821,7 +2821,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v20 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vmul.vv v8, v8, v16 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2839,7 +2839,7 @@ ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: vsrl.vi v12, v8, 16 ; RV64-NEXT: vor.vv v8, v8, v12 -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsrl.vx v12, v8, a0 ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2884,7 +2884,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv4i64( %va, i1 true) @@ -2923,7 +2923,7 @@ ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 16 ; RV32-NEXT: vor.vv v8, v8, v16 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v16, v8, a0 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi a0, sp, 8 @@ -2946,7 +2946,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v0 ; RV32-NEXT: vand.vv v8, v8, v16 ; 
RV32-NEXT: vmul.vv v8, v8, v24 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2964,7 +2964,7 @@ ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 16 ; RV64-NEXT: vor.vv v8, v8, v16 -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsrl.vx v16, v8, a0 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -3009,7 +3009,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv8i64( %va, i1 true) diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll @@ -7,10 +7,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -29,10 +29,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -51,10 +51,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -73,10 +73,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -95,10 +95,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vi v10, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v10, v10, a0 ; CHECK-NEXT: vsub.vv v8, v8, v10 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v10, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -117,10 +117,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vi v12, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v12, v12, a0 ; CHECK-NEXT: vsub.vv v8, v8, v12 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v12, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -139,10 +139,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vi v16, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v16, v16, a0 ; CHECK-NEXT: vsub.vv v8, v8, v16 -; 
CHECK-NEXT: addi a0, zero, 51
+; CHECK-NEXT: li a0, 51
 ; CHECK-NEXT: vand.vx v16, v8, a0
 ; CHECK-NEXT: vsrl.vi v8, v8, 2
 ; CHECK-NEXT: vand.vx v8, v8, a0
@@ -176,7 +176,7 @@
 ; RV32-NEXT: lui a0, 1
 ; RV32-NEXT: addi a0, a0, -241
 ; RV32-NEXT: vand.vx v8, v8, a0
-; RV32-NEXT: addi a0, zero, 257
+; RV32-NEXT: li a0, 257
 ; RV32-NEXT: vmul.vx v8, v8, a0
 ; RV32-NEXT: vsrl.vi v8, v8, 8
 ; RV32-NEXT: ret
@@ -200,7 +200,7 @@
 ; RV64-NEXT: lui a0, 1
 ; RV64-NEXT: addiw a0, a0, -241
 ; RV64-NEXT: vand.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 257
+; RV64-NEXT: li a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
 ; RV64-NEXT: vsrl.vi v8, v8, 8
 ; RV64-NEXT: ret
@@ -229,7 +229,7 @@
 ; RV32-NEXT: lui a0, 1
 ; RV32-NEXT: addi a0, a0, -241
 ; RV32-NEXT: vand.vx v8, v8, a0
-; RV32-NEXT: addi a0, zero, 257
+; RV32-NEXT: li a0, 257
 ; RV32-NEXT: vmul.vx v8, v8, a0
 ; RV32-NEXT: vsrl.vi v8, v8, 8
 ; RV32-NEXT: ret
@@ -253,7 +253,7 @@
 ; RV64-NEXT: lui a0, 1
 ; RV64-NEXT: addiw a0, a0, -241
 ; RV64-NEXT: vand.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 257
+; RV64-NEXT: li a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
 ; RV64-NEXT: vsrl.vi v8, v8, 8
 ; RV64-NEXT: ret
@@ -282,7 +282,7 @@
 ; RV32-NEXT: lui a0, 1
 ; RV32-NEXT: addi a0, a0, -241
 ; RV32-NEXT: vand.vx v8, v8, a0
-; RV32-NEXT: addi a0, zero, 257
+; RV32-NEXT: li a0, 257
 ; RV32-NEXT: vmul.vx v8, v8, a0
 ; RV32-NEXT: vsrl.vi v8, v8, 8
 ; RV32-NEXT: ret
@@ -306,7 +306,7 @@
 ; RV64-NEXT: lui a0, 1
 ; RV64-NEXT: addiw a0, a0, -241
 ; RV64-NEXT: vand.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 257
+; RV64-NEXT: li a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
 ; RV64-NEXT: vsrl.vi v8, v8, 8
 ; RV64-NEXT: ret
@@ -335,7 +335,7 @@
 ; RV32-NEXT: lui a0, 1
 ; RV32-NEXT: addi a0, a0, -241
 ; RV32-NEXT: vand.vx v8, v8, a0
-; RV32-NEXT: addi a0, zero, 257
+; RV32-NEXT: li a0, 257
 ; RV32-NEXT: vmul.vx v8, v8, a0
 ; RV32-NEXT: vsrl.vi v8, v8, 8
 ; RV32-NEXT: ret
@@ -359,7 +359,7 @@
 ; RV64-NEXT: lui a0, 1
 ; RV64-NEXT: addiw a0, a0, -241
 ; RV64-NEXT: vand.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 257
+; RV64-NEXT: li a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
 ; RV64-NEXT: vsrl.vi v8, v8, 8
 ; RV64-NEXT: ret
@@ -388,7 +388,7 @@
 ; RV32-NEXT: lui a0, 1
 ; RV32-NEXT: addi a0, a0, -241
 ; RV32-NEXT: vand.vx v8, v8, a0
-; RV32-NEXT: addi a0, zero, 257
+; RV32-NEXT: li a0, 257
 ; RV32-NEXT: vmul.vx v8, v8, a0
 ; RV32-NEXT: vsrl.vi v8, v8, 8
 ; RV32-NEXT: ret
@@ -412,7 +412,7 @@
 ; RV64-NEXT: lui a0, 1
 ; RV64-NEXT: addiw a0, a0, -241
 ; RV64-NEXT: vand.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 257
+; RV64-NEXT: li a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
 ; RV64-NEXT: vsrl.vi v8, v8, 8
 ; RV64-NEXT: ret
@@ -441,7 +441,7 @@
 ; RV32-NEXT: lui a0, 1
 ; RV32-NEXT: addi a0, a0, -241
 ; RV32-NEXT: vand.vx v8, v8, a0
-; RV32-NEXT: addi a0, zero, 257
+; RV32-NEXT: li a0, 257
 ; RV32-NEXT: vmul.vx v8, v8, a0
 ; RV32-NEXT: vsrl.vi v8, v8, 8
 ; RV32-NEXT: ret
@@ -465,7 +465,7 @@
 ; RV64-NEXT: lui a0, 1
 ; RV64-NEXT: addiw a0, a0, -241
 ; RV64-NEXT: vand.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 257
+; RV64-NEXT: li a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
 ; RV64-NEXT: vsrl.vi v8, v8, 8
 ; RV64-NEXT: ret
@@ -790,7 +790,7 @@
 ; RV32-NEXT: vadd.vv v8, v8, v11
 ; RV32-NEXT: vand.vv v8, v8, v9
 ; RV32-NEXT: vmul.vv v8, v8, v10
-; RV32-NEXT: addi a0, zero, 56
+; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsrl.vx v8, v8, a0
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
@@ -839,7 +839,7 @@
 ; RV64-NEXT: slli a0, a0, 16
 ; RV64-NEXT: addi a0, a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 56
+; RV64-NEXT: li a0, 56
 ; RV64-NEXT: vsrl.vx v8, v8, a0
 ; RV64-NEXT: ret
 %a = call @llvm.ctpop.nxv1i64( %va)
@@ -888,7 +888,7 @@
 ; RV32-NEXT: vadd.vv v8, v8, v14
 ; RV32-NEXT: vand.vv v8, v8, v10
 ; RV32-NEXT: vmul.vv v8, v8, v12
-; RV32-NEXT: addi a0, zero, 56
+; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsrl.vx v8, v8, a0
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
@@ -937,7 +937,7 @@
 ; RV64-NEXT: slli a0, a0, 16
 ; RV64-NEXT: addi a0, a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 56
+; RV64-NEXT: li a0, 56
 ; RV64-NEXT: vsrl.vx v8, v8, a0
 ; RV64-NEXT: ret
 %a = call @llvm.ctpop.nxv2i64( %va)
@@ -986,7 +986,7 @@
 ; RV32-NEXT: vadd.vv v8, v8, v20
 ; RV32-NEXT: vand.vv v8, v8, v12
 ; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: addi a0, zero, 56
+; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsrl.vx v8, v8, a0
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
@@ -1035,7 +1035,7 @@
 ; RV64-NEXT: slli a0, a0, 16
 ; RV64-NEXT: addi a0, a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 56
+; RV64-NEXT: li a0, 56
 ; RV64-NEXT: vsrl.vx v8, v8, a0
 ; RV64-NEXT: ret
 %a = call @llvm.ctpop.nxv4i64( %va)
@@ -1084,7 +1084,7 @@
 ; RV32-NEXT: vadd.vv v8, v8, v0
 ; RV32-NEXT: vand.vv v8, v8, v16
 ; RV32-NEXT: vmul.vv v8, v8, v24
-; RV32-NEXT: addi a0, zero, 56
+; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsrl.vx v8, v8, a0
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
@@ -1133,7 +1133,7 @@
 ; RV64-NEXT: slli a0, a0, 16
 ; RV64-NEXT: addi a0, a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 56
+; RV64-NEXT: li a0, 56
 ; RV64-NEXT: vsrl.vx v8, v8, a0
 ; RV64-NEXT: ret
 %a = call @llvm.ctpop.nxv8i64( %va)
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -5,16 +5,16 @@
 define @cttz_nxv1i8( %va) {
 ; CHECK-LABEL: cttz_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vsub.vx v9, v8, a0
 ; CHECK-NEXT: vxor.vi v8, v8, -1
 ; CHECK-NEXT: vand.vv v8, v8, v9
 ; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: addi a0, zero, 85
+; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v9, v9, a0
 ; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: addi a0, zero, 51
+; CHECK-NEXT: li a0, 51
 ; CHECK-NEXT: vand.vx v9, v8, a0
 ; CHECK-NEXT: vsrl.vi v8, v8, 2
 ; CHECK-NEXT: vand.vx v8, v8, a0
@@ -31,16 +31,16 @@
 define @cttz_nxv2i8( %va) {
 ; CHECK-LABEL: cttz_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vsub.vx v9, v8, a0
 ; CHECK-NEXT: vxor.vi v8, v8, -1
 ; CHECK-NEXT: vand.vv v8, v8, v9
 ; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: addi a0, zero, 85
+; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v9, v9, a0
 ; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: addi a0, zero, 51
+; CHECK-NEXT: li a0, 51
 ; CHECK-NEXT: vand.vx v9, v8, a0
 ; CHECK-NEXT: vsrl.vi v8, v8, 2
 ; CHECK-NEXT: vand.vx v8, v8, a0
@@ -57,16 +57,16 @@
 define @cttz_nxv4i8( %va) {
 ; CHECK-LABEL: cttz_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vsub.vx v9, v8, a0
 ; CHECK-NEXT: vxor.vi v8, v8, -1
 ; CHECK-NEXT: vand.vv v8, v8, v9
 ; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: addi a0, zero, 85
+; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v9, v9, a0
 ; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: addi a0, zero, 51
+; CHECK-NEXT: li a0, 51
 ; CHECK-NEXT: vand.vx v9, v8, a0
 ; CHECK-NEXT: vsrl.vi v8, v8, 2
 ; CHECK-NEXT: vand.vx v8, v8, a0
@@ -83,16 +83,16 @@
 define @cttz_nxv8i8( %va) {
 ; CHECK-LABEL: cttz_nxv8i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT: vsub.vx v9, v8, a0
 ; CHECK-NEXT: vxor.vi v8, v8, -1
 ; CHECK-NEXT: vand.vv v8, v8, v9
 ; CHECK-NEXT: vsrl.vi v9, v8, 1
-; CHECK-NEXT: addi a0, zero, 85
+; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v9, v9, a0
 ; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: addi a0, zero, 51
+; CHECK-NEXT: li a0, 51
 ; CHECK-NEXT: vand.vx v9, v8, a0
 ; CHECK-NEXT: vsrl.vi v8, v8, 2
 ; CHECK-NEXT: vand.vx v8, v8, a0
@@ -109,16 +109,16 @@
 define @cttz_nxv16i8( %va) {
 ; CHECK-LABEL: cttz_nxv16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT: vsub.vx v10, v8, a0
 ; CHECK-NEXT: vxor.vi v8, v8, -1
 ; CHECK-NEXT: vand.vv v8, v8, v10
 ; CHECK-NEXT: vsrl.vi v10, v8, 1
-; CHECK-NEXT: addi a0, zero, 85
+; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v10, v10, a0
 ; CHECK-NEXT: vsub.vv v8, v8, v10
-; CHECK-NEXT: addi a0, zero, 51
+; CHECK-NEXT: li a0, 51
 ; CHECK-NEXT: vand.vx v10, v8, a0
 ; CHECK-NEXT: vsrl.vi v8, v8, 2
 ; CHECK-NEXT: vand.vx v8, v8, a0
@@ -135,16 +135,16 @@
 define @cttz_nxv32i8( %va) {
 ; CHECK-LABEL: cttz_nxv32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT: vsub.vx v12, v8, a0
 ; CHECK-NEXT: vxor.vi v8, v8, -1
 ; CHECK-NEXT: vand.vv v8, v8, v12
 ; CHECK-NEXT: vsrl.vi v12, v8, 1
-; CHECK-NEXT: addi a0, zero, 85
+; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v12, v12, a0
 ; CHECK-NEXT: vsub.vv v8, v8, v12
-; CHECK-NEXT: addi a0, zero, 51
+; CHECK-NEXT: li a0, 51
 ; CHECK-NEXT: vand.vx v12, v8, a0
 ; CHECK-NEXT: vsrl.vi v8, v8, 2
 ; CHECK-NEXT: vand.vx v8, v8, a0
@@ -161,16 +161,16 @@
 define @cttz_nxv64i8( %va) {
 ; CHECK-LABEL: cttz_nxv64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
+; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT: vsub.vx v16, v8, a0
 ; CHECK-NEXT: vxor.vi v8, v8, -1
 ; CHECK-NEXT: vand.vv v8, v8, v16
 ; CHECK-NEXT: vsrl.vi v16, v8, 1
-; CHECK-NEXT: addi a0, zero, 85
+; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v16, v16, a0
 ; CHECK-NEXT: vsub.vv v8, v8, v16
-; CHECK-NEXT: addi a0, zero, 51
+; CHECK-NEXT: li a0, 51
 ; CHECK-NEXT: vand.vx v16, v8, a0
 ; CHECK-NEXT: vsrl.vi v8, v8, 2
 ; CHECK-NEXT: vand.vx v8, v8, a0
@@ -187,7 +187,7 @@
 define @cttz_nxv1i16( %va) {
 ; RV32-LABEL: cttz_nxv1i16:
 ; RV32: # %bb.0:
-; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32-NEXT: vsub.vx v9, v8, a0
 ; RV32-NEXT: vxor.vi v8, v8, -1
@@ -208,14 +208,14 @@
 ; RV32-NEXT: lui a0, 1
 ; RV32-NEXT: addi a0, a0, -241
 ; RV32-NEXT: vand.vx v8, v8, a0
-; RV32-NEXT: addi a0, zero, 257
+; RV32-NEXT: li a0, 257
 ; RV32-NEXT: vmul.vx v8, v8, a0
 ; RV32-NEXT: vsrl.vi v8, v8, 8
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: cttz_nxv1i16:
 ; RV64: # %bb.0:
-; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64-NEXT: vsub.vx v9, v8, a0
 ; RV64-NEXT: vxor.vi v8, v8, -1
@@ -236,7 +236,7 @@
 ; RV64-NEXT: lui a0, 1
 ; RV64-NEXT: addiw a0, a0, -241
 ; RV64-NEXT: vand.vx v8, v8, a0
-; RV64-NEXT: addi a0, zero, 257
+; RV64-NEXT: li a0, 257
 ; RV64-NEXT: vmul.vx v8, v8, a0
 ; RV64-NEXT: vsrl.vi v8, v8, 8
 ; RV64-NEXT: ret
@@ -248,7 +248,7 @@
 define @cttz_nxv2i16( %va) {
 ; RV32-LABEL: cttz_nxv2i16:
 ; RV32: # %bb.0:
-; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -269,14 +269,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -297,7 +297,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -309,7 +309,7 @@ define @cttz_nxv4i16( %va) { ; RV32-LABEL: cttz_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -330,14 +330,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -358,7 +358,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -370,7 +370,7 @@ define @cttz_nxv8i16( %va) { ; RV32-LABEL: cttz_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV32-NEXT: vsub.vx v10, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -391,14 +391,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV64-NEXT: vsub.vx v10, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -419,7 +419,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -431,7 +431,7 @@ define @cttz_nxv16i16( %va) { ; RV32-LABEL: cttz_nxv16i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV32-NEXT: vsub.vx v12, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -452,14 +452,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv16i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV64-NEXT: vsub.vx v12, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -480,7 +480,7 @@ ; RV64-NEXT: lui 
a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -492,7 +492,7 @@ define @cttz_nxv32i16( %va) { ; RV32-LABEL: cttz_nxv32i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -513,14 +513,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv32i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -541,7 +541,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -553,7 +553,7 @@ define @cttz_nxv1i32( %va) { ; RV32-LABEL: cttz_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -582,7 +582,7 @@ ; ; RV64-LABEL: cttz_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -616,7 +616,7 @@ define @cttz_nxv2i32( %va) { ; RV32-LABEL: cttz_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -645,7 +645,7 @@ ; ; RV64-LABEL: cttz_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -679,7 +679,7 @@ define @cttz_nxv4i32( %va) { ; RV32-LABEL: cttz_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV32-NEXT: vsub.vx v10, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -708,7 +708,7 @@ ; ; RV64-LABEL: cttz_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV64-NEXT: vsub.vx v10, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -742,7 +742,7 @@ define @cttz_nxv8i32( %va) { ; RV32-LABEL: cttz_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsub.vx v12, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -771,7 +771,7 @@ ; ; RV64-LABEL: cttz_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV64-NEXT: vsub.vx v12, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -805,7 +805,7 @@ define @cttz_nxv16i32( %va) { ; RV32-LABEL: cttz_nxv16i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -834,7 +834,7 @@ ; ; RV64-LABEL: cttz_nxv16i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, 
zero, e32, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -886,7 +886,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -910,14 +910,14 @@ ; RV32-NEXT: vadd.vv v8, v8, v11 ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: vmul.vv v8, v8, v10 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -963,7 +963,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv1i64( %va, i1 false) @@ -992,7 +992,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vsub.vx v10, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1016,14 +1016,14 @@ ; RV32-NEXT: vadd.vv v8, v8, v14 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vmul.vv v8, v8, v12 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vsub.vx v10, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1069,7 +1069,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv2i64( %va, i1 false) @@ -1098,7 +1098,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vsub.vx v12, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1122,14 +1122,14 @@ ; RV32-NEXT: vadd.vv v8, v8, v20 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vmul.vv v8, v8, v16 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vsub.vx v12, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1175,7 +1175,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv4i64( %va, i1 false) @@ -1204,7 +1204,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1228,14 +1228,14 @@ ; RV32-NEXT: vadd.vv v8, v8, v0 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vmul.vv v8, v8, v24 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: 
vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1281,7 +1281,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv8i64( %va, i1 false) @@ -1292,16 +1292,16 @@ define @cttz_zero_undef_nxv1i8( %va) { ; CHECK-LABEL: cttz_zero_undef_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v9, v8, a0 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1317,16 +1317,16 @@ define @cttz_zero_undef_nxv2i8( %va) { ; CHECK-LABEL: cttz_zero_undef_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v9, v8, a0 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1342,16 +1342,16 @@ define @cttz_zero_undef_nxv4i8( %va) { ; CHECK-LABEL: cttz_zero_undef_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v9, v8, a0 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1367,16 +1367,16 @@ define @cttz_zero_undef_nxv8i8( %va) { ; CHECK-LABEL: cttz_zero_undef_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v9, v8, a0 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1392,16 +1392,16 @@ define @cttz_zero_undef_nxv16i8( %va) { ; CHECK-LABEL: cttz_zero_undef_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsub.vx v10, v8, a0 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: vsrl.vi v10, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v10, v10, a0 ; CHECK-NEXT: vsub.vv 
v8, v8, v10 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v10, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1417,16 +1417,16 @@ define @cttz_zero_undef_nxv32i8( %va) { ; CHECK-LABEL: cttz_zero_undef_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v12, v8, a0 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: vsrl.vi v12, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v12, v12, a0 ; CHECK-NEXT: vsub.vv v8, v8, v12 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v12, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1442,16 +1442,16 @@ define @cttz_zero_undef_nxv64i8( %va) { ; CHECK-LABEL: cttz_zero_undef_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v16, v8, a0 ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: vsrl.vi v16, v8, 1 -; CHECK-NEXT: addi a0, zero, 85 +; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v16, v16, a0 ; CHECK-NEXT: vsub.vv v8, v8, v16 -; CHECK-NEXT: addi a0, zero, 51 +; CHECK-NEXT: li a0, 51 ; CHECK-NEXT: vand.vx v16, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a0 @@ -1467,7 +1467,7 @@ define @cttz_zero_undef_nxv1i16( %va) { ; RV32-LABEL: cttz_zero_undef_nxv1i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1488,14 +1488,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv1i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1516,7 +1516,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1527,7 +1527,7 @@ define @cttz_zero_undef_nxv2i16( %va) { ; RV32-LABEL: cttz_zero_undef_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1548,14 +1548,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1576,7 +1576,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1587,7 +1587,7 @@ define @cttz_zero_undef_nxv4i16( %va) { ; RV32-LABEL: 
cttz_zero_undef_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1608,14 +1608,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1636,7 +1636,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1647,7 +1647,7 @@ define @cttz_zero_undef_nxv8i16( %va) { ; RV32-LABEL: cttz_zero_undef_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV32-NEXT: vsub.vx v10, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1668,14 +1668,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV64-NEXT: vsub.vx v10, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1696,7 +1696,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1707,7 +1707,7 @@ define @cttz_zero_undef_nxv16i16( %va) { ; RV32-LABEL: cttz_zero_undef_nxv16i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV32-NEXT: vsub.vx v12, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1728,14 +1728,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv16i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV64-NEXT: vsub.vx v12, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1756,7 +1756,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1767,7 +1767,7 @@ define @cttz_zero_undef_nxv32i16( %va) { ; RV32-LABEL: cttz_zero_undef_nxv32i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1788,14 +1788,14 @@ ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -241 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: addi a0, zero, 257 +; RV32-NEXT: li a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv32i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, 
zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1816,7 +1816,7 @@ ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 257 +; RV64-NEXT: li a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 ; RV64-NEXT: ret @@ -1827,7 +1827,7 @@ define @cttz_zero_undef_nxv1i32( %va) { ; RV32-LABEL: cttz_zero_undef_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1856,7 +1856,7 @@ ; ; RV64-LABEL: cttz_zero_undef_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1889,7 +1889,7 @@ define @cttz_zero_undef_nxv2i32( %va) { ; RV32-LABEL: cttz_zero_undef_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1918,7 +1918,7 @@ ; ; RV64-LABEL: cttz_zero_undef_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -1951,7 +1951,7 @@ define @cttz_zero_undef_nxv4i32( %va) { ; RV32-LABEL: cttz_zero_undef_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV32-NEXT: vsub.vx v10, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -1980,7 +1980,7 @@ ; ; RV64-LABEL: cttz_zero_undef_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV64-NEXT: vsub.vx v10, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2013,7 +2013,7 @@ define @cttz_zero_undef_nxv8i32( %va) { ; RV32-LABEL: cttz_zero_undef_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsub.vx v12, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -2042,7 +2042,7 @@ ; ; RV64-LABEL: cttz_zero_undef_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV64-NEXT: vsub.vx v12, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2075,7 +2075,7 @@ define @cttz_zero_undef_nxv16i32( %va) { ; RV32-LABEL: cttz_zero_undef_nxv16i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -2104,7 +2104,7 @@ ; ; RV64-LABEL: cttz_zero_undef_nxv16i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2155,7 +2155,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -2179,14 +2179,14 @@ ; RV32-NEXT: vadd.vv v8, v8, v11 ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: vmul.vv v8, v8, v10 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; 
RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2232,7 +2232,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv1i64( %va, i1 true) @@ -2260,7 +2260,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vsub.vx v10, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -2284,14 +2284,14 @@ ; RV32-NEXT: vadd.vv v8, v8, v14 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vmul.vv v8, v8, v12 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vsub.vx v10, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2337,7 +2337,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv2i64( %va, i1 true) @@ -2365,7 +2365,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vsub.vx v12, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -2389,14 +2389,14 @@ ; RV32-NEXT: vadd.vv v8, v8, v20 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vmul.vv v8, v8, v16 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vsub.vx v12, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2442,7 +2442,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv4i64( %va, i1 true) @@ -2470,7 +2470,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vxor.vi v8, v8, -1 @@ -2494,14 +2494,14 @@ ; RV32-NEXT: vadd.vv v8, v8, v0 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vmul.vv v8, v8, v24 -; RV32-NEXT: addi a0, zero, 56 +; RV32-NEXT: li a0, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vxor.vi v8, v8, -1 @@ -2547,7 +2547,7 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: addi a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 56 +; RV64-NEXT: li a0, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv8i64( %va, i1 true) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -282,7 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: addi a1, zero, 6 +; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 @@ -305,7 +305,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: addi a1, zero, 6 +; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll @@ -581,7 +581,7 @@ define i64 @extractelt_nxv1i64_0( %v) { ; CHECK-LABEL: extractelt_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v9, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v9 @@ -597,7 +597,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: ret @@ -611,7 +611,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: ret @@ -622,7 +622,7 @@ define i64 @extractelt_nxv2i64_0( %v) { ; CHECK-LABEL: extractelt_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vx v10, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v10 @@ -638,7 +638,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: ret @@ -652,7 +652,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: ret @@ -663,7 +663,7 @@ define i64 @extractelt_nxv4i64_0( %v) { ; CHECK-LABEL: extractelt_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vx v12, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v12 @@ -679,7 +679,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: ret @@ -693,7 +693,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: ret @@ -704,7 +704,7 @@ define i64 @extractelt_nxv8i64_0( %v) { ; CHECK-LABEL: 
extractelt_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vx v16, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v16 @@ -720,7 +720,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: ret @@ -734,7 +734,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll @@ -31,10 +31,10 @@ ; ; CHECK-ASM-LABEL: gather: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: mv a2, zero -; CHECK-ASM-NEXT: addi a6, zero, 32 -; CHECK-ASM-NEXT: addi a4, zero, 5 -; CHECK-ASM-NEXT: addi a5, zero, 1024 +; CHECK-ASM-NEXT: li a2, 0 +; CHECK-ASM-NEXT: li a6, 32 +; CHECK-ASM-NEXT: li a4, 5 +; CHECK-ASM-NEXT: li a5, 1024 ; CHECK-ASM-NEXT: .LBB0_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: vsetvli zero, a6, e8, m1, ta, mu @@ -96,14 +96,14 @@ ; ; CHECK-ASM-LABEL: gather_masked: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: mv a2, zero +; CHECK-ASM-NEXT: li a2, 0 ; CHECK-ASM-NEXT: lui a3, 983765 ; CHECK-ASM-NEXT: addiw a3, a3, 873 ; CHECK-ASM-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-ASM-NEXT: vmv.s.x v0, a3 -; CHECK-ASM-NEXT: addi a6, zero, 32 -; CHECK-ASM-NEXT: addi a4, zero, 5 -; CHECK-ASM-NEXT: addi a5, zero, 1024 +; CHECK-ASM-NEXT: li a6, 32 +; CHECK-ASM-NEXT: li a4, 5 +; CHECK-ASM-NEXT: li a5, 1024 ; CHECK-ASM-NEXT: .LBB1_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: vsetvli zero, a6, e8, m1, ta, mu @@ -166,11 +166,11 @@ ; ; CHECK-ASM-LABEL: gather_negative_stride: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: mv a2, zero +; CHECK-ASM-NEXT: li a2, 0 ; CHECK-ASM-NEXT: addi a1, a1, 155 -; CHECK-ASM-NEXT: addi a6, zero, 32 -; CHECK-ASM-NEXT: addi a4, zero, -5 -; CHECK-ASM-NEXT: addi a5, zero, 1024 +; CHECK-ASM-NEXT: li a6, 32 +; CHECK-ASM-NEXT: li a4, -5 +; CHECK-ASM-NEXT: li a5, 1024 ; CHECK-ASM-NEXT: .LBB2_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: vsetvli zero, a6, e8, m1, ta, mu @@ -232,9 +232,9 @@ ; ; CHECK-ASM-LABEL: gather_zero_stride: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: mv a2, zero -; CHECK-ASM-NEXT: addi a3, zero, 32 -; CHECK-ASM-NEXT: addi a4, zero, 1024 +; CHECK-ASM-NEXT: li a2, 0 +; CHECK-ASM-NEXT: li a3, 32 +; CHECK-ASM-NEXT: li a4, 1024 ; CHECK-ASM-NEXT: .LBB3_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: vsetvli zero, a3, e8, m1, ta, mu @@ -302,10 +302,10 @@ ; ; CHECK-ASM-LABEL: scatter: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: mv a2, zero -; CHECK-ASM-NEXT: addi a6, zero, 32 -; CHECK-ASM-NEXT: addi a4, zero, 5 -; CHECK-ASM-NEXT: addi a5, zero, 1024 +; CHECK-ASM-NEXT: li a2, 0 +; CHECK-ASM-NEXT: li a6, 32 +; CHECK-ASM-NEXT: li a4, 5 +; CHECK-ASM-NEXT: li a5, 1024 ; CHECK-ASM-NEXT: .LBB4_1: # %vector.body ; CHECK-ASM-NEXT: # 
=>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: add a3, a1, a2 @@ -368,14 +368,14 @@ ; ; CHECK-ASM-LABEL: scatter_masked: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: mv a2, zero -; CHECK-ASM-NEXT: addi a6, zero, 32 +; CHECK-ASM-NEXT: li a2, 0 +; CHECK-ASM-NEXT: li a6, 32 ; CHECK-ASM-NEXT: lui a4, 983765 ; CHECK-ASM-NEXT: addiw a4, a4, 873 ; CHECK-ASM-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-ASM-NEXT: vmv.s.x v0, a4 -; CHECK-ASM-NEXT: addi a4, zero, 5 -; CHECK-ASM-NEXT: addi a5, zero, 1024 +; CHECK-ASM-NEXT: li a4, 5 +; CHECK-ASM-NEXT: li a5, 1024 ; CHECK-ASM-NEXT: .LBB5_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: add a3, a1, a2 @@ -442,9 +442,9 @@ ; ; CHECK-ASM-LABEL: gather_pow2: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: addi a2, zero, 1024 -; CHECK-ASM-NEXT: addi a3, zero, 16 -; CHECK-ASM-NEXT: addi a4, zero, 32 +; CHECK-ASM-NEXT: li a2, 1024 +; CHECK-ASM-NEXT: li a3, 16 +; CHECK-ASM-NEXT: li a4, 32 ; CHECK-ASM-NEXT: .LBB6_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: vsetivli zero, 8, e32, m1, ta, mu @@ -517,9 +517,9 @@ ; ; CHECK-ASM-LABEL: scatter_pow2: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: addi a2, zero, 1024 -; CHECK-ASM-NEXT: addi a3, zero, 32 -; CHECK-ASM-NEXT: addi a4, zero, 16 +; CHECK-ASM-NEXT: li a2, 1024 +; CHECK-ASM-NEXT: li a3, 32 +; CHECK-ASM-NEXT: li a4, 16 ; CHECK-ASM-NEXT: .LBB7_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: vsetvli zero, a3, e8, m1, ta, mu @@ -603,8 +603,8 @@ ; CHECK-ASM: # %bb.0: # %entry ; CHECK-ASM-NEXT: addi a0, a0, 32 ; CHECK-ASM-NEXT: addi a1, a1, 132 -; CHECK-ASM-NEXT: addi a2, zero, 1024 -; CHECK-ASM-NEXT: addi a3, zero, 16 +; CHECK-ASM-NEXT: li a2, 1024 +; CHECK-ASM-NEXT: li a3, 16 ; CHECK-ASM-NEXT: .LBB8_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: addi a4, a1, -128 @@ -742,9 +742,9 @@ ; ; CHECK-ASM-LABEL: gather_unroll: ; CHECK-ASM: # %bb.0: # %entry -; CHECK-ASM-NEXT: addi a2, zero, 256 -; CHECK-ASM-NEXT: addi a3, zero, 64 -; CHECK-ASM-NEXT: addi a4, zero, 16 +; CHECK-ASM-NEXT: li a2, 256 +; CHECK-ASM-NEXT: li a3, 64 +; CHECK-ASM-NEXT: li a4, 16 ; CHECK-ASM-NEXT: .LBB9_1: # %vector.body ; CHECK-ASM-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: vsetivli zero, 8, e32, m1, ta, mu @@ -858,8 +858,8 @@ ; CHECK-ASM-LABEL: gather_of_pointers: ; CHECK-ASM: # %bb.0: ; CHECK-ASM-NEXT: addi a0, a0, 16 -; CHECK-ASM-NEXT: addi a2, zero, 1024 -; CHECK-ASM-NEXT: addi a3, zero, 40 +; CHECK-ASM-NEXT: li a2, 1024 +; CHECK-ASM-NEXT: li a3, 40 ; CHECK-ASM-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: addi a4, a1, 80 ; CHECK-ASM-NEXT: vsetivli zero, 2, e64, m1, ta, mu @@ -934,8 +934,8 @@ ; CHECK-ASM-LABEL: scatter_of_pointers: ; CHECK-ASM: # %bb.0: ; CHECK-ASM-NEXT: addi a1, a1, 16 -; CHECK-ASM-NEXT: addi a2, zero, 1024 -; CHECK-ASM-NEXT: addi a3, zero, 40 +; CHECK-ASM-NEXT: li a2, 1024 +; CHECK-ASM-NEXT: li a3, 40 ; CHECK-ASM-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; CHECK-ASM-NEXT: addi a4, a1, -16 ; CHECK-ASM-NEXT: vsetivli zero, 2, e64, m1, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll @@ -71,7 +71,7 @@ define void @abs_v32i8(<32 x i8>* %x) { ; LMULMAX2-LABEL: abs_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a1, 
zero, 32 +; LMULMAX2-NEXT: li a1, 32 ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll @@ -7,7 +7,7 @@ ; VLEN256-LABEL: bitcast_1024B: ; VLEN256: # %bb.0: ; VLEN256-NEXT: addi a1, a0, 256 -; VLEN256-NEXT: addi a2, zero, 256 +; VLEN256-NEXT: li a2, 256 ; VLEN256-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; VLEN256-NEXT: vle8.v v24, (a0) ; VLEN256-NEXT: vle8.v v0, (a1) @@ -17,14 +17,14 @@ ; ; VLEN512-LABEL: bitcast_1024B: ; VLEN512: # %bb.0: -; VLEN512-NEXT: addi a0, zero, 512 +; VLEN512-NEXT: li a0, 512 ; VLEN512-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; VLEN512-NEXT: vadd.vv v8, v16, v8 ; VLEN512-NEXT: ret ; ; VLEN1024-LABEL: bitcast_1024B: ; VLEN1024: # %bb.0: -; VLEN1024-NEXT: addi a0, zero, 512 +; VLEN1024-NEXT: li a0, 512 ; VLEN1024-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; VLEN1024-NEXT: vadd.vv v8, v12, v8 ; VLEN1024-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll @@ -5,7 +5,7 @@ define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) { ; CHECK-LABEL: bitcast_v4i8_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -77,7 +77,7 @@ define i64 @bitcast_v8i8_i64(<8 x i8> %a) { ; RV32-LABEL: bitcast_v8i8_i64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 @@ -96,7 +96,7 @@ define i64 @bitcast_v4i16_i64(<4 x i16> %a) { ; RV32-LABEL: bitcast_v4i16_i64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 @@ -115,7 +115,7 @@ define i64 @bitcast_v2i32_i64(<2 x i32> %a) { ; RV32-LABEL: bitcast_v2i32_i64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 @@ -134,7 +134,7 @@ define i64 @bitcast_v1i64_i64(<1 x i64> %a) { ; RV32-LABEL: bitcast_v1i64_i64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 @@ -203,7 +203,7 @@ define double @bitcast_v8i8_f64(<8 x i8> %a) { ; RV32-LABEL: bitcast_v8i8_f64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 @@ -222,7 +222,7 @@ define double @bitcast_v4i16_f64(<4 x i16> %a) { ; RV32-LABEL: bitcast_v4i16_f64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 @@ -241,7 +241,7 @@ define double @bitcast_v2i32_f64(<2 x i32> %a) { ; RV32-LABEL: bitcast_v2i32_f64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; 
RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 @@ -260,7 +260,7 @@ define double @bitcast_v1i64_f64(<1 x i64> %a) { ; RV32-LABEL: bitcast_v1i64_f64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll @@ -309,9 +309,9 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v9, v8, a1 -; LMULMAX2-RV32-NEXT: addi a2, zero, 40 +; LMULMAX2-RV32-NEXT: li a2, 40 ; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX2-RV32-NEXT: lui a3, 16 ; LMULMAX2-RV32-NEXT: addi a3, a3, -256 @@ -320,7 +320,7 @@ ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 24 ; LMULMAX2-RV32-NEXT: lui a4, 4080 ; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a4 -; LMULMAX2-RV32-NEXT: addi a5, zero, 5 +; LMULMAX2-RV32-NEXT: li a5, 5 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a5 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -332,7 +332,7 @@ ; LMULMAX2-RV32-NEXT: vand.vv v11, v12, v11 ; LMULMAX2-RV32-NEXT: vor.vv v10, v11, v10 ; LMULMAX2-RV32-NEXT: vor.vv v9, v10, v9 -; LMULMAX2-RV32-NEXT: addi a5, zero, 255 +; LMULMAX2-RV32-NEXT: li a5, 255 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a5 ; LMULMAX2-RV32-NEXT: vmerge.vim v10, v10, 0, v0 @@ -393,9 +393,9 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v9, v8, a1 -; LMULMAX2-RV64-NEXT: addi a2, zero, 40 +; LMULMAX2-RV64-NEXT: li a2, 40 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX2-RV64-NEXT: lui a3, 16 ; LMULMAX2-RV64-NEXT: addiw a3, a3, -256 @@ -405,7 +405,7 @@ ; LMULMAX2-RV64-NEXT: lui a3, 4080 ; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a3 ; LMULMAX2-RV64-NEXT: vsrl.vi v11, v8, 8 -; LMULMAX2-RV64-NEXT: addi a3, zero, 255 +; LMULMAX2-RV64-NEXT: li a3, 255 ; LMULMAX2-RV64-NEXT: slli a4, a3, 24 ; LMULMAX2-RV64-NEXT: vand.vx v11, v11, a4 ; LMULMAX2-RV64-NEXT: vor.vv v10, v11, v10 @@ -470,9 +470,9 @@ ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV32-NEXT: addi a1, zero, 56 +; LMULMAX1-RV32-NEXT: li a1, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v9, v8, a1 -; LMULMAX1-RV32-NEXT: addi a2, zero, 40 +; LMULMAX1-RV32-NEXT: li a2, 40 ; LMULMAX1-RV32-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX1-RV32-NEXT: lui a3, 16 ; LMULMAX1-RV32-NEXT: addi a3, a3, -256 @@ -481,7 +481,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 24 ; LMULMAX1-RV32-NEXT: lui a4, 4080 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a4 -; LMULMAX1-RV32-NEXT: addi a5, zero, 5 +; LMULMAX1-RV32-NEXT: li a5, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -493,7 +493,7 @@ ; LMULMAX1-RV32-NEXT: vand.vv v11, v12, v11 ; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 ; LMULMAX1-RV32-NEXT: 
vor.vv v9, v10, v9 -; LMULMAX1-RV32-NEXT: addi a5, zero, 255 +; LMULMAX1-RV32-NEXT: li a5, 255 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a5 ; LMULMAX1-RV32-NEXT: vmerge.vim v10, v10, 0, v0 @@ -554,9 +554,9 @@ ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV64-NEXT: addi a1, zero, 56 +; LMULMAX1-RV64-NEXT: li a1, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: addi a2, zero, 40 +; LMULMAX1-RV64-NEXT: li a2, 40 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX1-RV64-NEXT: lui a3, 16 ; LMULMAX1-RV64-NEXT: addiw a3, a3, -256 @@ -566,7 +566,7 @@ ; LMULMAX1-RV64-NEXT: lui a3, 4080 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 ; LMULMAX1-RV64-NEXT: vsrl.vi v11, v8, 8 -; LMULMAX1-RV64-NEXT: addi a3, zero, 255 +; LMULMAX1-RV64-NEXT: li a3, 255 ; LMULMAX1-RV64-NEXT: slli a4, a3, 24 ; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4 ; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 @@ -1035,9 +1035,9 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1 -; LMULMAX2-RV32-NEXT: addi a2, zero, 40 +; LMULMAX2-RV32-NEXT: li a2, 40 ; LMULMAX2-RV32-NEXT: vsrl.vx v12, v8, a2 ; LMULMAX2-RV32-NEXT: lui a3, 16 ; LMULMAX2-RV32-NEXT: addi a3, a3, -256 @@ -1046,7 +1046,7 @@ ; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 24 ; LMULMAX2-RV32-NEXT: lui a4, 4080 ; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4 -; LMULMAX2-RV32-NEXT: addi a5, zero, 85 +; LMULMAX2-RV32-NEXT: li a5, 85 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a5 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -1058,7 +1058,7 @@ ; LMULMAX2-RV32-NEXT: vand.vv v14, v16, v14 ; LMULMAX2-RV32-NEXT: vor.vv v12, v14, v12 ; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10 -; LMULMAX2-RV32-NEXT: addi a5, zero, 255 +; LMULMAX2-RV32-NEXT: li a5, 255 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a5 ; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 0, v0 @@ -1119,9 +1119,9 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: addi a2, zero, 40 +; LMULMAX2-RV64-NEXT: li a2, 40 ; LMULMAX2-RV64-NEXT: vsrl.vx v12, v8, a2 ; LMULMAX2-RV64-NEXT: lui a3, 16 ; LMULMAX2-RV64-NEXT: addiw a3, a3, -256 @@ -1131,7 +1131,7 @@ ; LMULMAX2-RV64-NEXT: lui a3, 4080 ; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a3 ; LMULMAX2-RV64-NEXT: vsrl.vi v14, v8, 8 -; LMULMAX2-RV64-NEXT: addi a3, zero, 255 +; LMULMAX2-RV64-NEXT: li a3, 255 ; LMULMAX2-RV64-NEXT: slli a4, a3, 24 ; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a4 ; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12 @@ -1198,9 +1198,9 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v12, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV32-NEXT: addi a2, zero, 56 +; LMULMAX1-RV32-NEXT: li a2, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v9, v12, a2 -; LMULMAX1-RV32-NEXT: addi a3, zero, 40 +; LMULMAX1-RV32-NEXT: li a3, 40 ; LMULMAX1-RV32-NEXT: vsrl.vx v10, v12, a3 ; LMULMAX1-RV32-NEXT: lui a4, 16 ; LMULMAX1-RV32-NEXT: addi a4, a4, -256 @@ -1209,7 +1209,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v12, 24 ; LMULMAX1-RV32-NEXT: lui a6, 4080 ; 
LMULMAX1-RV32-NEXT: vand.vx v11, v9, a6 -; LMULMAX1-RV32-NEXT: addi a5, zero, 5 +; LMULMAX1-RV32-NEXT: li a5, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1221,7 +1221,7 @@ ; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v9 ; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11 ; LMULMAX1-RV32-NEXT: vor.vv v13, v11, v10 -; LMULMAX1-RV32-NEXT: addi a5, zero, 255 +; LMULMAX1-RV32-NEXT: li a5, 255 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a5 ; LMULMAX1-RV32-NEXT: vmerge.vim v10, v10, 0, v0 @@ -1321,9 +1321,9 @@ ; LMULMAX1-RV64-NEXT: addi a7, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a7) ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV64-NEXT: addi t0, zero, 56 +; LMULMAX1-RV64-NEXT: li t0, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v9, t0 -; LMULMAX1-RV64-NEXT: addi t1, zero, 40 +; LMULMAX1-RV64-NEXT: li t1, 40 ; LMULMAX1-RV64-NEXT: vsrl.vx v11, v9, t1 ; LMULMAX1-RV64-NEXT: lui a1, 16 ; LMULMAX1-RV64-NEXT: addiw t2, a1, -256 @@ -1333,7 +1333,7 @@ ; LMULMAX1-RV64-NEXT: lui a6, 4080 ; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a6 ; LMULMAX1-RV64-NEXT: vsrl.vi v12, v9, 8 -; LMULMAX1-RV64-NEXT: addi a3, zero, 255 +; LMULMAX1-RV64-NEXT: li a3, 255 ; LMULMAX1-RV64-NEXT: slli t3, a3, 24 ; LMULMAX1-RV64-NEXT: vand.vx v12, v12, t3 ; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll @@ -141,9 +141,9 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v9, v8, a1 -; LMULMAX2-RV32-NEXT: addi a2, zero, 40 +; LMULMAX2-RV32-NEXT: li a2, 40 ; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX2-RV32-NEXT: lui a3, 16 ; LMULMAX2-RV32-NEXT: addi a3, a3, -256 @@ -152,7 +152,7 @@ ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 24 ; LMULMAX2-RV32-NEXT: lui a4, 4080 ; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a4 -; LMULMAX2-RV32-NEXT: addi a5, zero, 5 +; LMULMAX2-RV32-NEXT: li a5, 5 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a5 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -164,7 +164,7 @@ ; LMULMAX2-RV32-NEXT: vand.vv v11, v12, v11 ; LMULMAX2-RV32-NEXT: vor.vv v10, v11, v10 ; LMULMAX2-RV32-NEXT: vor.vv v9, v10, v9 -; LMULMAX2-RV32-NEXT: addi a5, zero, 255 +; LMULMAX2-RV32-NEXT: li a5, 255 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a5 ; LMULMAX2-RV32-NEXT: vmerge.vim v10, v10, 0, v0 @@ -195,9 +195,9 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v9, v8, a1 -; LMULMAX2-RV64-NEXT: addi a2, zero, 40 +; LMULMAX2-RV64-NEXT: li a2, 40 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX2-RV64-NEXT: lui a3, 16 ; LMULMAX2-RV64-NEXT: addiw a3, a3, -256 @@ -207,7 +207,7 @@ ; LMULMAX2-RV64-NEXT: lui a3, 4080 ; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a3 ; LMULMAX2-RV64-NEXT: vsrl.vi v11, v8, 8 -; LMULMAX2-RV64-NEXT: addi a3, zero, 255 +; LMULMAX2-RV64-NEXT: li a3, 255 ; LMULMAX2-RV64-NEXT: slli a4, a3, 24 ; LMULMAX2-RV64-NEXT: vand.vx v11, 
v11, a4 ; LMULMAX2-RV64-NEXT: vor.vv v10, v11, v10 @@ -233,9 +233,9 @@ ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV32-NEXT: addi a1, zero, 56 +; LMULMAX1-RV32-NEXT: li a1, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v9, v8, a1 -; LMULMAX1-RV32-NEXT: addi a2, zero, 40 +; LMULMAX1-RV32-NEXT: li a2, 40 ; LMULMAX1-RV32-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX1-RV32-NEXT: lui a3, 16 ; LMULMAX1-RV32-NEXT: addi a3, a3, -256 @@ -244,7 +244,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 24 ; LMULMAX1-RV32-NEXT: lui a4, 4080 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a4 -; LMULMAX1-RV32-NEXT: addi a5, zero, 5 +; LMULMAX1-RV32-NEXT: li a5, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -256,7 +256,7 @@ ; LMULMAX1-RV32-NEXT: vand.vv v11, v12, v11 ; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 ; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9 -; LMULMAX1-RV32-NEXT: addi a5, zero, 255 +; LMULMAX1-RV32-NEXT: li a5, 255 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a5 ; LMULMAX1-RV32-NEXT: vmerge.vim v10, v10, 0, v0 @@ -287,9 +287,9 @@ ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV64-NEXT: addi a1, zero, 56 +; LMULMAX1-RV64-NEXT: li a1, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: addi a2, zero, 40 +; LMULMAX1-RV64-NEXT: li a2, 40 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX1-RV64-NEXT: lui a3, 16 ; LMULMAX1-RV64-NEXT: addiw a3, a3, -256 @@ -299,7 +299,7 @@ ; LMULMAX1-RV64-NEXT: lui a3, 4080 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 ; LMULMAX1-RV64-NEXT: vsrl.vi v11, v8, 8 -; LMULMAX1-RV64-NEXT: addi a3, zero, 255 +; LMULMAX1-RV64-NEXT: li a3, 255 ; LMULMAX1-RV64-NEXT: slli a4, a3, 24 ; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4 ; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 @@ -501,9 +501,9 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1 -; LMULMAX2-RV32-NEXT: addi a2, zero, 40 +; LMULMAX2-RV32-NEXT: li a2, 40 ; LMULMAX2-RV32-NEXT: vsrl.vx v12, v8, a2 ; LMULMAX2-RV32-NEXT: lui a3, 16 ; LMULMAX2-RV32-NEXT: addi a3, a3, -256 @@ -512,7 +512,7 @@ ; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 24 ; LMULMAX2-RV32-NEXT: lui a4, 4080 ; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4 -; LMULMAX2-RV32-NEXT: addi a5, zero, 85 +; LMULMAX2-RV32-NEXT: li a5, 85 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a5 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -524,7 +524,7 @@ ; LMULMAX2-RV32-NEXT: vand.vv v14, v16, v14 ; LMULMAX2-RV32-NEXT: vor.vv v12, v14, v12 ; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10 -; LMULMAX2-RV32-NEXT: addi a5, zero, 255 +; LMULMAX2-RV32-NEXT: li a5, 255 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a5 ; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 0, v0 @@ -555,9 +555,9 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: addi a2, zero, 40 +; LMULMAX2-RV64-NEXT: li a2, 40 ; LMULMAX2-RV64-NEXT: vsrl.vx v12, 
v8, a2 ; LMULMAX2-RV64-NEXT: lui a3, 16 ; LMULMAX2-RV64-NEXT: addiw a3, a3, -256 @@ -567,7 +567,7 @@ ; LMULMAX2-RV64-NEXT: lui a3, 4080 ; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a3 ; LMULMAX2-RV64-NEXT: vsrl.vi v14, v8, 8 -; LMULMAX2-RV64-NEXT: addi a3, zero, 255 +; LMULMAX2-RV64-NEXT: li a3, 255 ; LMULMAX2-RV64-NEXT: slli a4, a3, 24 ; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a4 ; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12 @@ -595,9 +595,9 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV32-NEXT: addi a6, zero, 56 +; LMULMAX1-RV32-NEXT: li a6, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v10, v9, a6 -; LMULMAX1-RV32-NEXT: addi a3, zero, 40 +; LMULMAX1-RV32-NEXT: li a3, 40 ; LMULMAX1-RV32-NEXT: vsrl.vx v11, v9, a3 ; LMULMAX1-RV32-NEXT: lui a4, 16 ; LMULMAX1-RV32-NEXT: addi a4, a4, -256 @@ -606,7 +606,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 24 ; LMULMAX1-RV32-NEXT: lui a5, 4080 ; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a5 -; LMULMAX1-RV32-NEXT: addi a2, zero, 5 +; LMULMAX1-RV32-NEXT: li a2, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a2 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -618,7 +618,7 @@ ; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v12 ; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11 ; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV32-NEXT: addi a2, zero, 255 +; LMULMAX1-RV32-NEXT: li a2, 255 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmv.v.x v11, a2 ; LMULMAX1-RV32-NEXT: vmerge.vim v11, v11, 0, v0 @@ -673,9 +673,9 @@ ; LMULMAX1-RV64-NEXT: addi t1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v8, (t1) ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) -; LMULMAX1-RV64-NEXT: addi a7, zero, 56 +; LMULMAX1-RV64-NEXT: li a7, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a7 -; LMULMAX1-RV64-NEXT: addi t0, zero, 40 +; LMULMAX1-RV64-NEXT: li t0, 40 ; LMULMAX1-RV64-NEXT: vsrl.vx v11, v8, t0 ; LMULMAX1-RV64-NEXT: lui a4, 16 ; LMULMAX1-RV64-NEXT: addiw a4, a4, -256 @@ -685,7 +685,7 @@ ; LMULMAX1-RV64-NEXT: lui a6, 4080 ; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a6 ; LMULMAX1-RV64-NEXT: vsrl.vi v12, v8, 8 -; LMULMAX1-RV64-NEXT: addi a5, zero, 255 +; LMULMAX1-RV64-NEXT: li a5, 255 ; LMULMAX1-RV64-NEXT: slli a2, a5, 24 ; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a2 ; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll @@ -63,7 +63,7 @@ define fastcc <32 x i1> @ret_mask_v32i1(<32 x i1>* %p) { ; CHECK-LABEL: ret_mask_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret @@ -75,7 +75,7 @@ define fastcc <64 x i32> @ret_split_v64i32(<64 x i32>* %x) { ; LMULMAX8-LABEL: ret_split_v64i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a1, zero, 32 +; LMULMAX8-NEXT: li a1, 32 ; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: addi a0, a0, 128 @@ -101,7 +101,7 @@ define fastcc <128 x i32> @ret_split_v128i32(<128 x i32>* %x) { ; LMULMAX8-LABEL: ret_split_v128i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v8, (a1) ; 
LMULMAX8-NEXT: addi a2, a1, 128 @@ -200,7 +200,7 @@ define fastcc <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) { ; CHECK-LABEL: ret_v32i1_param_v32i1_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -211,7 +211,7 @@ define fastcc <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) { ; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v24, (a0) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 @@ -251,7 +251,7 @@ ; LMULMAX8-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: vmv8r.v v24, v8 -; LMULMAX8-NEXT: addi a1, zero, 2 +; LMULMAX8-NEXT: li a1, 2 ; LMULMAX8-NEXT: vmv8r.v v8, v16 ; LMULMAX8-NEXT: vmv8r.v v16, v24 ; LMULMAX8-NEXT: call ext2@plt @@ -267,7 +267,7 @@ ; LMULMAX4-NEXT: .cfi_offset ra, -8 ; LMULMAX4-NEXT: vmv4r.v v24, v12 ; LMULMAX4-NEXT: vmv4r.v v28, v8 -; LMULMAX4-NEXT: addi a1, zero, 2 +; LMULMAX4-NEXT: li a1, 2 ; LMULMAX4-NEXT: vmv4r.v v8, v16 ; LMULMAX4-NEXT: vmv4r.v v12, v20 ; LMULMAX4-NEXT: vmv4r.v v16, v28 @@ -292,11 +292,11 @@ ; LMULMAX8-NEXT: addi s0, sp, 384 ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v24, (a0) ; LMULMAX8-NEXT: addi a0, sp, 128 -; LMULMAX8-NEXT: addi a2, zero, 42 +; LMULMAX8-NEXT: li a2, 42 ; LMULMAX8-NEXT: addi a3, sp, 128 ; LMULMAX8-NEXT: vse32.v v8, (a3) ; LMULMAX8-NEXT: vmv8r.v v8, v24 @@ -325,7 +325,7 @@ ; LMULMAX4-NEXT: addi a0, sp, 192 ; LMULMAX4-NEXT: vse32.v v12, (a0) ; LMULMAX4-NEXT: addi a0, sp, 128 -; LMULMAX4-NEXT: addi a3, zero, 42 +; LMULMAX4-NEXT: li a3, 42 ; LMULMAX4-NEXT: addi a1, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a1) ; LMULMAX4-NEXT: vmv4r.v v8, v24 @@ -346,7 +346,7 @@ define fastcc <32 x i32> @vector_arg_indirect_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) { ; LMULMAX8-LABEL: vector_arg_indirect_stack: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v16, (t2) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 @@ -378,21 +378,21 @@ ; LMULMAX8-NEXT: addi s0, sp, 384 ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 -; LMULMAX8-NEXT: addi a1, zero, 1 -; LMULMAX8-NEXT: addi a2, zero, 2 -; LMULMAX8-NEXT: addi a3, zero, 3 -; LMULMAX8-NEXT: addi a4, zero, 4 -; LMULMAX8-NEXT: addi a5, zero, 5 -; LMULMAX8-NEXT: addi a6, zero, 6 -; LMULMAX8-NEXT: addi a7, zero, 7 +; LMULMAX8-NEXT: li a1, 1 +; LMULMAX8-NEXT: li a2, 2 +; LMULMAX8-NEXT: li a3, 3 +; LMULMAX8-NEXT: li a4, 4 +; LMULMAX8-NEXT: li a5, 5 +; LMULMAX8-NEXT: li a6, 6 +; LMULMAX8-NEXT: li a7, 7 ; LMULMAX8-NEXT: addi t2, sp, 128 -; LMULMAX8-NEXT: addi t3, zero, 8 +; LMULMAX8-NEXT: li t3, 8 ; LMULMAX8-NEXT: addi a0, sp, 128 ; LMULMAX8-NEXT: vse32.v v8, (a0) -; LMULMAX8-NEXT: mv a0, zero +; LMULMAX8-NEXT: li a0, 0 ; LMULMAX8-NEXT: vmv8r.v v16, v8 ; LMULMAX8-NEXT: call 
vector_arg_indirect_stack@plt ; LMULMAX8-NEXT: addi sp, s0, -384 @@ -416,18 +416,18 @@ ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vmv.v.i v8, 0 ; LMULMAX4-NEXT: vse32.v v8, (a0) -; LMULMAX4-NEXT: addi a1, zero, 1 -; LMULMAX4-NEXT: addi a2, zero, 2 -; LMULMAX4-NEXT: addi a3, zero, 3 -; LMULMAX4-NEXT: addi a4, zero, 4 -; LMULMAX4-NEXT: addi a5, zero, 5 -; LMULMAX4-NEXT: addi a6, zero, 6 -; LMULMAX4-NEXT: addi a7, zero, 7 +; LMULMAX4-NEXT: li a1, 1 +; LMULMAX4-NEXT: li a2, 2 +; LMULMAX4-NEXT: li a3, 3 +; LMULMAX4-NEXT: li a4, 4 +; LMULMAX4-NEXT: li a5, 5 +; LMULMAX4-NEXT: li a6, 6 +; LMULMAX4-NEXT: li a7, 7 ; LMULMAX4-NEXT: addi t2, sp, 128 -; LMULMAX4-NEXT: addi t4, zero, 8 +; LMULMAX4-NEXT: li t4, 8 ; LMULMAX4-NEXT: addi a0, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a0) -; LMULMAX4-NEXT: mv a0, zero +; LMULMAX4-NEXT: li a0, 0 ; LMULMAX4-NEXT: vmv4r.v v12, v8 ; LMULMAX4-NEXT: vmv4r.v v16, v8 ; LMULMAX4-NEXT: vmv4r.v v20, v8 @@ -447,7 +447,7 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: addi sp, sp, -16 ; LMULMAX8-NEXT: .cfi_def_cfa_offset 16 -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: addi a0, sp, 24 ; LMULMAX8-NEXT: vle32.v v24, (a0) @@ -484,28 +484,28 @@ ; LMULMAX8-NEXT: .cfi_def_cfa_offset 160 ; LMULMAX8-NEXT: sd ra, 152(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: addi a0, sp, 8 ; LMULMAX8-NEXT: vse32.v v8, (a0) -; LMULMAX8-NEXT: addi a0, zero, 1 +; LMULMAX8-NEXT: li a0, 1 ; LMULMAX8-NEXT: sd a0, 136(sp) -; LMULMAX8-NEXT: addi a0, zero, 13 -; LMULMAX8-NEXT: addi a1, zero, 1 -; LMULMAX8-NEXT: addi a2, zero, 2 -; LMULMAX8-NEXT: addi a3, zero, 3 -; LMULMAX8-NEXT: addi a4, zero, 4 -; LMULMAX8-NEXT: addi a5, zero, 5 -; LMULMAX8-NEXT: addi a6, zero, 6 -; LMULMAX8-NEXT: addi a7, zero, 7 -; LMULMAX8-NEXT: addi t2, zero, 8 -; LMULMAX8-NEXT: addi t3, zero, 9 -; LMULMAX8-NEXT: addi t4, zero, 10 -; LMULMAX8-NEXT: addi t5, zero, 11 -; LMULMAX8-NEXT: addi t6, zero, 12 +; LMULMAX8-NEXT: li a0, 13 +; LMULMAX8-NEXT: li a1, 1 +; LMULMAX8-NEXT: li a2, 2 +; LMULMAX8-NEXT: li a3, 3 +; LMULMAX8-NEXT: li a4, 4 +; LMULMAX8-NEXT: li a5, 5 +; LMULMAX8-NEXT: li a6, 6 +; LMULMAX8-NEXT: li a7, 7 +; LMULMAX8-NEXT: li t2, 8 +; LMULMAX8-NEXT: li t3, 9 +; LMULMAX8-NEXT: li t4, 10 +; LMULMAX8-NEXT: li t5, 11 +; LMULMAX8-NEXT: li t6, 12 ; LMULMAX8-NEXT: sd a0, 0(sp) -; LMULMAX8-NEXT: mv a0, zero +; LMULMAX8-NEXT: li a0, 0 ; LMULMAX8-NEXT: vmv8r.v v16, v8 ; LMULMAX8-NEXT: call vector_arg_direct_stack@plt ; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload @@ -518,29 +518,29 @@ ; LMULMAX4-NEXT: .cfi_def_cfa_offset 160 ; LMULMAX4-NEXT: sd ra, 152(sp) # 8-byte Folded Spill ; LMULMAX4-NEXT: .cfi_offset ra, -8 -; LMULMAX4-NEXT: addi a0, zero, 1 +; LMULMAX4-NEXT: li a0, 1 ; LMULMAX4-NEXT: sd a0, 136(sp) -; LMULMAX4-NEXT: addi a0, zero, 13 +; LMULMAX4-NEXT: li a0, 13 ; LMULMAX4-NEXT: sd a0, 0(sp) ; LMULMAX4-NEXT: addi a0, sp, 72 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vmv.v.i v8, 0 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: addi a0, sp, 8 -; LMULMAX4-NEXT: addi a1, zero, 1 -; LMULMAX4-NEXT: addi a2, zero, 2 -; LMULMAX4-NEXT: addi a3, zero, 3 -; LMULMAX4-NEXT: addi a4, zero, 4 -; LMULMAX4-NEXT: addi a5, zero, 5 -; LMULMAX4-NEXT: addi a6, zero, 6 -; LMULMAX4-NEXT: addi a7, zero, 7 -; LMULMAX4-NEXT: addi t2, zero, 
8 -; LMULMAX4-NEXT: addi t3, zero, 9 -; LMULMAX4-NEXT: addi t4, zero, 10 -; LMULMAX4-NEXT: addi t5, zero, 11 -; LMULMAX4-NEXT: addi t6, zero, 12 +; LMULMAX4-NEXT: li a1, 1 +; LMULMAX4-NEXT: li a2, 2 +; LMULMAX4-NEXT: li a3, 3 +; LMULMAX4-NEXT: li a4, 4 +; LMULMAX4-NEXT: li a5, 5 +; LMULMAX4-NEXT: li a6, 6 +; LMULMAX4-NEXT: li a7, 7 +; LMULMAX4-NEXT: li t2, 8 +; LMULMAX4-NEXT: li t3, 9 +; LMULMAX4-NEXT: li t4, 10 +; LMULMAX4-NEXT: li t5, 11 +; LMULMAX4-NEXT: li t6, 12 ; LMULMAX4-NEXT: vse32.v v8, (a0) -; LMULMAX4-NEXT: mv a0, zero +; LMULMAX4-NEXT: li a0, 0 ; LMULMAX4-NEXT: vmv4r.v v12, v8 ; LMULMAX4-NEXT: vmv4r.v v16, v8 ; LMULMAX4-NEXT: vmv4r.v v20, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -117,21 +117,21 @@ define <32 x i1> @ret_mask_v32i1(<32 x i1>* %p) { ; LMULMAX8-LABEL: ret_mask_v32i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a1, zero, 32 +; LMULMAX8-NEXT: li a1, 32 ; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX8-NEXT: vlm.v v0, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_mask_v32i1: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: addi a1, zero, 32 +; LMULMAX4-NEXT: li a1, 32 ; LMULMAX4-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX4-NEXT: vlm.v v0, (a0) ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_mask_v32i1: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a1, zero, 32 +; LMULMAX2-NEXT: li a1, 32 ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: ret @@ -151,7 +151,7 @@ define <64 x i32> @ret_split_v64i32(<64 x i32>* %x) { ; LMULMAX8-LABEL: ret_split_v64i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a1, zero, 32 +; LMULMAX8-NEXT: li a1, 32 ; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: addi a0, a0, 128 @@ -233,7 +233,7 @@ define <128 x i32> @ret_split_v128i32(<128 x i32>* %x) { ; LMULMAX8-LABEL: ret_split_v128i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v8, (a1) ; LMULMAX8-NEXT: addi a2, a1, 128 @@ -549,21 +549,21 @@ define <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) { ; LMULMAX8-LABEL: ret_v32i1_param_v32i1_v32i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; LMULMAX8-NEXT: vmand.mm v0, v0, v8 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i1_param_v32i1_v32i1: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: addi a0, zero, 32 +; LMULMAX4-NEXT: li a0, 32 ; LMULMAX4-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; LMULMAX4-NEXT: vmand.mm v0, v0, v8 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_v32i1_param_v32i1_v32i1: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a0, zero, 32 +; LMULMAX2-NEXT: li a0, 32 ; LMULMAX2-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; LMULMAX2-NEXT: vmand.mm v0, v0, v8 ; LMULMAX2-NEXT: ret @@ -581,7 +581,7 @@ define <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) { ; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v24, (a0) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 @@ -690,7 +690,7 @@ ; 
LMULMAX8-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: vmv8r.v v24, v8 -; LMULMAX8-NEXT: addi a1, zero, 2 +; LMULMAX8-NEXT: li a1, 2 ; LMULMAX8-NEXT: vmv8r.v v8, v16 ; LMULMAX8-NEXT: vmv8r.v v16, v24 ; LMULMAX8-NEXT: call ext2@plt @@ -706,7 +706,7 @@ ; LMULMAX4-NEXT: .cfi_offset ra, -8 ; LMULMAX4-NEXT: vmv4r.v v24, v12 ; LMULMAX4-NEXT: vmv4r.v v28, v8 -; LMULMAX4-NEXT: addi a1, zero, 2 +; LMULMAX4-NEXT: li a1, 2 ; LMULMAX4-NEXT: vmv4r.v v8, v16 ; LMULMAX4-NEXT: vmv4r.v v12, v20 ; LMULMAX4-NEXT: vmv4r.v v16, v28 @@ -726,7 +726,7 @@ ; LMULMAX2-NEXT: vmv2r.v v26, v12 ; LMULMAX2-NEXT: vmv2r.v v28, v10 ; LMULMAX2-NEXT: vmv2r.v v30, v8 -; LMULMAX2-NEXT: addi a1, zero, 2 +; LMULMAX2-NEXT: li a1, 2 ; LMULMAX2-NEXT: vmv2r.v v8, v16 ; LMULMAX2-NEXT: vmv2r.v v10, v18 ; LMULMAX2-NEXT: vmv2r.v v12, v20 @@ -754,7 +754,7 @@ ; LMULMAX1-NEXT: vmv1r.v v29, v10 ; LMULMAX1-NEXT: vmv1r.v v30, v9 ; LMULMAX1-NEXT: vmv1r.v v31, v8 -; LMULMAX1-NEXT: addi a1, zero, 2 +; LMULMAX1-NEXT: li a1, 2 ; LMULMAX1-NEXT: vmv1r.v v8, v16 ; LMULMAX1-NEXT: vmv1r.v v9, v17 ; LMULMAX1-NEXT: vmv1r.v v10, v18 @@ -791,11 +791,11 @@ ; LMULMAX8-NEXT: addi s0, sp, 384 ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v24, (a0) ; LMULMAX8-NEXT: addi a0, sp, 128 -; LMULMAX8-NEXT: addi a2, zero, 42 +; LMULMAX8-NEXT: li a2, 42 ; LMULMAX8-NEXT: addi a3, sp, 128 ; LMULMAX8-NEXT: vse32.v v8, (a3) ; LMULMAX8-NEXT: vmv8r.v v8, v24 @@ -824,7 +824,7 @@ ; LMULMAX4-NEXT: addi a0, sp, 192 ; LMULMAX4-NEXT: vse32.v v12, (a0) ; LMULMAX4-NEXT: addi a0, sp, 128 -; LMULMAX4-NEXT: addi a3, zero, 42 +; LMULMAX4-NEXT: li a3, 42 ; LMULMAX4-NEXT: addi a1, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a1) ; LMULMAX4-NEXT: vmv4r.v v8, v24 @@ -862,7 +862,7 @@ ; LMULMAX2-NEXT: addi a0, sp, 160 ; LMULMAX2-NEXT: vse32.v v10, (a0) ; LMULMAX2-NEXT: addi a0, sp, 128 -; LMULMAX2-NEXT: addi a5, zero, 42 +; LMULMAX2-NEXT: li a5, 42 ; LMULMAX2-NEXT: addi a1, sp, 128 ; LMULMAX2-NEXT: vse32.v v8, (a1) ; LMULMAX2-NEXT: vmv2r.v v8, v24 @@ -918,7 +918,7 @@ ; LMULMAX1-NEXT: vse32.v v10, (a1) ; LMULMAX1-NEXT: addi a1, sp, 144 ; LMULMAX1-NEXT: vse32.v v9, (a1) -; LMULMAX1-NEXT: addi a1, zero, 42 +; LMULMAX1-NEXT: li a1, 42 ; LMULMAX1-NEXT: sd a1, 8(sp) ; LMULMAX1-NEXT: sd a0, 0(sp) ; LMULMAX1-NEXT: addi a0, sp, 128 @@ -958,7 +958,7 @@ define <32 x i32> @split_vector_args(<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>, <32 x i32> %y, <32 x i32> %z) { ; LMULMAX8-LABEL: split_vector_args: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a1, zero, 32 +; LMULMAX8-NEXT: li a1, 32 ; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vadd.vv v8, v16, v8 @@ -1027,7 +1027,7 @@ ; LMULMAX8-NEXT: andi sp, sp, -128 ; LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX8-NEXT: vle32.v v8, (a0) -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v16, (a1) ; LMULMAX8-NEXT: addi a0, sp, 128 @@ -1183,7 +1183,7 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: addi sp, sp, -16 ; LMULMAX8-NEXT: .cfi_def_cfa_offset 16 -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: addi a0, sp, 16 ; LMULMAX8-NEXT: vle32.v v16, (a0) @@ -1268,20 +1268,20 @@ ; LMULMAX8-NEXT: .cfi_def_cfa_offset 144 ; 
LMULMAX8-NEXT: sd ra, 136(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse32.v v8, (sp) -; LMULMAX8-NEXT: addi a0, zero, 8 -; LMULMAX8-NEXT: addi a1, zero, 1 -; LMULMAX8-NEXT: addi a2, zero, 2 -; LMULMAX8-NEXT: addi a3, zero, 3 -; LMULMAX8-NEXT: addi a4, zero, 4 -; LMULMAX8-NEXT: addi a5, zero, 5 -; LMULMAX8-NEXT: addi a6, zero, 6 -; LMULMAX8-NEXT: addi a7, zero, 7 +; LMULMAX8-NEXT: li a0, 8 +; LMULMAX8-NEXT: li a1, 1 +; LMULMAX8-NEXT: li a2, 2 +; LMULMAX8-NEXT: li a3, 3 +; LMULMAX8-NEXT: li a4, 4 +; LMULMAX8-NEXT: li a5, 5 +; LMULMAX8-NEXT: li a6, 6 +; LMULMAX8-NEXT: li a7, 7 ; LMULMAX8-NEXT: sd a0, 128(sp) -; LMULMAX8-NEXT: mv a0, zero +; LMULMAX8-NEXT: li a0, 0 ; LMULMAX8-NEXT: vmv8r.v v16, v8 ; LMULMAX8-NEXT: call vector_arg_via_stack@plt ; LMULMAX8-NEXT: ld ra, 136(sp) # 8-byte Folded Reload @@ -1294,21 +1294,21 @@ ; LMULMAX4-NEXT: .cfi_def_cfa_offset 144 ; LMULMAX4-NEXT: sd ra, 136(sp) # 8-byte Folded Spill ; LMULMAX4-NEXT: .cfi_offset ra, -8 -; LMULMAX4-NEXT: addi a0, zero, 8 +; LMULMAX4-NEXT: li a0, 8 ; LMULMAX4-NEXT: sd a0, 128(sp) ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vmv.v.i v8, 0 ; LMULMAX4-NEXT: vse32.v v8, (sp) ; LMULMAX4-NEXT: addi a0, sp, 64 -; LMULMAX4-NEXT: addi a1, zero, 1 -; LMULMAX4-NEXT: addi a2, zero, 2 -; LMULMAX4-NEXT: addi a3, zero, 3 -; LMULMAX4-NEXT: addi a4, zero, 4 -; LMULMAX4-NEXT: addi a5, zero, 5 -; LMULMAX4-NEXT: addi a6, zero, 6 -; LMULMAX4-NEXT: addi a7, zero, 7 +; LMULMAX4-NEXT: li a1, 1 +; LMULMAX4-NEXT: li a2, 2 +; LMULMAX4-NEXT: li a3, 3 +; LMULMAX4-NEXT: li a4, 4 +; LMULMAX4-NEXT: li a5, 5 +; LMULMAX4-NEXT: li a6, 6 +; LMULMAX4-NEXT: li a7, 7 ; LMULMAX4-NEXT: vse32.v v8, (a0) -; LMULMAX4-NEXT: mv a0, zero +; LMULMAX4-NEXT: li a0, 0 ; LMULMAX4-NEXT: vmv4r.v v12, v8 ; LMULMAX4-NEXT: vmv4r.v v16, v8 ; LMULMAX4-NEXT: vmv4r.v v20, v8 @@ -1323,7 +1323,7 @@ ; LMULMAX2-NEXT: .cfi_def_cfa_offset 144 ; LMULMAX2-NEXT: sd ra, 136(sp) # 8-byte Folded Spill ; LMULMAX2-NEXT: .cfi_offset ra, -8 -; LMULMAX2-NEXT: addi a0, zero, 8 +; LMULMAX2-NEXT: li a0, 8 ; LMULMAX2-NEXT: sd a0, 128(sp) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 @@ -1333,15 +1333,15 @@ ; LMULMAX2-NEXT: addi a0, sp, 64 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: addi a0, sp, 32 -; LMULMAX2-NEXT: addi a1, zero, 1 -; LMULMAX2-NEXT: addi a2, zero, 2 -; LMULMAX2-NEXT: addi a3, zero, 3 -; LMULMAX2-NEXT: addi a4, zero, 4 -; LMULMAX2-NEXT: addi a5, zero, 5 -; LMULMAX2-NEXT: addi a6, zero, 6 -; LMULMAX2-NEXT: addi a7, zero, 7 +; LMULMAX2-NEXT: li a1, 1 +; LMULMAX2-NEXT: li a2, 2 +; LMULMAX2-NEXT: li a3, 3 +; LMULMAX2-NEXT: li a4, 4 +; LMULMAX2-NEXT: li a5, 5 +; LMULMAX2-NEXT: li a6, 6 +; LMULMAX2-NEXT: li a7, 7 ; LMULMAX2-NEXT: vse32.v v8, (a0) -; LMULMAX2-NEXT: mv a0, zero +; LMULMAX2-NEXT: li a0, 0 ; LMULMAX2-NEXT: vmv2r.v v10, v8 ; LMULMAX2-NEXT: vmv2r.v v12, v8 ; LMULMAX2-NEXT: vmv2r.v v14, v8 @@ -1360,7 +1360,7 @@ ; LMULMAX1-NEXT: .cfi_def_cfa_offset 144 ; LMULMAX1-NEXT: sd ra, 136(sp) # 8-byte Folded Spill ; LMULMAX1-NEXT: .cfi_offset ra, -8 -; LMULMAX1-NEXT: addi a0, zero, 8 +; LMULMAX1-NEXT: li a0, 8 ; LMULMAX1-NEXT: sd a0, 128(sp) ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vmv.v.i v8, 0 @@ -1378,15 +1378,15 @@ ; LMULMAX1-NEXT: addi a0, sp, 32 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, sp, 16 -; 
LMULMAX1-NEXT: addi a1, zero, 1 -; LMULMAX1-NEXT: addi a2, zero, 2 -; LMULMAX1-NEXT: addi a3, zero, 3 -; LMULMAX1-NEXT: addi a4, zero, 4 -; LMULMAX1-NEXT: addi a5, zero, 5 -; LMULMAX1-NEXT: addi a6, zero, 6 -; LMULMAX1-NEXT: addi a7, zero, 7 +; LMULMAX1-NEXT: li a1, 1 +; LMULMAX1-NEXT: li a2, 2 +; LMULMAX1-NEXT: li a3, 3 +; LMULMAX1-NEXT: li a4, 4 +; LMULMAX1-NEXT: li a5, 5 +; LMULMAX1-NEXT: li a6, 6 +; LMULMAX1-NEXT: li a7, 7 ; LMULMAX1-NEXT: vse32.v v8, (a0) -; LMULMAX1-NEXT: mv a0, zero +; LMULMAX1-NEXT: li a0, 0 ; LMULMAX1-NEXT: vmv1r.v v9, v8 ; LMULMAX1-NEXT: vmv1r.v v10, v8 ; LMULMAX1-NEXT: vmv1r.v v11, v8 @@ -1434,11 +1434,11 @@ ; LMULMAX8-NEXT: .cfi_def_cfa_offset 160 ; LMULMAX8-NEXT: sd ra, 152(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse32.v v8, (sp) -; LMULMAX8-NEXT: addi a0, zero, 8 +; LMULMAX8-NEXT: li a0, 8 ; LMULMAX8-NEXT: sd a0, 128(sp) ; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; LMULMAX8-NEXT: vmv.v.i v16, 0 @@ -1450,15 +1450,15 @@ ; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX8-NEXT: vmsne.vi v16, v17, 0 ; LMULMAX8-NEXT: addi a0, sp, 136 -; LMULMAX8-NEXT: addi a5, zero, 5 -; LMULMAX8-NEXT: addi a6, zero, 6 -; LMULMAX8-NEXT: addi a7, zero, 7 +; LMULMAX8-NEXT: li a5, 5 +; LMULMAX8-NEXT: li a6, 6 +; LMULMAX8-NEXT: li a7, 7 ; LMULMAX8-NEXT: vsm.v v16, (a0) -; LMULMAX8-NEXT: mv a0, zero -; LMULMAX8-NEXT: mv a1, zero -; LMULMAX8-NEXT: mv a2, zero -; LMULMAX8-NEXT: mv a3, zero -; LMULMAX8-NEXT: mv a4, zero +; LMULMAX8-NEXT: li a0, 0 +; LMULMAX8-NEXT: li a1, 0 +; LMULMAX8-NEXT: li a2, 0 +; LMULMAX8-NEXT: li a3, 0 +; LMULMAX8-NEXT: li a4, 0 ; LMULMAX8-NEXT: vmv8r.v v16, v8 ; LMULMAX8-NEXT: call vector_mask_arg_via_stack@plt ; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload @@ -1471,7 +1471,7 @@ ; LMULMAX4-NEXT: .cfi_def_cfa_offset 160 ; LMULMAX4-NEXT: sd ra, 152(sp) # 8-byte Folded Spill ; LMULMAX4-NEXT: .cfi_offset ra, -8 -; LMULMAX4-NEXT: addi a0, zero, 8 +; LMULMAX4-NEXT: li a0, 8 ; LMULMAX4-NEXT: sd a0, 128(sp) ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vmv.v.i v8, 0 @@ -1488,15 +1488,15 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX4-NEXT: vmsne.vi v12, v13, 0 ; LMULMAX4-NEXT: addi a0, sp, 136 -; LMULMAX4-NEXT: addi a5, zero, 5 -; LMULMAX4-NEXT: addi a6, zero, 6 -; LMULMAX4-NEXT: addi a7, zero, 7 +; LMULMAX4-NEXT: li a5, 5 +; LMULMAX4-NEXT: li a6, 6 +; LMULMAX4-NEXT: li a7, 7 ; LMULMAX4-NEXT: vsm.v v12, (a0) -; LMULMAX4-NEXT: mv a0, zero -; LMULMAX4-NEXT: mv a1, zero -; LMULMAX4-NEXT: mv a2, zero -; LMULMAX4-NEXT: mv a3, zero -; LMULMAX4-NEXT: mv a4, zero +; LMULMAX4-NEXT: li a0, 0 +; LMULMAX4-NEXT: li a1, 0 +; LMULMAX4-NEXT: li a2, 0 +; LMULMAX4-NEXT: li a3, 0 +; LMULMAX4-NEXT: li a4, 0 ; LMULMAX4-NEXT: vmv4r.v v12, v8 ; LMULMAX4-NEXT: vmv4r.v v16, v8 ; LMULMAX4-NEXT: vmv4r.v v20, v8 @@ -1511,7 +1511,7 @@ ; LMULMAX2-NEXT: .cfi_def_cfa_offset 160 ; LMULMAX2-NEXT: sd ra, 152(sp) # 8-byte Folded Spill ; LMULMAX2-NEXT: .cfi_offset ra, -8 -; LMULMAX2-NEXT: addi a0, zero, 8 +; LMULMAX2-NEXT: li a0, 8 ; LMULMAX2-NEXT: sd a0, 128(sp) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 @@ -1532,15 +1532,15 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v10, v11, 0 ; LMULMAX2-NEXT: addi a0, sp, 136 -; LMULMAX2-NEXT: addi a5, zero, 5 -; LMULMAX2-NEXT: addi a6, zero, 6 -; 
LMULMAX2-NEXT: addi a7, zero, 7 +; LMULMAX2-NEXT: li a5, 5 +; LMULMAX2-NEXT: li a6, 6 +; LMULMAX2-NEXT: li a7, 7 ; LMULMAX2-NEXT: vsm.v v10, (a0) -; LMULMAX2-NEXT: mv a0, zero -; LMULMAX2-NEXT: mv a1, zero -; LMULMAX2-NEXT: mv a2, zero -; LMULMAX2-NEXT: mv a3, zero -; LMULMAX2-NEXT: mv a4, zero +; LMULMAX2-NEXT: li a0, 0 +; LMULMAX2-NEXT: li a1, 0 +; LMULMAX2-NEXT: li a2, 0 +; LMULMAX2-NEXT: li a3, 0 +; LMULMAX2-NEXT: li a4, 0 ; LMULMAX2-NEXT: vmv2r.v v10, v8 ; LMULMAX2-NEXT: vmv2r.v v12, v8 ; LMULMAX2-NEXT: vmv2r.v v14, v8 @@ -1559,7 +1559,7 @@ ; LMULMAX1-NEXT: .cfi_def_cfa_offset 160 ; LMULMAX1-NEXT: sd ra, 152(sp) # 8-byte Folded Spill ; LMULMAX1-NEXT: .cfi_offset ra, -8 -; LMULMAX1-NEXT: addi a0, zero, 8 +; LMULMAX1-NEXT: li a0, 8 ; LMULMAX1-NEXT: sd a0, 128(sp) ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vmv.v.i v8, 0 @@ -1588,15 +1588,15 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v9, v10, 0 ; LMULMAX1-NEXT: addi a0, sp, 136 -; LMULMAX1-NEXT: addi a5, zero, 5 -; LMULMAX1-NEXT: addi a6, zero, 6 -; LMULMAX1-NEXT: addi a7, zero, 7 +; LMULMAX1-NEXT: li a5, 5 +; LMULMAX1-NEXT: li a6, 6 +; LMULMAX1-NEXT: li a7, 7 ; LMULMAX1-NEXT: vsm.v v9, (a0) -; LMULMAX1-NEXT: mv a0, zero -; LMULMAX1-NEXT: mv a1, zero -; LMULMAX1-NEXT: mv a2, zero -; LMULMAX1-NEXT: mv a3, zero -; LMULMAX1-NEXT: mv a4, zero +; LMULMAX1-NEXT: li a0, 0 +; LMULMAX1-NEXT: li a1, 0 +; LMULMAX1-NEXT: li a2, 0 +; LMULMAX1-NEXT: li a3, 0 +; LMULMAX1-NEXT: li a4, 0 ; LMULMAX1-NEXT: vmv1r.v v9, v8 ; LMULMAX1-NEXT: vmv1r.v v10, v8 ; LMULMAX1-NEXT: vmv1r.v v11, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll @@ -17,10 +17,10 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 85 +; LMULMAX2-RV32-NEXT: li a1, 85 ; LMULMAX2-RV32-NEXT: vand.vx v9, v9, a1 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v9 -; LMULMAX2-RV32-NEXT: addi a1, zero, 51 +; LMULMAX2-RV32-NEXT: li a1, 51 ; LMULMAX2-RV32-NEXT: vand.vx v9, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 @@ -43,10 +43,10 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 85 +; LMULMAX2-RV64-NEXT: li a1, 85 ; LMULMAX2-RV64-NEXT: vand.vx v9, v9, a1 ; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v9 -; LMULMAX2-RV64-NEXT: addi a1, zero, 51 +; LMULMAX2-RV64-NEXT: li a1, 51 ; LMULMAX2-RV64-NEXT: vand.vx v9, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 @@ -69,10 +69,10 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 -; LMULMAX1-RV32-NEXT: addi a1, zero, 85 +; LMULMAX1-RV32-NEXT: li a1, 85 ; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a1 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: addi a1, zero, 51 +; LMULMAX1-RV32-NEXT: li a1, 51 ; LMULMAX1-RV32-NEXT: vand.vx v9, v8, a1 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a1 @@ -95,10 +95,10 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 -; LMULMAX1-RV64-NEXT: addi a1, zero, 85 +; LMULMAX1-RV64-NEXT: li a1, 85 ; LMULMAX1-RV64-NEXT: 
vand.vx v9, v9, a1 ; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v9 -; LMULMAX1-RV64-NEXT: addi a1, zero, 51 +; LMULMAX1-RV64-NEXT: li a1, 51 ; LMULMAX1-RV64-NEXT: vand.vx v9, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a1 @@ -146,7 +146,7 @@ ; LMULMAX2-RV32-NEXT: lui a1, 1 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 257 +; LMULMAX2-RV32-NEXT: li a1, 257 ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) @@ -181,7 +181,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 1 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 257 +; LMULMAX2-RV64-NEXT: li a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) @@ -216,7 +216,7 @@ ; LMULMAX1-RV32-NEXT: lui a1, 1 ; LMULMAX1-RV32-NEXT: addi a1, a1, -241 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX1-RV32-NEXT: addi a1, zero, 257 +; LMULMAX1-RV32-NEXT: li a1, 257 ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) @@ -251,7 +251,7 @@ ; LMULMAX1-RV64-NEXT: lui a1, 1 ; LMULMAX1-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX1-RV64-NEXT: addi a1, zero, 257 +; LMULMAX1-RV64-NEXT: li a1, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) @@ -439,7 +439,7 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 16 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 -; LMULMAX2-RV32-NEXT: addi a1, zero, 32 +; LMULMAX2-RV32-NEXT: li a1, 32 ; LMULMAX2-RV32-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -477,7 +477,7 @@ ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v9 -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: ret @@ -496,7 +496,7 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 16 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 -; LMULMAX2-RV64-NEXT: addi a1, zero, 32 +; LMULMAX2-RV64-NEXT: li a1, 32 ; LMULMAX2-RV64-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 @@ -541,7 +541,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 16 ; LMULMAX2-RV64-NEXT: addi a1, a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret @@ -560,7 +560,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 16 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: addi a1, zero, 32 +; LMULMAX1-RV32-NEXT: li a1, 32 ; LMULMAX1-RV32-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -598,7 +598,7 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: addi a1, zero, 56 +; LMULMAX1-RV32-NEXT: li a1, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a1 ; 
LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: ret @@ -617,7 +617,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 16 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 -; LMULMAX1-RV64-NEXT: addi a1, zero, 32 +; LMULMAX1-RV64-NEXT: li a1, 32 ; LMULMAX1-RV64-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 @@ -662,7 +662,7 @@ ; LMULMAX1-RV64-NEXT: slli a1, a1, 16 ; LMULMAX1-RV64-NEXT: addi a1, a1, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX1-RV64-NEXT: addi a1, zero, 56 +; LMULMAX1-RV64-NEXT: li a1, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: ret @@ -677,7 +677,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind { ; LMULMAX2-RV32-LABEL: ctlz_v32i8: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: addi a1, zero, 32 +; LMULMAX2-RV32-NEXT: li a1, 32 ; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 @@ -688,10 +688,10 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 85 +; LMULMAX2-RV32-NEXT: li a1, 85 ; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: addi a1, zero, 51 +; LMULMAX2-RV32-NEXT: li a1, 51 ; LMULMAX2-RV32-NEXT: vand.vx v10, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 @@ -704,7 +704,7 @@ ; ; LMULMAX2-RV64-LABEL: ctlz_v32i8: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: addi a1, zero, 32 +; LMULMAX2-RV64-NEXT: li a1, 32 ; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 @@ -715,10 +715,10 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 85 +; LMULMAX2-RV64-NEXT: li a1, 85 ; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1 ; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v10 -; LMULMAX2-RV64-NEXT: addi a1, zero, 51 +; LMULMAX2-RV64-NEXT: li a1, 51 ; LMULMAX2-RV64-NEXT: vand.vx v10, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 @@ -743,10 +743,10 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX1-RV32-NEXT: addi a2, zero, 85 +; LMULMAX1-RV32-NEXT: li a2, 85 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: addi a3, zero, 51 +; LMULMAX1-RV32-NEXT: li a3, 51 ; LMULMAX1-RV32-NEXT: vand.vx v10, v8, a3 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a3 @@ -789,10 +789,10 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX1-RV64-NEXT: addi a2, zero, 85 +; LMULMAX1-RV64-NEXT: li a2, 85 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2 ; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v10 -; LMULMAX1-RV64-NEXT: addi a3, zero, 51 +; LMULMAX1-RV64-NEXT: li a3, 51 ; LMULMAX1-RV64-NEXT: vand.vx v10, v8, a3 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a3 @@ -858,7 +858,7 @@ ; LMULMAX2-RV32-NEXT: lui a1, 1 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 257 +; 
LMULMAX2-RV32-NEXT: li a1, 257 ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) @@ -893,7 +893,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 1 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 257 +; LMULMAX2-RV64-NEXT: li a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) @@ -930,7 +930,7 @@ ; LMULMAX1-RV32-NEXT: lui a4, 1 ; LMULMAX1-RV32-NEXT: addi a4, a4, -241 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a4 -; LMULMAX1-RV32-NEXT: addi a5, zero, 257 +; LMULMAX1-RV32-NEXT: li a5, 257 ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a5 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1 @@ -989,7 +989,7 @@ ; LMULMAX1-RV64-NEXT: lui a4, 1 ; LMULMAX1-RV64-NEXT: addiw a4, a4, -241 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4 -; LMULMAX1-RV64-NEXT: addi a5, zero, 257 +; LMULMAX1-RV64-NEXT: li a5, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a5 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 @@ -1251,7 +1251,7 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 16 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: addi a1, zero, 32 +; LMULMAX2-RV32-NEXT: li a1, 32 ; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -1289,7 +1289,7 @@ ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: ret @@ -1308,7 +1308,7 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 16 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-RV64-NEXT: addi a1, zero, 32 +; LMULMAX2-RV64-NEXT: li a1, 32 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 @@ -1353,7 +1353,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 16 ; LMULMAX2-RV64-NEXT: addi a1, a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret @@ -1374,7 +1374,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 16 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: addi a2, zero, 32 +; LMULMAX1-RV32-NEXT: li a2, 32 ; LMULMAX1-RV32-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1412,7 +1412,7 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v14, a3 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v14 -; LMULMAX1-RV32-NEXT: addi a3, zero, 56 +; LMULMAX1-RV32-NEXT: li a3, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a3 ; LMULMAX1-RV32-NEXT: vsrl.vi v15, v9, 1 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v15 @@ -1459,7 +1459,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 16 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV64-NEXT: addi a6, zero, 32 +; LMULMAX1-RV64-NEXT: li a6, 32 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a6 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vxor.vi 
v8, v8, -1 @@ -1504,7 +1504,7 @@ ; LMULMAX1-RV64-NEXT: slli a2, a2, 16 ; LMULMAX1-RV64-NEXT: addi a2, a2, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a2 -; LMULMAX1-RV64-NEXT: addi a1, zero, 56 +; LMULMAX1-RV64-NEXT: li a1, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll @@ -10,10 +10,10 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsrl.vi v9, v8, 1 -; CHECK-NEXT: addi a1, zero, 85 +; CHECK-NEXT: li a1, 85 ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: addi a1, zero, 51 +; CHECK-NEXT: li a1, 51 ; CHECK-NEXT: vand.vx v9, v8, a1 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a1 @@ -52,7 +52,7 @@ ; LMULMAX2-RV32-NEXT: lui a1, 1 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 257 +; LMULMAX2-RV32-NEXT: li a1, 257 ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) @@ -78,7 +78,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 1 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 257 +; LMULMAX2-RV64-NEXT: li a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) @@ -104,7 +104,7 @@ ; LMULMAX1-RV32-NEXT: lui a1, 1 ; LMULMAX1-RV32-NEXT: addi a1, a1, -241 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX1-RV32-NEXT: addi a1, zero, 257 +; LMULMAX1-RV32-NEXT: li a1, 257 ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) @@ -130,7 +130,7 @@ ; LMULMAX1-RV64-NEXT: lui a1, 1 ; LMULMAX1-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX1-RV64-NEXT: addi a1, zero, 257 +; LMULMAX1-RV64-NEXT: li a1, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) @@ -295,7 +295,7 @@ ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v9 -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: ret @@ -345,7 +345,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 16 ; LMULMAX2-RV64-NEXT: addi a1, a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret @@ -385,7 +385,7 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: addi a1, zero, 56 +; LMULMAX1-RV32-NEXT: li a1, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: ret @@ -435,7 +435,7 @@ ; LMULMAX1-RV64-NEXT: slli a1, a1, 16 ; LMULMAX1-RV64-NEXT: addi a1, a1, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX1-RV64-NEXT: addi a1, zero, 56 +; LMULMAX1-RV64-NEXT: li a1, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a1 ; 
LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: ret @@ -450,14 +450,14 @@ define void @ctpop_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: ctpop_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a1, zero, 32 +; LMULMAX2-NEXT: li a1, 32 ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX2-NEXT: addi a1, zero, 85 +; LMULMAX2-NEXT: li a1, 85 ; LMULMAX2-NEXT: vand.vx v10, v10, a1 ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 -; LMULMAX2-NEXT: addi a1, zero, 51 +; LMULMAX2-NEXT: li a1, 51 ; LMULMAX2-NEXT: vand.vx v10, v8, a1 ; LMULMAX2-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-NEXT: vand.vx v8, v8, a1 @@ -475,10 +475,10 @@ ; LMULMAX1-NEXT: vle8.v v8, (a1) ; LMULMAX1-NEXT: vle8.v v9, (a0) ; LMULMAX1-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX1-NEXT: addi a2, zero, 85 +; LMULMAX1-NEXT: li a2, 85 ; LMULMAX1-NEXT: vand.vx v10, v10, a2 ; LMULMAX1-NEXT: vsub.vv v8, v8, v10 -; LMULMAX1-NEXT: addi a3, zero, 51 +; LMULMAX1-NEXT: li a3, 51 ; LMULMAX1-NEXT: vand.vx v10, v8, a3 ; LMULMAX1-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-NEXT: vand.vx v8, v8, a3 @@ -528,7 +528,7 @@ ; LMULMAX2-RV32-NEXT: lui a1, 1 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 257 +; LMULMAX2-RV32-NEXT: li a1, 257 ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) @@ -554,7 +554,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 1 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 257 +; LMULMAX2-RV64-NEXT: li a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) @@ -582,7 +582,7 @@ ; LMULMAX1-RV32-NEXT: lui a4, 1 ; LMULMAX1-RV32-NEXT: addi a4, a4, -241 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a4 -; LMULMAX1-RV32-NEXT: addi a5, zero, 257 +; LMULMAX1-RV32-NEXT: li a5, 257 ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a5 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1 @@ -623,7 +623,7 @@ ; LMULMAX1-RV64-NEXT: lui a4, 1 ; LMULMAX1-RV64-NEXT: addiw a4, a4, -241 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4 -; LMULMAX1-RV64-NEXT: addi a5, zero, 257 +; LMULMAX1-RV64-NEXT: li a5, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a5 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 @@ -831,7 +831,7 @@ ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: ret @@ -881,7 +881,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 16 ; LMULMAX2-RV64-NEXT: addi a1, a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret @@ -923,7 +923,7 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v13, a2 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v13 -; LMULMAX1-RV32-NEXT: addi a2, zero, 56 +; LMULMAX1-RV32-NEXT: li a2, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a2 ; LMULMAX1-RV32-NEXT: vsrl.vi v14, v9, 1 ; LMULMAX1-RV32-NEXT: vand.vv v11, v14, v11 @@ -988,7 +988,7 @@ ; LMULMAX1-RV64-NEXT: slli a5, a5, 16 ; LMULMAX1-RV64-NEXT: addi a5, a5, 257 ; 
LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a5 -; LMULMAX1-RV64-NEXT: addi a1, zero, 56 +; LMULMAX1-RV64-NEXT: li a1, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll @@ -9,15 +9,15 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX2-RV32-NEXT: vle8.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 1 +; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v9, v8, a1 ; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 85 +; LMULMAX2-RV32-NEXT: li a1, 85 ; LMULMAX2-RV32-NEXT: vand.vx v9, v9, a1 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v9 -; LMULMAX2-RV32-NEXT: addi a1, zero, 51 +; LMULMAX2-RV32-NEXT: li a1, 51 ; LMULMAX2-RV32-NEXT: vand.vx v9, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 @@ -32,15 +32,15 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 1 +; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v9, v8, a1 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 85 +; LMULMAX2-RV64-NEXT: li a1, 85 ; LMULMAX2-RV64-NEXT: vand.vx v9, v9, a1 ; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v9 -; LMULMAX2-RV64-NEXT: addi a1, zero, 51 +; LMULMAX2-RV64-NEXT: li a1, 51 ; LMULMAX2-RV64-NEXT: vand.vx v9, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 @@ -55,15 +55,15 @@ ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) -; LMULMAX1-RV32-NEXT: addi a1, zero, 1 +; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 ; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 -; LMULMAX1-RV32-NEXT: addi a1, zero, 85 +; LMULMAX1-RV32-NEXT: li a1, 85 ; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a1 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: addi a1, zero, 51 +; LMULMAX1-RV32-NEXT: li a1, 51 ; LMULMAX1-RV32-NEXT: vand.vx v9, v8, a1 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a1 @@ -78,15 +78,15 @@ ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) -; LMULMAX1-RV64-NEXT: addi a1, zero, 1 +; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 -; LMULMAX1-RV64-NEXT: addi a1, zero, 85 +; LMULMAX1-RV64-NEXT: li a1, 85 ; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a1 ; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v9 -; LMULMAX1-RV64-NEXT: addi a1, zero, 51 +; LMULMAX1-RV64-NEXT: li a1, 51 ; LMULMAX1-RV64-NEXT: vand.vx v9, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a1 @@ -109,7 +109,7 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 1 +; 
LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v9, v8, a1 ; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 @@ -129,7 +129,7 @@ ; LMULMAX2-RV32-NEXT: lui a1, 1 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 257 +; LMULMAX2-RV32-NEXT: li a1, 257 ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) @@ -139,7 +139,7 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 1 +; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v9, v8, a1 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v9 @@ -159,7 +159,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 1 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 257 +; LMULMAX2-RV64-NEXT: li a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) @@ -169,7 +169,7 @@ ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) -; LMULMAX1-RV32-NEXT: addi a1, zero, 1 +; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 ; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 @@ -189,7 +189,7 @@ ; LMULMAX1-RV32-NEXT: lui a1, 1 ; LMULMAX1-RV32-NEXT: addi a1, a1, -241 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX1-RV32-NEXT: addi a1, zero, 257 +; LMULMAX1-RV32-NEXT: li a1, 257 ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) @@ -199,7 +199,7 @@ ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) -; LMULMAX1-RV64-NEXT: addi a1, zero, 1 +; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 @@ -219,7 +219,7 @@ ; LMULMAX1-RV64-NEXT: lui a1, 1 ; LMULMAX1-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX1-RV64-NEXT: addi a1, zero, 257 +; LMULMAX1-RV64-NEXT: li a1, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) @@ -237,7 +237,7 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 1 +; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v9, v8, a1 ; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 @@ -268,7 +268,7 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 1 +; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v9, v8, a1 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v9 @@ -299,7 +299,7 @@ ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) -; LMULMAX1-RV32-NEXT: addi a1, zero, 1 +; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 ; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 @@ -330,7 +330,7 @@ ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: 
vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) -; LMULMAX1-RV64-NEXT: addi a1, zero, 1 +; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 @@ -369,7 +369,7 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 1 +; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v9, v8, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-RV32-NEXT: vmv.v.i v10, -1 @@ -407,7 +407,7 @@ ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v9 -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: ret @@ -416,7 +416,7 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 1 +; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v9, v8, a1 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v9 @@ -461,7 +461,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 16 ; LMULMAX2-RV64-NEXT: addi a1, a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret @@ -470,7 +470,7 @@ ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV32-NEXT: addi a1, zero, 1 +; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1 @@ -508,7 +508,7 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: addi a1, zero, 56 +; LMULMAX1-RV32-NEXT: li a1, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: ret @@ -517,7 +517,7 @@ ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV64-NEXT: addi a1, zero, 1 +; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 @@ -562,7 +562,7 @@ ; LMULMAX1-RV64-NEXT: slli a1, a1, 16 ; LMULMAX1-RV64-NEXT: addi a1, a1, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX1-RV64-NEXT: addi a1, zero, 56 +; LMULMAX1-RV64-NEXT: li a1, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: ret @@ -577,18 +577,18 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind { ; LMULMAX2-RV32-LABEL: cttz_v32i8: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: addi a1, zero, 32 +; LMULMAX2-RV32-NEXT: li a1, 32 ; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle8.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 1 +; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 ; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 85 +; 
LMULMAX2-RV32-NEXT: li a1, 85 ; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: addi a1, zero, 51 +; LMULMAX2-RV32-NEXT: li a1, 51 ; LMULMAX2-RV32-NEXT: vand.vx v10, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 @@ -601,18 +601,18 @@ ; ; LMULMAX2-RV64-LABEL: cttz_v32i8: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: addi a1, zero, 32 +; LMULMAX2-RV64-NEXT: li a1, 32 ; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 1 +; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 85 +; LMULMAX2-RV64-NEXT: li a1, 85 ; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1 ; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v10 -; LMULMAX2-RV64-NEXT: addi a1, zero, 51 +; LMULMAX2-RV64-NEXT: li a1, 51 ; LMULMAX2-RV64-NEXT: vand.vx v10, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 @@ -629,15 +629,15 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle8.v v9, (a0) -; LMULMAX1-RV32-NEXT: addi a2, zero, 1 +; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a2 ; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX1-RV32-NEXT: addi a3, zero, 85 +; LMULMAX1-RV32-NEXT: li a3, 85 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: addi a4, zero, 51 +; LMULMAX1-RV32-NEXT: li a4, 51 ; LMULMAX1-RV32-NEXT: vand.vx v10, v8, a4 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a4 @@ -668,15 +668,15 @@ ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle8.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle8.v v9, (a0) -; LMULMAX1-RV64-NEXT: addi a2, zero, 1 +; LMULMAX1-RV64-NEXT: li a2, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a2 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX1-RV64-NEXT: addi a3, zero, 85 +; LMULMAX1-RV64-NEXT: li a3, 85 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 ; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v10 -; LMULMAX1-RV64-NEXT: addi a4, zero, 51 +; LMULMAX1-RV64-NEXT: li a4, 51 ; LMULMAX1-RV64-NEXT: vand.vx v10, v8, a4 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4 @@ -713,7 +713,7 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 1 +; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 ; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 @@ -733,7 +733,7 @@ ; LMULMAX2-RV32-NEXT: lui a1, 1 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 ; LMULMAX2-RV32-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 257 +; LMULMAX2-RV32-NEXT: li a1, 257 ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) @@ -743,7 +743,7 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 1 +; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 ; 
LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 @@ -763,7 +763,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 1 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -241 ; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 257 +; LMULMAX2-RV64-NEXT: li a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) @@ -775,7 +775,7 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) -; LMULMAX1-RV32-NEXT: addi a6, zero, 1 +; LMULMAX1-RV32-NEXT: li a6, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a6 ; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 @@ -795,7 +795,7 @@ ; LMULMAX1-RV32-NEXT: lui a5, 1 ; LMULMAX1-RV32-NEXT: addi a5, a5, -241 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a5 -; LMULMAX1-RV32-NEXT: addi a2, zero, 257 +; LMULMAX1-RV32-NEXT: li a2, 257 ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a2 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v9, a6 @@ -823,7 +823,7 @@ ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) -; LMULMAX1-RV64-NEXT: addi a6, zero, 1 +; LMULMAX1-RV64-NEXT: li a6, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a6 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 @@ -843,7 +843,7 @@ ; LMULMAX1-RV64-NEXT: lui a5, 1 ; LMULMAX1-RV64-NEXT: addiw a5, a5, -241 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a5 -; LMULMAX1-RV64-NEXT: addi a2, zero, 257 +; LMULMAX1-RV64-NEXT: li a2, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a2 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a6 @@ -877,7 +877,7 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 1 +; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 ; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 @@ -908,7 +908,7 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 1 +; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 @@ -941,7 +941,7 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) -; LMULMAX1-RV32-NEXT: addi a6, zero, 1 +; LMULMAX1-RV32-NEXT: li a6, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a6 ; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 @@ -990,7 +990,7 @@ ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle32.v v9, (a0) -; LMULMAX1-RV64-NEXT: addi a6, zero, 1 +; LMULMAX1-RV64-NEXT: li a6, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a6 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 @@ -1045,7 +1045,7 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 1 +; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmv.v.i v12, -1 @@ -1083,7 +1083,7 @@ ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; 
LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: addi a1, zero, 56 +; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: ret @@ -1092,7 +1092,7 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 1 +; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 ; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 @@ -1137,7 +1137,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 16 ; LMULMAX2-RV64-NEXT: addi a1, a1, 257 ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 -; LMULMAX2-RV64-NEXT: addi a1, zero, 56 +; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret @@ -1148,7 +1148,7 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v9, (a0) -; LMULMAX1-RV32-NEXT: addi a2, zero, 1 +; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a2 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmv.v.i v11, -1 @@ -1186,7 +1186,7 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v14, a3 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v14 -; LMULMAX1-RV32-NEXT: addi a3, zero, 56 +; LMULMAX1-RV32-NEXT: li a3, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a3 ; LMULMAX1-RV32-NEXT: vsub.vx v15, v9, a2 ; LMULMAX1-RV32-NEXT: vxor.vv v9, v9, v11 @@ -1213,7 +1213,7 @@ ; LMULMAX1-RV64-NEXT: addi a7, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v8, (a7) ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) -; LMULMAX1-RV64-NEXT: addi a6, zero, 1 +; LMULMAX1-RV64-NEXT: li a6, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a6 ; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 @@ -1258,7 +1258,7 @@ ; LMULMAX1-RV64-NEXT: slli a2, a2, 16 ; LMULMAX1-RV64-NEXT: addi a2, a2, 257 ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a2 -; LMULMAX1-RV64-NEXT: addi a1, zero, 56 +; LMULMAX1-RV64-NEXT: li a1, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a6 ; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll @@ -94,7 +94,7 @@ define i1 @extractelt_v32i1(<32 x i8>* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -113,7 +113,7 @@ define i1 @extractelt_v64i1(<64 x i8>* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_v64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -132,7 +132,7 @@ define i1 @extractelt_v128i1(<128 x i8>* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_v128i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 128 +; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -158,7 +158,7 @@ ; RV32-NEXT: andi sp, sp, -128 ; RV32-NEXT: andi a1, a1, 255 ; RV32-NEXT: addi a2, a0, 128 -; RV32-NEXT: addi a3, 
zero, 128 +; RV32-NEXT: li a3, 128 ; RV32-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; RV32-NEXT: vle8.v v16, (a0) ; RV32-NEXT: vle8.v v24, (a2) @@ -190,7 +190,7 @@ ; RV64-NEXT: andi sp, sp, -128 ; RV64-NEXT: andi a1, a1, 255 ; RV64-NEXT: addi a2, a0, 128 -; RV64-NEXT: addi a3, zero, 128 +; RV64-NEXT: li a3, 128 ; RV64-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; RV64-NEXT: vle8.v v16, (a0) ; RV64-NEXT: vle8.v v24, (a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -212,7 +212,7 @@ define void @extract_v8i1_v64i1_0(<64 x i1>* %x, <8 x i1>* %y) { ; LMULMAX2-LABEL: extract_v8i1_v64i1_0: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -235,7 +235,7 @@ define void @extract_v8i1_v64i1_8(<64 x i1>* %x, <8 x i1>* %y) { ; LMULMAX2-LABEL: extract_v8i1_v64i1_8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu @@ -263,7 +263,7 @@ ; LMULMAX2-LABEL: extract_v8i1_v64i1_48: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a0, a0, 4 -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu @@ -338,7 +338,7 @@ define void @extract_v2i1_v64i1_0(<64 x i1>* %x, <2 x i1>* %y) { ; LMULMAX2-LABEL: extract_v2i1_v64i1_0: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf8, ta, mu @@ -377,7 +377,7 @@ define void @extract_v2i1_v64i1_2(<64 x i1>* %x, <2 x i1>* %y) { ; LMULMAX2-LABEL: extract_v2i1_v64i1_2: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: vmv.v.i v8, 0 @@ -427,7 +427,7 @@ ; LMULMAX2-LABEL: extract_v2i1_v64i1_42: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a0, a0, 4 -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: vmv.v.i v8, 0 @@ -568,7 +568,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: addi a1, zero, 42 +; CHECK-NEXT: li a1, 42 ; CHECK-NEXT: vsetivli zero, 2, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll @@ -49,7 +49,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 @@ -110,7 +110,7 @@ define i8 @extractelt_v32i8(<32 x i8>* %x) 
nounwind { ; CHECK-LABEL: extractelt_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu @@ -158,7 +158,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; RV32-NEXT: vslidedown.vi v8, v8, 3 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret @@ -298,7 +298,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vslidedown.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret @@ -369,7 +369,7 @@ define i8 @extractelt_v32i8_idx(<32 x i8>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v32i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu @@ -420,7 +420,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; RV32-NEXT: vslidedown.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret @@ -575,7 +575,7 @@ ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vslidedown.vi v8, v8, 1 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: vmv.x.s a2, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll @@ -80,7 +80,7 @@ define i64 @bitcast_v4f16_i64(<4 x half> %a) { ; RV32-FP-LABEL: bitcast_v4f16_i64: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: addi a0, zero, 32 +; RV32-FP-NEXT: li a0, 32 ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 @@ -99,7 +99,7 @@ define i64 @bitcast_v2f32_i64(<2 x float> %a) { ; RV32-FP-LABEL: bitcast_v2f32_i64: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: addi a0, zero, 32 +; RV32-FP-NEXT: li a0, 32 ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 @@ -118,7 +118,7 @@ define i64 @bitcast_v1f64_i64(<1 x double> %a) { ; RV32-FP-LABEL: bitcast_v1f64_i64: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: addi a0, zero, 32 +; RV32-FP-NEXT: li a0, 32 ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -57,19 +57,19 @@ ; ; LMULMAX2-LABEL: hang_when_merging_stores_after_legalization: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a0, zero, 2 +; LMULMAX2-NEXT: li a0, 2 ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-NEXT: vmv.s.x v0, a0 ; LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-NEXT: vrgather.vi v12, v8, 0 ; LMULMAX2-NEXT: vrgather.vi v12, v9, 3, v0.t -; LMULMAX2-NEXT: addi a0, zero, 8 +; LMULMAX2-NEXT: li a0, 8 ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf8, 
ta, mu ; LMULMAX2-NEXT: vmv.s.x v0, a0 ; LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-NEXT: vrgather.vi v8, v10, 0 ; LMULMAX2-NEXT: vrgather.vi v8, v11, 3, v0.t -; LMULMAX2-NEXT: addi a0, zero, 3 +; LMULMAX2-NEXT: li a0, 3 ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-NEXT: vmv.s.x v0, a0 ; LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -251,7 +251,7 @@ define void @fcmp_ule_vv_v32f16(<32 x half>* %x, <32 x half>* %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ule_vv_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v12, (a1) @@ -269,7 +269,7 @@ define void @fcmp_ule_vv_v32f16_nonans(<32 x half>* %x, <32 x half>* %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ule_vv_v32f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v12, (a1) @@ -352,7 +352,7 @@ define void @fcmp_ugt_vv_v64f16(<64 x half>* %x, <64 x half>* %y, <64 x i1>* %z) { ; CHECK-LABEL: fcmp_ugt_vv_v64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) @@ -370,7 +370,7 @@ define void @fcmp_ugt_vv_v64f16_nonans(<64 x half>* %x, <64 x half>* %y, <64 x i1>* %z) { ; CHECK-LABEL: fcmp_ugt_vv_v64f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) @@ -387,7 +387,7 @@ define void @fcmp_ueq_vv_v32f32(<32 x float>* %x, <32 x float>* %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ueq_vv_v32f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v16, (a1) @@ -406,7 +406,7 @@ define void @fcmp_ueq_vv_v32f32_nonans(<32 x float>* %x, <32 x float>* %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ueq_vv_v32f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v16, (a1) @@ -757,7 +757,7 @@ define void @fcmp_ule_vf_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ule_vf_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 @@ -775,7 +775,7 @@ define void @fcmp_ule_vf_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ule_vf_v32f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfle.vf v12, v8, fa0 @@ -858,7 +858,7 @@ define void @fcmp_ugt_vf_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) { ; CHECK-LABEL: fcmp_ugt_vf_v64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; 
CHECK-NEXT: vmfle.vf v16, v8, fa0 @@ -876,7 +876,7 @@ define void @fcmp_ugt_vf_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z) { ; CHECK-LABEL: fcmp_ugt_vf_v64f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 @@ -893,7 +893,7 @@ define void @fcmp_ueq_vf_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ueq_vf_v32f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmflt.vf v16, v8, fa0 @@ -912,7 +912,7 @@ define void @fcmp_ueq_vf_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ueq_vf_v32f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v16, v8, fa0 @@ -1265,7 +1265,7 @@ define void @fcmp_ule_fv_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ule_fv_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v12, v8, fa0 @@ -1283,7 +1283,7 @@ define void @fcmp_ule_fv_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ule_fv_v32f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfge.vf v12, v8, fa0 @@ -1366,7 +1366,7 @@ define void @fcmp_ugt_fv_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) { ; CHECK-LABEL: fcmp_ugt_fv_v64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfge.vf v16, v8, fa0 @@ -1384,7 +1384,7 @@ define void @fcmp_ugt_fv_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z) { ; CHECK-LABEL: fcmp_ugt_fv_v64f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v16, v8, fa0 @@ -1401,7 +1401,7 @@ define void @fcmp_ueq_fv_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ueq_fv_v32f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 @@ -1420,7 +1420,7 @@ define void @fcmp_ueq_fv_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z) { ; CHECK-LABEL: fcmp_ueq_fv_v32f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v16, v8, fa0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -5,7 +5,7 @@ define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) { ; CHECK-LABEL: shuffle_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 11 +; CHECK-NEXT: li a0, 11 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli 
zero, 4, e16, mf2, ta, mu @@ -18,7 +18,7 @@ define <8 x float> @shuffle_v8f32(<8 x float> %x, <8 x float> %y) { ; CHECK-LABEL: shuffle_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 236 +; CHECK-NEXT: li a0, 236 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -31,7 +31,7 @@ define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) { ; RV32-LABEL: shuffle_fv_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 9 +; RV32-NEXT: li a0, 9 ; RV32-NEXT: lui a1, %hi(.LCPI2_0) ; RV32-NEXT: fld ft0, %lo(.LCPI2_0)(a1) ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu @@ -44,7 +44,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, %hi(.LCPI2_0) ; RV64-NEXT: fld ft0, %lo(.LCPI2_0)(a0) -; RV64-NEXT: addi a0, zero, 9 +; RV64-NEXT: li a0, 9 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu @@ -57,7 +57,7 @@ define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) { ; RV32-LABEL: shuffle_vf_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 6 +; RV32-NEXT: li a0, 6 ; RV32-NEXT: lui a1, %hi(.LCPI3_0) ; RV32-NEXT: fld ft0, %lo(.LCPI3_0)(a1) ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu @@ -70,7 +70,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, %hi(.LCPI3_0) ; RV64-NEXT: fld ft0, %lo(.LCPI3_0)(a0) -; RV64-NEXT: addi a0, zero, 6 +; RV64-NEXT: li a0, 6 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu @@ -139,7 +139,7 @@ ; RV32-NEXT: vle16.v v14, (a0) ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v8, v14 -; RV32-NEXT: addi a0, zero, 8 +; RV32-NEXT: li a0, 8 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu @@ -154,7 +154,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v14, (a0) ; RV64-NEXT: vrgather.vv v12, v8, v14 -; RV64-NEXT: addi a0, zero, 8 +; RV64-NEXT: li a0, 8 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu @@ -168,7 +168,7 @@ define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) { ; RV32-LABEL: vrgather_shuffle_xv_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 12 +; RV32-NEXT: li a0, 12 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu @@ -184,7 +184,7 @@ ; ; RV64-LABEL: vrgather_shuffle_xv_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 12 +; RV64-NEXT: li a0, 12 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu @@ -205,7 +205,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vid.v v10 -; RV32-NEXT: addi a0, zero, 3 +; RV32-NEXT: li a0, 3 ; RV32-NEXT: vmul.vx v12, v10, a0 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 @@ -221,7 +221,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vid.v v10 -; RV64-NEXT: addi a0, zero, 3 +; RV64-NEXT: li a0, 3 ; RV64-NEXT: vmul.vx v12, v10, a0 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll @@ -55,7 +55,7 @@ ; 
LMULMAX8-LABEL: gather_const_v64f16: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: addi a1, a0, 94 -; LMULMAX8-NEXT: addi a2, zero, 64 +; LMULMAX8-NEXT: li a2, 64 ; LMULMAX8-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; LMULMAX8-NEXT: vlse16.v v8, (a1), zero ; LMULMAX8-NEXT: vse16.v v8, (a0) @@ -94,7 +94,7 @@ ; LMULMAX8-LABEL: gather_const_v32f32: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: addi a1, a0, 68 -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vlse32.v v8, (a1), zero ; LMULMAX8-NEXT: vse32.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll @@ -156,7 +156,7 @@ define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind { ; CHECK-LABEL: insertelt_v64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v12, 0 @@ -174,7 +174,7 @@ define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind { ; RV32-LABEL: insertelt_idx_v64i1: ; RV32: # %bb.0: -; RV32-NEXT: addi a2, zero, 64 +; RV32-NEXT: li a2, 64 ; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vmv.v.i v12, 0 @@ -189,7 +189,7 @@ ; ; RV64-LABEL: insertelt_idx_v64i1: ; RV64: # %bb.0: -; RV64-NEXT: addi a2, zero, 64 +; RV64-NEXT: li a2, 64 ; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vmv.v.i v12, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -315,7 +315,7 @@ define void @insert_v32i1_v8i1_0(<32 x i1>* %vp, <8 x i1>* %svp) { ; LMULMAX2-LABEL: insert_v32i1_v8i1_0: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -347,7 +347,7 @@ define void @insert_v32i1_v8i1_16(<32 x i1>* %vp, <8 x i1>* %svp) { ; LMULMAX2-LABEL: insert_v32i1_v8i1_16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll @@ -97,7 +97,7 @@ define void @insertelt_v32i16(<32 x i16>* %x, i16 %y, i32 %idx) { ; RV32-LABEL: insertelt_v32i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a3, zero, 32 +; RV32-NEXT: li a3, 32 ; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, a1 @@ -110,7 +110,7 @@ ; ; RV64-LABEL: insertelt_v32i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a3, zero, 32 +; RV64-NEXT: li a3, 32 ; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, a1 @@ -163,7 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: addi a1, 
zero, -1 +; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a1 ; CHECK-NEXT: vse64.v v8, (a0) @@ -179,7 +179,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a2, zero, -1 +; RV32-NEXT: li a2, -1 ; RV32-NEXT: vmv.s.x v12, a2 ; RV32-NEXT: addi a2, a1, 1 ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -192,7 +192,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a2, zero, -1 +; RV64-NEXT: li a2, -1 ; RV64-NEXT: vmv.s.x v12, a2 ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: addi a2, a1, 1 @@ -212,7 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: addi a1, zero, 6 +; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a1 ; CHECK-NEXT: vse64.v v8, (a0) @@ -228,7 +228,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a2, zero, 6 +; RV32-NEXT: li a2, 6 ; RV32-NEXT: vmv.s.x v12, a2 ; RV32-NEXT: addi a2, a1, 1 ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -241,7 +241,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a2, zero, 6 +; RV64-NEXT: li a2, 6 ; RV64-NEXT: vmv.s.x v12, a2 ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: addi a2, a1, 1 @@ -263,7 +263,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: addi a2, zero, 6 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a2 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll @@ -55,7 +55,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: addi a1, zero, 3 +; CHECK-NEXT: li a1, 3 ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -160,7 +160,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 3 ; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: addi a1, zero, -3 +; CHECK-NEXT: li a1, -3 ; CHECK-NEXT: vmadd.vx v9, a1, v8 ; CHECK-NEXT: vse8.v v9, (a0) ; CHECK-NEXT: ret @@ -174,7 +174,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, -3 ; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: addi a4, zero, -3 +; CHECK-NEXT: li a4, -3 ; CHECK-NEXT: vmadd.vx v9, a4, v8 ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: vse32.v v9, (a1) @@ -192,7 +192,7 @@ define <4 x i64> @buildvec_vid_step1_add0_v4i64() { ; RV32-LABEL: buildvec_vid_step1_add0_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vmv.v.i v8, 0 @@ -216,7 +216,7 @@ define <4 x i64> @buildvec_vid_step2_add0_v4i64() { ; RV32-LABEL: buildvec_vid_step2_add0_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 2 +; RV32-NEXT: li a0, 2 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vmv.v.i v8, 0 @@ -256,7 +256,7 @@ ; RV32-NEXT: vmv.v.x v8, a0 ; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV32-NEXT: vse8.v v8, (a2) -; RV32-NEXT: addi a0, zero, 2047 
+; RV32-NEXT: li a0, 2047 ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vmv.v.x v8, a0 ; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu @@ -286,7 +286,7 @@ ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV64-NEXT: vse8.v v8, (a2) -; RV64-NEXT: addi a0, zero, 2047 +; RV64-NEXT: li a0, 2047 ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu @@ -419,7 +419,7 @@ define void @buildvec_seq_v8i8_v4i16(<8 x i8>* %x) { ; CHECK-LABEL: buildvec_seq_v8i8_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 513 +; CHECK-NEXT: li a1, 513 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -507,15 +507,15 @@ define void @buildvec_seq_v9i8(<9 x i8>* %x) { ; RV32-LABEL: buildvec_seq_v9i8: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 3 +; RV32-NEXT: li a1, 3 ; RV32-NEXT: sb a1, 8(a0) -; RV32-NEXT: addi a1, zero, 73 +; RV32-NEXT: li a1, 73 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v8, 2 ; RV32-NEXT: vmerge.vim v8, v8, 1, v0 -; RV32-NEXT: addi a1, zero, 36 +; RV32-NEXT: li a1, 36 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -525,7 +525,7 @@ ; ; RV64-LABEL: buildvec_seq_v9i8: ; RV64: # %bb.0: -; RV64-NEXT: addi a1, zero, 3 +; RV64-NEXT: li a1, 3 ; RV64-NEXT: sb a1, 8(a0) ; RV64-NEXT: lui a1, 4104 ; RV64-NEXT: addiw a1, a1, 385 @@ -542,7 +542,7 @@ define void @buildvec_seq_v4i16_v2i32(<4 x i16>* %x) { ; CHECK-LABEL: buildvec_seq_v4i16_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, -127 +; CHECK-NEXT: li a1, -127 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu @@ -569,7 +569,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a5) -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu @@ -600,14 +600,14 @@ ; CHECK-NEXT: vse16.v v8, (a2) ; CHECK-NEXT: vse16.v v8, (a3) ; CHECK-NEXT: vse16.v v8, (a4) -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 4 ; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v9, (a5) -; CHECK-NEXT: addi a0, zero, 4 +; CHECK-NEXT: li a0, 4 ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll @@ -77,7 +77,7 @@ define void @sext_v32i8_v32i32(<32 x i8>* %x, <32 x i32>* %z) { ; LMULMAX8-LABEL: sext_v32i8_v32i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX8-NEXT: vle8.v v8, (a0) ; LMULMAX8-NEXT: vsetvli zero, zero, e32, m8, ta, mu @@ -87,7 +87,7 @@ ; ; LMULMAX2-LABEL: sext_v32i8_v32i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, 
a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll @@ -28,7 +28,7 @@ define void @setne_vv_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; CHECK-LABEL: setne_vv_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v10, (a1) @@ -48,7 +48,7 @@ define void @setgt_vv_v64i8(<64 x i8>* %x, <64 x i8>* %y, <64 x i1>* %z) { ; CHECK-LABEL: setgt_vv_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v12, (a1) @@ -65,7 +65,7 @@ define void @setlt_vv_v128i8(<128 x i8>* %x, <128 x i8>* %y, <128 x i1>* %z) { ; CHECK-LABEL: setlt_vv_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v16, (a1) @@ -114,7 +114,7 @@ define void @setugt_vv_v32i8(<32 x i8>* %x, <32 x i8>* %y, <32 x i1>* %z) { ; CHECK-LABEL: setugt_vv_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v10, (a1) @@ -131,7 +131,7 @@ define void @setult_vv_v64i8(<64 x i8>* %x, <64 x i8>* %y, <64 x i1>* %z) { ; CHECK-LABEL: setult_vv_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v12, (a1) @@ -148,7 +148,7 @@ define void @setuge_vv_v128i8(<128 x i8>* %x, <128 x i8>* %y, <128 x i1>* %z) { ; CHECK-LABEL: setuge_vv_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v16, (a1) @@ -197,7 +197,7 @@ define void @setne_vx_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) { ; CHECK-LABEL: setne_vx_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vx v10, v8, a1 @@ -214,7 +214,7 @@ define void @setgt_vx_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) { ; CHECK-LABEL: setgt_vx_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v12, v8, a1 @@ -231,7 +231,7 @@ define void @setlt_vx_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) { ; CHECK-LABEL: setlt_vx_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmslt.vx v16, v8, a1 @@ -281,7 +281,7 @@ define void @setugt_vx_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) { ; CHECK-LABEL: setugt_vx_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vx v10, v8, a1 @@ -298,7 +298,7 @@ define void @setult_vx_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) { ; CHECK-LABEL: 
setult_vx_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsltu.vx v12, v8, a1 @@ -315,7 +315,7 @@ define void @setuge_vx_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) { ; CHECK-LABEL: setuge_vx_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.v.x v16, a1 @@ -365,7 +365,7 @@ define void @setne_xv_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) { ; CHECK-LABEL: setne_xv_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vx v10, v8, a1 @@ -382,7 +382,7 @@ define void @setgt_xv_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) { ; CHECK-LABEL: setgt_xv_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmslt.vx v12, v8, a1 @@ -399,7 +399,7 @@ define void @setlt_xv_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) { ; CHECK-LABEL: setlt_xv_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v16, v8, a1 @@ -449,7 +449,7 @@ define void @setugt_xv_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) { ; CHECK-LABEL: setugt_xv_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsltu.vx v10, v8, a1 @@ -466,7 +466,7 @@ define void @setult_xv_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) { ; CHECK-LABEL: setult_xv_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vx v12, v8, a1 @@ -483,7 +483,7 @@ define void @setuge_xv_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) { ; CHECK-LABEL: setuge_xv_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vx v16, v8, a1 @@ -533,7 +533,7 @@ define void @setne_vi_v32i8(<32 x i8>* %x, <32 x i1>* %z) { ; CHECK-LABEL: setne_vi_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vi v10, v8, 0 @@ -550,7 +550,7 @@ define void @setgt_vi_v64i8(<64 x i8>* %x, <64 x i1>* %z) { ; CHECK-LABEL: setgt_vi_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v12, v8, zero @@ -567,7 +567,7 @@ define void @setlt_vi_v128i8(<128 x i8>* %x, <128 x i1>* %z) { ; CHECK-LABEL: setlt_vi_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 128 +; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vi v16, v8, -1 @@ -616,10 +616,10 @@ define void @setugt_vi_v32i8(<32 x i8>* %x, <32 x i1>* %z) { ; CHECK-LABEL: setugt_vi_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; 
CHECK-NEXT: addi a0, zero, 5 +; CHECK-NEXT: li a0, 5 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0 ; CHECK-NEXT: vsm.v v10, (a1) ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define void @setult_vi_v64i8(<64 x i8>* %x, <64 x i1>* %z) { ; CHECK-LABEL: setult_vi_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vi v12, v8, 4 @@ -651,7 +651,7 @@ define void @setuge_vi_v128i8(<128 x i8>* %x, <128 x i1>* %z) { ; CHECK-LABEL: setuge_vi_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 128 +; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vi v16, v8, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -5,7 +5,7 @@ define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) { ; CHECK-LABEL: shuffle_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 11 +; CHECK-NEXT: li a0, 11 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu @@ -18,7 +18,7 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) { ; CHECK-LABEL: shuffle_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 203 +; CHECK-NEXT: li a0, 203 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -31,7 +31,7 @@ define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) { ; CHECK-LABEL: shuffle_xv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 9 +; CHECK-NEXT: li a0, 9 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu @@ -44,7 +44,7 @@ define <4 x i16> @shuffle_vx_v4i16(<4 x i16> %x) { ; CHECK-LABEL: shuffle_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 6 +; CHECK-NEXT: li a0, 6 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu @@ -90,7 +90,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v11, (a0) ; CHECK-NEXT: vrgather.vv v10, v8, v11 -; CHECK-NEXT: addi a0, zero, 8 +; CHECK-NEXT: li a0, 8 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu @@ -104,7 +104,7 @@ define <4 x i16> @vrgather_shuffle_xv_v4i16(<4 x i16> %x) { ; CHECK-LABEL: vrgather_shuffle_xv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 12 +; CHECK-NEXT: li a0, 12 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu @@ -123,7 +123,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vid.v v9 -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v10, v9, a0 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 @@ -189,7 +189,7 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) { ; RV32-LABEL: vrgather_shuffle_vv_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 5 +; RV32-NEXT: li a0, 5 ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vmv.s.x v16, a0 ; RV32-NEXT: vmv.v.i v20, 2 @@ -201,7 +201,7 @@ ; RV32-NEXT: vle16.v v21, (a0) ; RV32-NEXT: vsetvli zero, zero, e64, m4, 
ta, mu ; RV32-NEXT: vrgatherei16.vv v16, v8, v21 -; RV32-NEXT: addi a0, zero, 164 +; RV32-NEXT: li a0, 164 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu @@ -211,7 +211,7 @@ ; ; RV64-LABEL: vrgather_shuffle_vv_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 5 +; RV64-NEXT: li a0, 5 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vmv.s.x v16, a0 ; RV64-NEXT: vmv.v.i v20, 2 @@ -222,7 +222,7 @@ ; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV64-NEXT: vle64.v v24, (a0) ; RV64-NEXT: vrgather.vv v16, v8, v24 -; RV64-NEXT: addi a0, zero, 164 +; RV64-NEXT: li a0, 164 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu @@ -243,7 +243,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vmv.v.i v20, -1 ; RV32-NEXT: vrgatherei16.vv v12, v20, v16 -; RV32-NEXT: addi a0, zero, 113 +; RV32-NEXT: li a0, 113 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: lui a0, %hi(.LCPI12_1) @@ -257,7 +257,7 @@ ; ; RV64-LABEL: vrgather_shuffle_xv_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 113 +; RV64-NEXT: li a0, 113 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: lui a0, %hi(.LCPI12_0) @@ -282,7 +282,7 @@ ; RV32-NEXT: vmv4r.v v12, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v8, v12, v16 -; RV32-NEXT: addi a0, zero, 140 +; RV32-NEXT: li a0, 140 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: lui a0, %hi(.LCPI13_1) @@ -296,7 +296,7 @@ ; ; RV64-LABEL: vrgather_shuffle_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 115 +; RV64-NEXT: li a0, 115 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: lui a0, %hi(.LCPI13_0) @@ -318,7 +318,7 @@ ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 1 -; CHECK-NEXT: addi a1, zero, 10 +; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a1 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu @@ -349,7 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v10, 4 -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu @@ -363,7 +363,7 @@ define <8 x i8> @splat_ve4_ins_i1ve3(<8 x i8> %v) { ; CHECK-LABEL: splat_ve4_ins_i1ve3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vmv.v.i v10, 4 @@ -380,7 +380,7 @@ define <8 x i8> @splat_ve2_we0(<8 x i8> %v, <8 x i8> %w) { ; CHECK-LABEL: splat_ve2_we0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 66 +; CHECK-NEXT: li a0, 66 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -397,12 +397,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v11, 2 -; CHECK-NEXT: addi a0, zero, 4 +; CHECK-NEXT: li a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vmv.s.x v11, a0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v11 -; CHECK-NEXT: addi a0, zero, 66 +; CHECK-NEXT: li a0, 66 ; CHECK-NEXT: vsetivli zero, 1, e8, 
mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -416,7 +416,7 @@ define <8 x i8> @splat_ve2_we0_ins_i0we4(<8 x i8> %v, <8 x i8> %w) { ; CHECK-LABEL: splat_ve2_we0_ins_i0we4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 67 +; CHECK-NEXT: li a0, 67 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -440,7 +440,7 @@ ; RV32-NEXT: vmv.v.x v11, a0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vrgather.vv v10, v8, v11 -; RV32-NEXT: addi a0, zero, 66 +; RV32-NEXT: li a0, 66 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -456,7 +456,7 @@ ; RV64-NEXT: vmv.v.x v11, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vrgather.vv v10, v8, v11 -; RV64-NEXT: addi a0, zero, 66 +; RV64-NEXT: li a0, 66 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -470,13 +470,13 @@ define <8 x i8> @splat_ve2_we0_ins_i2we4(<8 x i8> %v, <8 x i8> %w) { ; CHECK-LABEL: splat_ve2_we0_ins_i2we4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 4 +; CHECK-NEXT: li a0, 4 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vmv.v.i v11, 0 ; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v11, v10, 2 -; CHECK-NEXT: addi a0, zero, 70 +; CHECK-NEXT: li a0, 70 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -491,7 +491,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) { ; RV32-LABEL: splat_ve2_we0_ins_i2ve4_i5we6: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 6 +; RV32-NEXT: li a0, 6 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.s.x v10, a0 ; RV32-NEXT: vmv.v.i v11, 0 @@ -503,7 +503,7 @@ ; RV32-NEXT: vmv.v.x v12, a0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vrgather.vv v10, v8, v12 -; RV32-NEXT: addi a0, zero, 98 +; RV32-NEXT: li a0, 98 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -513,7 +513,7 @@ ; ; RV64-LABEL: splat_ve2_we0_ins_i2ve4_i5we6: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 6 +; RV64-NEXT: li a0, 6 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vmv.v.i v11, 0 @@ -525,7 +525,7 @@ ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vrgather.vv v10, v8, v12 -; RV64-NEXT: addi a0, zero, 98 +; RV64-NEXT: li a0, 98 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll @@ -114,7 +114,7 @@ define void @splat_v32i8(<32 x i8>* %x, i8 %y) { ; LMULMAX8-LABEL: splat_v32i8: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a2, zero, 32 +; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.x v8, a1 ; LMULMAX8-NEXT: vse8.v v8, (a0) @@ -122,7 +122,7 @@ ; ; LMULMAX2-LABEL: splat_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; 
LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vse8.v v8, (a0) @@ -229,7 +229,7 @@ ; ; LMULMAX1-RV32-LABEL: splat_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: addi a3, zero, 5 +; LMULMAX1-RV32-NEXT: li a3, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a3 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -323,7 +323,7 @@ define void @splat_zero_v32i8(<32 x i8>* %x) { ; LMULMAX8-LABEL: splat_zero_v32i8: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a1, zero, 32 +; LMULMAX8-NEXT: li a1, 32 ; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a0) @@ -331,7 +331,7 @@ ; ; LMULMAX2-LABEL: splat_zero_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a1, zero, 32 +; LMULMAX2-NEXT: li a1, 32 ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse8.v v8, (a0) @@ -502,7 +502,7 @@ define void @splat_allones_v32i8(<32 x i8>* %x) { ; LMULMAX8-LABEL: splat_allones_v32i8: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a1, zero, 32 +; LMULMAX8-NEXT: li a1, 32 ; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse8.v v8, (a0) @@ -510,7 +510,7 @@ ; ; LMULMAX2-LABEL: splat_allones_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a1, zero, 32 +; LMULMAX2-NEXT: li a1, 32 ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse8.v v8, (a0) @@ -709,7 +709,7 @@ ; LMULMAX2-RV32-NEXT: vle64.v v12, (a0) ; LMULMAX2-RV32-NEXT: addi a0, a0, 32 ; LMULMAX2-RV32-NEXT: vle64.v v14, (a0) -; LMULMAX2-RV32-NEXT: addi a0, zero, 85 +; LMULMAX2-RV32-NEXT: li a0, 85 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a0 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -747,7 +747,7 @@ ; LMULMAX1-RV32-NEXT: vle64.v v14, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v15, (a0) -; LMULMAX1-RV32-NEXT: addi a0, zero, 5 +; LMULMAX1-RV32-NEXT: li a0, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a0 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll @@ -72,7 +72,7 @@ ; LMULMAX4-LABEL: gather_const_v64i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a1, a0, 32 -; LMULMAX4-NEXT: addi a2, zero, 64 +; LMULMAX4-NEXT: li a2, 64 ; LMULMAX4-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; LMULMAX4-NEXT: vlse8.v v8, (a1), zero ; LMULMAX4-NEXT: vse8.v v8, (a0) @@ -102,7 +102,7 @@ ; LMULMAX4-LABEL: gather_const_v16i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a1, a0, 50 -; LMULMAX4-NEXT: addi a2, zero, 32 +; LMULMAX4-NEXT: li a2, 32 ; LMULMAX4-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; LMULMAX4-NEXT: vlse16.v v8, (a1), zero ; LMULMAX4-NEXT: vse16.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -841,7 +841,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vle8.v v8, (a0) -; RV32-NEXT: addi a1, zero, 513 +; RV32-NEXT: li a1, 513 ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-NEXT: vmv.s.x v0, a1 ; 
RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu @@ -865,7 +865,7 @@ ; RV32-NEXT: vmv.s.x v0, a2 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vmv.v.i v10, 0 -; RV32-NEXT: addi a2, zero, -128 +; RV32-NEXT: li a2, -128 ; RV32-NEXT: vmerge.vxm v11, v10, a2, v0 ; RV32-NEXT: addi a1, a1, 32 ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu @@ -888,7 +888,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vle8.v v8, (a0) -; RV64-NEXT: addi a1, zero, 513 +; RV64-NEXT: li a1, 513 ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-NEXT: vmv.s.x v0, a1 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu @@ -912,7 +912,7 @@ ; RV64-NEXT: vmv.s.x v0, a2 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vmv.v.i v10, 0 -; RV64-NEXT: addi a2, zero, -128 +; RV64-NEXT: li a2, -128 ; RV64-NEXT: vmerge.vxm v11, v10, a2, v0 ; RV64-NEXT: addiw a1, a1, 32 ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu @@ -941,9 +941,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: addi a1, zero, 1 +; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: vmv.s.x v9, a1 -; CHECK-NEXT: addi a1, zero, 33 +; CHECK-NEXT: li a1, 33 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a1 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu @@ -995,7 +995,7 @@ ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: vmulhu.vv v8, v8, v10 ; CHECK-NEXT: vadd.vv v8, v8, v9 -; CHECK-NEXT: addi a1, zero, 1 +; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: vmv.s.x v9, a1 ; CHECK-NEXT: vmv.v.i v10, 2 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu @@ -1078,9 +1078,9 @@ ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vmv.v.i v9, 7 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 -; RV32-NEXT: addi a1, zero, -123 +; RV32-NEXT: li a1, -123 ; RV32-NEXT: vmv.v.x v10, a1 -; RV32-NEXT: addi a1, zero, 57 +; RV32-NEXT: li a1, 57 ; RV32-NEXT: vmerge.vxm v10, v10, a1, v0 ; RV32-NEXT: vmulhu.vv v8, v8, v10 ; RV32-NEXT: vsrl.vv v8, v8, v9 @@ -1098,9 +1098,9 @@ ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vmv.v.i v9, 7 ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 -; RV64-NEXT: addi a1, zero, -123 +; RV64-NEXT: li a1, -123 ; RV64-NEXT: vmv.v.x v10, a1 -; RV64-NEXT: addi a1, zero, 57 +; RV64-NEXT: li a1, 57 ; RV64-NEXT: vmerge.vxm v10, v10, a1, v0 ; RV64-NEXT: vmulhu.vv v8, v8, v10 ; RV64-NEXT: vsrl.vv v8, v8, v9 @@ -1117,7 +1117,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vle16.v v8, (a0) -; RV32-NEXT: addi a1, zero, 105 +; RV32-NEXT: li a1, 105 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: lui a1, 5 @@ -1138,7 +1138,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vle16.v v8, (a0) -; RV64-NEXT: addi a1, zero, 105 +; RV64-NEXT: li a1, 105 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a1 ; RV64-NEXT: lui a1, 5 @@ -1165,7 +1165,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: addi a1, zero, 5 +; RV32-NEXT: li a1, 5 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: lui a1, 419430 @@ -1229,7 +1229,7 @@ ; RV32-NEXT: vrsub.vi v10, v10, 0 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vmadd.vv v10, v8, v9 -; RV32-NEXT: addi a1, zero, 1 +; RV32-NEXT: li a1, 1 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vmv.s.x v8, a1 ; RV32-NEXT: vmv.v.i v9, 0 @@ -1237,7 +1237,7 @@ ; RV32-NEXT: vslideup.vi v9, v8, 2 
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsra.vv v8, v10, v9 -; RV32-NEXT: addi a1, zero, 63 +; RV32-NEXT: li a1, 63 ; RV32-NEXT: vsrl.vx v9, v10, a1 ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vse64.v v8, (a0) @@ -1264,7 +1264,7 @@ ; RV64-NEXT: vid.v v10 ; RV64-NEXT: vrsub.vi v11, v10, 0 ; RV64-NEXT: vmadd.vv v11, v8, v9 -; RV64-NEXT: addi a1, zero, 63 +; RV64-NEXT: li a1, 63 ; RV64-NEXT: vsrl.vx v8, v11, a1 ; RV64-NEXT: vsra.vv v9, v11, v10 ; RV64-NEXT: vadd.vv v8, v9, v8 @@ -1551,7 +1551,7 @@ define void @add_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: add_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -1736,7 +1736,7 @@ define void @sub_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: sub_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -1921,7 +1921,7 @@ define void @mul_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: mul_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -2106,7 +2106,7 @@ define void @and_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: and_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -2291,7 +2291,7 @@ define void @or_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: or_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -2476,7 +2476,7 @@ define void @xor_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: xor_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -2661,7 +2661,7 @@ define void @lshr_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: lshr_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -2846,7 +2846,7 @@ define void @ashr_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: ashr_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -3031,7 +3031,7 @@ define void @shl_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: shl_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -3216,7 +3216,7 @@ define void @sdiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: sdiv_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; 
LMULMAX2-NEXT: vle8.v v10, (a1) @@ -3401,7 +3401,7 @@ define void @srem_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: srem_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -3586,7 +3586,7 @@ define void @udiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: udiv_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -3771,7 +3771,7 @@ define void @urem_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: urem_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -3989,7 +3989,7 @@ define void @mulhu_v32i8(<32 x i8>* %x) { ; LMULMAX2-RV32-LABEL: mulhu_v32i8: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: addi a1, zero, 32 +; LMULMAX2-RV32-NEXT: li a1, 32 ; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV32-NEXT: lui a2, 66049 @@ -4009,7 +4009,7 @@ ; LMULMAX2-RV32-NEXT: addi a2, a2, -2044 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV32-NEXT: addi a2, zero, -128 +; LMULMAX2-RV32-NEXT: li a2, -128 ; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmerge.vxm v12, v12, a2, v0 ; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v12 @@ -4039,7 +4039,7 @@ ; ; LMULMAX2-RV64-LABEL: mulhu_v32i8: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: addi a1, zero, 32 +; LMULMAX2-RV64-NEXT: li a1, 32 ; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV64-NEXT: lui a2, 66049 @@ -4059,7 +4059,7 @@ ; LMULMAX2-RV64-NEXT: addiw a2, a2, -2044 ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV64-NEXT: addi a2, zero, -128 +; LMULMAX2-RV64-NEXT: li a2, -128 ; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV64-NEXT: vmerge.vxm v12, v12, a2, v0 ; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v12 @@ -4126,7 +4126,7 @@ ; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmv1r.v v0, v8 ; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 1, v0 -; LMULMAX2-RV32-NEXT: addi a1, zero, 257 +; LMULMAX2-RV32-NEXT: li a1, 257 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu @@ -4165,7 +4165,7 @@ ; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-RV64-NEXT: vmv1r.v v0, v8 ; LMULMAX2-RV64-NEXT: vmerge.vim v12, v12, 1, v0 -; LMULMAX2-RV64-NEXT: addi a1, zero, 257 +; LMULMAX2-RV64-NEXT: li a1, 257 ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu @@ -4211,7 +4211,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v8, (a0) -; LMULMAX2-NEXT: addi a1, zero, 68 +; LMULMAX2-NEXT: li a1, 68 ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-NEXT: vmv.s.x v0, a1 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -4225,7 +4225,7 @@ ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vmulhu.vv v8, v8, 
v12 ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 -; LMULMAX2-NEXT: addi a1, zero, 136 +; LMULMAX2-NEXT: li a1, 136 ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-NEXT: vmv.s.x v0, a1 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -4254,7 +4254,7 @@ ; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v12 ; LMULMAX1-RV32-NEXT: vmulhu.vv v9, v9, v11 ; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v12 -; LMULMAX1-RV32-NEXT: addi a2, zero, 1 +; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vmv.s.x v12, a2 ; LMULMAX1-RV32-NEXT: vmv.v.i v13, 2 ; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu @@ -4324,7 +4324,7 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, -1 +; LMULMAX2-RV64-NEXT: li a1, -1 ; LMULMAX2-RV64-NEXT: slli a1, a1, 63 ; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1 ; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0 @@ -4374,7 +4374,7 @@ ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a1) ; LMULMAX1-RV64-NEXT: vmv.v.i v10, 0 -; LMULMAX1-RV64-NEXT: addi a2, zero, -1 +; LMULMAX1-RV64-NEXT: li a2, -1 ; LMULMAX1-RV64-NEXT: slli a2, a2, 63 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; LMULMAX1-RV64-NEXT: vmv.s.x v10, a2 @@ -4441,16 +4441,16 @@ define void @mulhs_v32i8(<32 x i8>* %x) { ; LMULMAX2-RV32-LABEL: mulhs_v32i8: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: addi a1, zero, 32 +; LMULMAX2-RV32-NEXT: li a1, 32 ; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle8.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a2, zero, -123 +; LMULMAX2-RV32-NEXT: li a2, -123 ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a2 ; LMULMAX2-RV32-NEXT: lui a2, 304453 ; LMULMAX2-RV32-NEXT: addi a2, a2, -1452 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV32-NEXT: addi a2, zero, 57 +; LMULMAX2-RV32-NEXT: li a2, 57 ; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmerge.vxm v10, v10, a2, v0 ; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v10 @@ -4462,16 +4462,16 @@ ; ; LMULMAX2-RV64-LABEL: mulhs_v32i8: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: addi a1, zero, 32 +; LMULMAX2-RV64-NEXT: li a1, 32 ; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a2, zero, -123 +; LMULMAX2-RV64-NEXT: li a2, -123 ; LMULMAX2-RV64-NEXT: vmv.v.x v10, a2 ; LMULMAX2-RV64-NEXT: lui a2, 304453 ; LMULMAX2-RV64-NEXT: addiw a2, a2, -1452 ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV64-NEXT: addi a2, zero, 57 +; LMULMAX2-RV64-NEXT: li a2, 57 ; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-RV64-NEXT: vmerge.vxm v10, v10, a2, v0 ; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v10 @@ -4575,7 +4575,7 @@ ; LMULMAX1-NEXT: vle16.v v8, (a0) ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle16.v v9, (a1) -; LMULMAX1-NEXT: addi a2, zero, 105 +; LMULMAX1-NEXT: li a2, 105 ; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmv.s.x v0, a2 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu @@ -4597,7 +4597,7 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 85 +; LMULMAX2-RV32-NEXT: li a1, 85 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV32-NEXT: lui a1, 419430 @@ -4642,7 +4642,7 @@ ; 
LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a1) -; LMULMAX1-RV32-NEXT: addi a2, zero, 5 +; LMULMAX1-RV32-NEXT: li a2, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a2 ; LMULMAX1-RV32-NEXT: lui a2, 419430 @@ -4670,7 +4670,7 @@ ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a1) -; LMULMAX1-RV64-NEXT: addi a2, zero, 3 +; LMULMAX1-RV64-NEXT: li a2, 3 ; LMULMAX1-RV64-NEXT: slli a2, a2, 33 ; LMULMAX1-RV64-NEXT: addi a2, a2, -5 ; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu @@ -4692,7 +4692,7 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV32-NEXT: addi a1, zero, 17 +; LMULMAX2-RV32-NEXT: li a1, 17 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 @@ -4703,7 +4703,7 @@ ; LMULMAX2-RV32-NEXT: vmerge.vxm v10, v10, a1, v0 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmulh.vv v10, v8, v10 -; LMULMAX2-RV32-NEXT: addi a1, zero, 51 +; LMULMAX2-RV32-NEXT: li a1, 51 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -4711,9 +4711,9 @@ ; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 0, v0 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmadd.vv v12, v8, v10 -; LMULMAX2-RV32-NEXT: addi a1, zero, 63 +; LMULMAX2-RV32-NEXT: li a1, 63 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v12, a1 -; LMULMAX2-RV32-NEXT: addi a1, zero, 68 +; LMULMAX2-RV32-NEXT: li a1, 68 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -4729,7 +4729,7 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: addi a1, zero, 5 +; LMULMAX2-RV64-NEXT: li a1, 5 ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu @@ -4748,7 +4748,7 @@ ; LMULMAX2-RV64-NEXT: vmerge.vxm v12, v12, a1, v0 ; LMULMAX2-RV64-NEXT: vmulh.vv v12, v8, v12 ; LMULMAX2-RV64-NEXT: vmacc.vv v12, v8, v10 -; LMULMAX2-RV64-NEXT: addi a1, zero, 63 +; LMULMAX2-RV64-NEXT: li a1, 63 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v12, a1 ; LMULMAX2-RV64-NEXT: vmv.v.i v10, 1 ; LMULMAX2-RV64-NEXT: vmerge.vim v10, v10, 0, v0 @@ -4797,7 +4797,7 @@ ; LMULMAX1-RV64-NEXT: vid.v v12 ; LMULMAX1-RV64-NEXT: vrsub.vi v13, v12, 0 ; LMULMAX1-RV64-NEXT: vmacc.vv v11, v13, v9 -; LMULMAX1-RV64-NEXT: addi a2, zero, 63 +; LMULMAX1-RV64-NEXT: li a2, 63 ; LMULMAX1-RV64-NEXT: vsrl.vx v9, v11, a2 ; LMULMAX1-RV64-NEXT: vsra.vv v11, v11, v12 ; LMULMAX1-RV64-NEXT: vadd.vv v9, v11, v9 @@ -4818,7 +4818,7 @@ define void @smin_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: smin_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -5007,7 +5007,7 @@ define void @smax_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: smax_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: 
vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -5196,7 +5196,7 @@ define void @umin_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: umin_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -5385,7 +5385,7 @@ define void @umax_v32i8(<32 x i8>* %x, <32 x i8>* %y) { ; LMULMAX2-LABEL: umax_v32i8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) @@ -5800,7 +5800,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a1, zero, -1 +; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -5817,7 +5817,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: addi a1, zero, -1 +; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -5834,7 +5834,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: addi a1, zero, -1 +; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -5851,7 +5851,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: addi a1, zero, -1 +; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -7324,7 +7324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a1, zero, 57 +; CHECK-NEXT: li a1, 57 ; CHECK-NEXT: vmulhu.vx v8, v8, a1 ; CHECK-NEXT: vsrl.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7444,7 +7444,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a1, zero, -123 +; CHECK-NEXT: li a1, -123 ; CHECK-NEXT: vmulhu.vx v8, v8, a1 ; CHECK-NEXT: vsrl.vi v8, v8, 7 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7534,7 +7534,7 @@ ; RV32-NEXT: addi a1, sp, 8 ; RV32-NEXT: vlse64.v v9, (a1), zero ; RV32-NEXT: vmulh.vv v8, v8, v9 -; RV32-NEXT: addi a1, zero, 63 +; RV32-NEXT: li a1, 63 ; RV32-NEXT: vsrl.vx v9, v8, a1 ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vse64.v v8, (a0) @@ -7554,7 +7554,7 @@ ; RV64-NEXT: slli a1, a1, 12 ; RV64-NEXT: addi a1, a1, 1366 ; RV64-NEXT: vmulh.vx v8, v8, a1 -; RV64-NEXT: addi a1, zero, 63 +; RV64-NEXT: li a1, 63 ; RV64-NEXT: vsrl.vx v9, v8, a1 ; RV64-NEXT: vadd.vv v8, v8, v9 ; RV64-NEXT: vse64.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll @@ -71,7 +71,7 @@ define <3 x i1> @buildvec_mask_v1i1() { ; CHECK-LABEL: buildvec_mask_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret @@ -81,7 +81,7 @@ define <3 x i1> @buildvec_mask_optsize_v1i1() optsize { ; CHECK-LABEL: buildvec_mask_optsize_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; 
CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret @@ -91,7 +91,7 @@ define <4 x i1> @buildvec_mask_v4i1() { ; CHECK-LABEL: buildvec_mask_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 6 +; CHECK-NEXT: li a0, 6 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret @@ -101,7 +101,7 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1(i1 %x, i1 %y) { ; CHECK-LABEL: buildvec_mask_nonconst_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 3 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a2 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu @@ -147,7 +147,7 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sb a1, 15(sp) -; CHECK-NEXT: addi a1, zero, 1 +; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: sb a1, 14(sp) ; CHECK-NEXT: sb a0, 13(sp) ; CHECK-NEXT: sb zero, 12(sp) @@ -168,7 +168,7 @@ define <8 x i1> @buildvec_mask_v8i1() { ; CHECK-LABEL: buildvec_mask_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 182 +; CHECK-NEXT: li a0, 182 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret @@ -178,7 +178,7 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) { ; CHECK-LABEL: buildvec_mask_nonconst_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 19 +; CHECK-NEXT: li a2, 19 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a2 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -208,7 +208,7 @@ ; CHECK-NEXT: sb a3, 13(sp) ; CHECK-NEXT: sb a0, 12(sp) ; CHECK-NEXT: sb a1, 11(sp) -; CHECK-NEXT: addi a1, zero, 1 +; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: sb a1, 10(sp) ; CHECK-NEXT: sb a0, 9(sp) ; CHECK-NEXT: sb a0, 8(sp) @@ -240,7 +240,7 @@ ; CHECK-NEXT: sb a3, 13(sp) ; CHECK-NEXT: sb a0, 12(sp) ; CHECK-NEXT: sb a1, 11(sp) -; CHECK-NEXT: addi a1, zero, 1 +; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: sb a1, 10(sp) ; CHECK-NEXT: sb a0, 9(sp) ; CHECK-NEXT: sb a0, 8(sp) @@ -296,7 +296,7 @@ define <10 x i1> @buildvec_mask_v10i1() { ; CHECK-LABEL: buildvec_mask_v10i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 949 +; CHECK-NEXT: li a0, 949 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret @@ -325,7 +325,7 @@ define <16 x i1> @buildvec_mask_v16i1_undefs() { ; CHECK-LABEL: buildvec_mask_v16i1_undefs: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1722 +; CHECK-NEXT: li a0, 1722 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret @@ -335,7 +335,7 @@ define <32 x i1> @buildvec_mask_v32i1() { ; RV32-LMULMAX1-LABEL: buildvec_mask_v32i1: ; RV32-LMULMAX1: # %bb.0: -; RV32-LMULMAX1-NEXT: addi a0, zero, 1776 +; RV32-LMULMAX1-NEXT: li a0, 1776 ; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX1-NEXT: lui a0, 11 @@ -345,7 +345,7 @@ ; ; RV64-LMULMAX1-LABEL: buildvec_mask_v32i1: ; RV64-LMULMAX1: # %bb.0: -; RV64-LMULMAX1-NEXT: addi a0, zero, 1776 +; RV64-LMULMAX1-NEXT: li a0, 1776 ; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX1-NEXT: lui a0, 11 @@ -406,7 +406,7 @@ define <64 x i1> @buildvec_mask_v64i1() { ; RV32-LMULMAX1-LABEL: buildvec_mask_v64i1: ; RV32-LMULMAX1: # %bb.0: -; RV32-LMULMAX1-NEXT: addi a0, zero, 1776 +; RV32-LMULMAX1-NEXT: li a0, 1776 ; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX1-NEXT: lui a0, 4 @@ -420,7 +420,7 @@ ; ; RV64-LMULMAX1-LABEL: 
buildvec_mask_v64i1: ; RV64-LMULMAX1: # %bb.0: -; RV64-LMULMAX1-NEXT: addi a0, zero, 1776 +; RV64-LMULMAX1-NEXT: li a0, 1776 ; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX1-NEXT: lui a0, 4 @@ -513,7 +513,7 @@ define <128 x i1> @buildvec_mask_v128i1() { ; RV32-LMULMAX1-LABEL: buildvec_mask_v128i1: ; RV32-LMULMAX1: # %bb.0: -; RV32-LMULMAX1-NEXT: addi a0, zero, 1776 +; RV32-LMULMAX1-NEXT: li a0, 1776 ; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX1-NEXT: lui a0, 11 @@ -535,7 +535,7 @@ ; ; RV64-LMULMAX1-LABEL: buildvec_mask_v128i1: ; RV64-LMULMAX1: # %bb.0: -; RV64-LMULMAX1-NEXT: addi a0, zero, 1776 +; RV64-LMULMAX1-NEXT: li a0, 1776 ; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX1-NEXT: lui a0, 11 @@ -685,7 +685,7 @@ define <128 x i1> @buildvec_mask_optsize_v128i1() optsize { ; RV32-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1: ; RV32-LMULMAX1: # %bb.0: -; RV32-LMULMAX1-NEXT: addi a0, zero, 1776 +; RV32-LMULMAX1-NEXT: li a0, 1776 ; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX1-NEXT: lui a0, 11 @@ -707,7 +707,7 @@ ; ; RV64-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1: ; RV64-LMULMAX1: # %bb.0: -; RV64-LMULMAX1-NEXT: addi a0, zero, 1776 +; RV64-LMULMAX1-NEXT: li a0, 1776 ; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX1-NEXT: lui a0, 11 @@ -765,7 +765,7 @@ ; RV32-LMULMAX4: # %bb.0: ; RV32-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_0) ; RV32-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_0) -; RV32-LMULMAX4-NEXT: addi a1, zero, 64 +; RV32-LMULMAX4-NEXT: li a1, 64 ; RV32-LMULMAX4-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; RV32-LMULMAX4-NEXT: vlm.v v0, (a0) ; RV32-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_1) @@ -798,7 +798,7 @@ ; RV32-LMULMAX8: # %bb.0: ; RV32-LMULMAX8-NEXT: lui a0, %hi(.LCPI21_0) ; RV32-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI21_0) -; RV32-LMULMAX8-NEXT: addi a1, zero, 128 +; RV32-LMULMAX8-NEXT: li a1, 128 ; RV32-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; RV32-LMULMAX8-NEXT: vlm.v v0, (a0) ; RV32-LMULMAX8-NEXT: ret @@ -807,7 +807,7 @@ ; RV64-LMULMAX8: # %bb.0: ; RV64-LMULMAX8-NEXT: lui a0, %hi(.LCPI21_0) ; RV64-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI21_0) -; RV64-LMULMAX8-NEXT: addi a1, zero, 128 +; RV64-LMULMAX8-NEXT: li a1, 128 ; RV64-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; RV64-LMULMAX8-NEXT: vlm.v v0, (a0) ; RV64-LMULMAX8-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll @@ -91,7 +91,7 @@ define void @load_store_v32i1(<32 x i1>* %x, <32 x i1>* %y) { ; LMULMAX2-LABEL: load_store_v32i1: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsm.v v8, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll @@ -39,7 +39,7 @@ define void @xor_v32i1(<32 x i1>* %x, <32 x i1>* %y) { ; CHECK-LABEL: xor_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi 
a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) @@ -56,7 +56,7 @@ define void @not_v64i1(<64 x i1>* %x, <64 x i1>* %y) { ; CHECK-LABEL: not_v64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vmnand.mm v8, v8, v8 @@ -106,7 +106,7 @@ define void @xornot_v32i1(<32 x i1>* %x, <32 x i1>* %y) { ; CHECK-LABEL: xornot_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) @@ -158,7 +158,7 @@ define void @xnor_v32i1(<32 x i1>* %x, <32 x i1>* %y) { ; CHECK-LABEL: xnor_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll @@ -187,7 +187,7 @@ define void @splat_zeros_v32i1(<32 x i1>* %x) { ; LMULMAX2-LABEL: splat_zeros_v32i1: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi a1, zero, 32 +; LMULMAX2-NEXT: li a1, 32 ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vmclr.m v8 ; LMULMAX2-NEXT: vsm.v v8, (a0) @@ -218,7 +218,7 @@ ; LMULMAX2-LABEL: splat_v32i1: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: andi a1, a1, 1 -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vmsne.vi v10, v8, 0 @@ -256,7 +256,7 @@ ; LMULMAX2-LABEL: splat_ones_v64i1: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a1, a0, 4 -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vmset.m v8 ; LMULMAX2-NEXT: vsm.v v8, (a1) @@ -296,7 +296,7 @@ ; LMULMAX2-LABEL: splat_v64i1: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: andi a1, a1, 1 -; LMULMAX2-NEXT: addi a2, zero, 32 +; LMULMAX2-NEXT: li a2, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vmsne.vi v10, v8, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -2121,7 +2121,7 @@ define <32 x i8> @mgather_baseidx_v32i8(i8* %base, <32 x i8> %idxs, <32 x i1> %m, <32 x i8> %passthru) { ; RV32-LABEL: mgather_baseidx_v32i8: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu @@ -2146,7 +2146,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: addi a0, zero, 32 +; RV64-NEXT: li a0, 32 ; RV64-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: vsetivli zero, 16, e8, m2, tu, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll @@ -325,7 +325,7 @@ define void @masked_load_v32f16(<32 x half>* %a, <32 x half>* %m_ptr, <32 x half>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -344,7 +344,7 @@ define void @masked_load_v32f32(<32 x float>* %a, <32 x float>* %m_ptr, <32 x float>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v32f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: fmv.w.x ft0, zero @@ -407,7 +407,7 @@ define void @masked_load_v64f16(<64 x half>* %a, <64 x half>* %m_ptr, <64 x half>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -427,7 +427,7 @@ ; CHECK-LABEL: masked_load_v64f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a3, a1, 128 -; CHECK-NEXT: addi a4, zero, 32 +; CHECK-NEXT: li a4, 32 ; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vle32.v v24, (a3) @@ -454,7 +454,7 @@ ; CHECK-LABEL: masked_load_v128f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a3, a1, 128 -; CHECK-NEXT: addi a4, zero, 64 +; CHECK-NEXT: li a4, 64 ; CHECK-NEXT: vsetvli zero, a4, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vle16.v v24, (a3) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll @@ -345,7 +345,7 @@ define void @masked_load_v32i8(<32 x i8>* %a, <32 x i8>* %m_ptr, <32 x i8>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -363,7 +363,7 @@ define void @masked_load_v32i16(<32 x i16>* %a, <32 x i16>* %m_ptr, <32 x i16>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -381,7 +381,7 @@ define void @masked_load_v32i32(<32 x i32>* %a, <32 x i32>* %m_ptr, <32 x i32>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -403,7 +403,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v16, (a3) ; RV32-NEXT: vle64.v v0, (a1) -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vmv.v.i v24, 0 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu @@ -445,7 +445,7 @@ define void @masked_load_v64i8(<64 x i8>* %a, <64 x i8>* %m_ptr, <64 x i8>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; 
CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -463,7 +463,7 @@ define void @masked_load_v64i16(<64 x i16>* %a, <64 x i16>* %m_ptr, <64 x i16>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -482,7 +482,7 @@ ; CHECK-LABEL: masked_load_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a3, a1, 128 -; CHECK-NEXT: addi a4, zero, 32 +; CHECK-NEXT: li a4, 32 ; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vle32.v v24, (a3) @@ -507,7 +507,7 @@ define void @masked_load_v128i8(<128 x i8>* %a, <128 x i8>* %m_ptr, <128 x i8>* %res_ptr) nounwind { ; CHECK-LABEL: masked_load_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -526,7 +526,7 @@ ; CHECK-LABEL: masked_load_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a3, a1, 128 -; CHECK-NEXT: addi a4, zero, 128 +; CHECK-NEXT: li a4, 128 ; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v16, (a1) ; CHECK-NEXT: vle8.v v24, (a3) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -1831,7 +1831,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, i8* %base, <32 x i8> %idxs, <32 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v32i8: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vsext.vf4 v16, v10 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll @@ -325,7 +325,7 @@ define void @masked_store_v32f16(<32 x half>* %val_ptr, <32 x half>* %a, <32 x half>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v12, (a0) @@ -344,7 +344,7 @@ define void @masked_store_v32f32(<32 x float>* %val_ptr, <32 x float>* %a, <32 x float>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v32f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v16, (a0) @@ -451,7 +451,7 @@ define void @masked_store_v64f16(<64 x half>* %val_ptr, <64 x half>* %a, <64 x half>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v16, (a0) @@ -474,7 +474,7 @@ ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 
128 @@ -523,7 +523,7 @@ ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll @@ -345,7 +345,7 @@ define void @masked_store_v32i8(<32 x i8>* %val_ptr, <32 x i8>* %a, <32 x i8>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v10, (a0) @@ -363,7 +363,7 @@ define void @masked_store_v32i16(<32 x i16>* %val_ptr, <32 x i16>* %a, <32 x i16>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v12, (a0) @@ -381,7 +381,7 @@ define void @masked_store_v32i32(<32 x i32>* %val_ptr, <32 x i32>* %a, <32 x i32>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v16, (a0) @@ -412,7 +412,7 @@ ; RV32-NEXT: addi a3, a3, 16 ; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill ; RV32-NEXT: vle64.v v24, (a2) -; RV32-NEXT: addi a2, zero, 32 +; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu @@ -489,7 +489,7 @@ define void @masked_store_v64i8(<64 x i8>* %val_ptr, <64 x i8>* %a, <64 x i8>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v12, (a0) @@ -507,7 +507,7 @@ define void @masked_store_v64i16(<64 x i16>* %val_ptr, <64 x i16>* %a, <64 x i16>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v16, (a0) @@ -529,7 +529,7 @@ ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 @@ -573,7 +573,7 @@ define void @masked_store_v128i8(<128 x i8>* %val_ptr, <128 x i8>* %a, <128 x i8>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v16, (a0) @@ -595,7 +595,7 @@ ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 -; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 @@ -643,7 +643,7 @@ ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: slli a3, a3, 4 ; 
CHECK-NEXT: sub sp, sp, a3 -; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -186,7 +186,7 @@ define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) { ; RV32-LABEL: vreduce_fadd_v32f16: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: lui a2, %hi(.LCPI10_0) ; RV32-NEXT: flh ft0, %lo(.LCPI10_0)(a2) ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu @@ -203,7 +203,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, %hi(.LCPI10_0) ; RV64-NEXT: flh ft0, %lo(.LCPI10_0)(a1) -; RV64-NEXT: addi a1, zero, 32 +; RV64-NEXT: li a1, 32 ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -221,7 +221,7 @@ define half @vreduce_ord_fadd_v32f16(<32 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -240,7 +240,7 @@ define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) { ; RV32-LABEL: vreduce_fadd_v64f16: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 64 +; RV32-NEXT: li a1, 64 ; RV32-NEXT: lui a2, %hi(.LCPI12_0) ; RV32-NEXT: flh ft0, %lo(.LCPI12_0)(a2) ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu @@ -257,7 +257,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, %hi(.LCPI12_0) ; RV64-NEXT: flh ft0, %lo(.LCPI12_0)(a1) -; RV64-NEXT: addi a1, zero, 64 +; RV64-NEXT: li a1, 64 ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -275,7 +275,7 @@ define half @vreduce_ord_fadd_v64f16(<64 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_v64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -294,7 +294,7 @@ define half @vreduce_fadd_v128f16(<128 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v128f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -318,7 +318,7 @@ ; CHECK-LABEL: vreduce_ord_fadd_v128f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v16, (a0) @@ -522,7 +522,7 @@ define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) { ; RV32-LABEL: vreduce_fadd_v32f32: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: lui a2, %hi(.LCPI26_0) ; RV32-NEXT: flw ft0, %lo(.LCPI26_0)(a2) ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu @@ -539,7 +539,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, %hi(.LCPI26_0) ; RV64-NEXT: flw ft0, %lo(.LCPI26_0)(a1) -; RV64-NEXT: addi a1, zero, 32 +; RV64-NEXT: li a1, 32 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu @@ -557,7 +557,7 @@ define float 
@vreduce_ord_fadd_v32f32(<32 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_v32f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu @@ -576,7 +576,7 @@ define float @vreduce_fadd_v64f32(<64 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v64f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -600,7 +600,7 @@ ; CHECK-LABEL: vreduce_ord_fadd_v64f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v16, (a0) @@ -927,7 +927,7 @@ define half @vreduce_fmin_v128f16(<128 x half>* %x) { ; CHECK-LABEL: vreduce_fmin_v128f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -1027,7 +1027,7 @@ define float @vreduce_fmin_v128f32(<128 x float>* %x) { ; CHECK-LABEL: vreduce_fmin_v128f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: addi a2, a0, 384 ; CHECK-NEXT: vle32.v v8, (a2) @@ -1232,7 +1232,7 @@ define half @vreduce_fmax_v128f16(<128 x half>* %x) { ; CHECK-LABEL: vreduce_fmax_v128f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -1332,7 +1332,7 @@ define float @vreduce_fmax_v128f32(<128 x float>* %x) { ; CHECK-LABEL: vreduce_fmax_v128f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: addi a2, a0, 384 ; CHECK-NEXT: vle32.v v8, (a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -847,7 +847,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -881,7 +881,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -915,7 +915,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -949,7 +949,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 
@@ -983,7 +983,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1017,7 +1017,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredand.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1051,7 +1051,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1085,7 +1085,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1119,7 +1119,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1153,7 +1153,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1187,7 +1187,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1221,7 +1221,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1255,7 +1255,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1289,7 +1289,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredand.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1323,7 +1323,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1357,7 +1357,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: 
vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -93,7 +93,7 @@ define i8 @vreduce_add_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -112,7 +112,7 @@ define i8 @vreduce_add_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -131,7 +131,7 @@ define i8 @vreduce_add_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -150,7 +150,7 @@ define i8 @vreduce_add_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v256i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -258,7 +258,7 @@ define i16 @vreduce_add_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_add_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -277,7 +277,7 @@ define i16 @vreduce_add_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_add_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -296,7 +296,7 @@ define i16 @vreduce_add_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_add_v128i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -404,7 +404,7 @@ define i32 @vreduce_add_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_add_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu @@ -423,7 +423,7 @@ define i32 @vreduce_add_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_add_v64i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -447,7 +447,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -476,7 +476,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredsum.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; 
RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -509,7 +509,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredsum.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -542,7 +542,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredsum.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -575,7 +575,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredsum.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -611,7 +611,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredsum.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -656,7 +656,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredsum.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -777,7 +777,7 @@ define i8 @vreduce_and_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -796,7 +796,7 @@ define i8 @vreduce_and_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -815,7 +815,7 @@ define i8 @vreduce_and_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -834,7 +834,7 @@ define i8 @vreduce_and_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v256i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -942,7 +942,7 @@ define i16 @vreduce_and_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_and_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -961,7 +961,7 @@ define i16 @vreduce_and_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_and_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -980,7 +980,7 @@ define i16 @vreduce_and_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_and_v128i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, 
e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -1088,7 +1088,7 @@ define i32 @vreduce_and_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu @@ -1107,7 +1107,7 @@ define i32 @vreduce_and_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v64i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -1131,7 +1131,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -1160,7 +1160,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1193,7 +1193,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1226,7 +1226,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1259,7 +1259,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1295,7 +1295,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1340,7 +1340,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1461,7 +1461,7 @@ define i8 @vreduce_or_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -1480,7 +1480,7 @@ define i8 @vreduce_or_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -1499,7 +1499,7 @@ define i8 @vreduce_or_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; 
CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -1518,7 +1518,7 @@ define i8 @vreduce_or_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v256i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -1626,7 +1626,7 @@ define i16 @vreduce_or_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -1645,7 +1645,7 @@ define i16 @vreduce_or_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -1664,7 +1664,7 @@ define i16 @vreduce_or_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v128i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -1772,7 +1772,7 @@ define i32 @vreduce_or_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu @@ -1791,7 +1791,7 @@ define i32 @vreduce_or_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v64i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -1815,7 +1815,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -1844,7 +1844,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredor.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1877,7 +1877,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredor.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1910,7 +1910,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredor.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1943,7 +1943,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1979,7 +1979,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; 
RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2024,7 +2024,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2145,7 +2145,7 @@ define i8 @vreduce_xor_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -2164,7 +2164,7 @@ define i8 @vreduce_xor_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -2183,7 +2183,7 @@ define i8 @vreduce_xor_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -2202,7 +2202,7 @@ define i8 @vreduce_xor_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v256i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -2310,7 +2310,7 @@ define i16 @vreduce_xor_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -2329,7 +2329,7 @@ define i16 @vreduce_xor_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -2348,7 +2348,7 @@ define i16 @vreduce_xor_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v128i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -2456,7 +2456,7 @@ define i32 @vreduce_xor_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu @@ -2475,7 +2475,7 @@ define i32 @vreduce_xor_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v64i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -2499,7 +2499,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -2528,7 +2528,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, 
m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2561,7 +2561,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2594,7 +2594,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2627,7 +2627,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2663,7 +2663,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2708,7 +2708,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2759,7 +2759,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu @@ -2778,7 +2778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu @@ -2797,7 +2797,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -2816,7 +2816,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu @@ -2833,10 +2833,10 @@ define i8 @vreduce_smin_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -2853,10 +2853,10 @@ define i8 @vreduce_smin_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a2, 
zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -2873,10 +2873,10 @@ define i8 @vreduce_smin_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -2893,13 +2893,13 @@ define i8 @vreduce_smin_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v256i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmin.vv v8, v8, v16 -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -3062,7 +3062,7 @@ define i16 @vreduce_smin_v32i16(<32 x i16>* %x) { ; RV32-LABEL: vreduce_smin_v32i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 @@ -3076,7 +3076,7 @@ ; ; RV64-LABEL: vreduce_smin_v32i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a1, zero, 32 +; RV64-NEXT: li a1, 32 ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 @@ -3097,7 +3097,7 @@ define i16 @vreduce_smin_v64i16(<64 x i16>* %x) { ; RV32-LABEL: vreduce_smin_v64i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 64 +; RV32-NEXT: li a1, 64 ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 @@ -3111,7 +3111,7 @@ ; ; RV64-LABEL: vreduce_smin_v64i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a1, zero, 64 +; RV64-NEXT: li a1, 64 ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 @@ -3132,7 +3132,7 @@ define i16 @vreduce_smin_v128i16(<128 x i16>* %x) { ; RV32-LABEL: vreduce_smin_v128i16: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 64 +; RV32-NEXT: li a1, 64 ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 @@ -3149,7 +3149,7 @@ ; ; RV64-LABEL: vreduce_smin_v128i16: ; RV64: # %bb.0: -; RV64-NEXT: addi a1, zero, 64 +; RV64-NEXT: li a1, 64 ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 @@ -3319,7 +3319,7 @@ define i32 @vreduce_smin_v32i32(<32 x i32>* %x) { ; RV32-LABEL: vreduce_smin_v32i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 @@ -3333,7 +3333,7 @@ ; ; RV64-LABEL: vreduce_smin_v32i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a1, zero, 32 +; RV64-NEXT: li a1, 32 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 @@ -3354,7 +3354,7 @@ define i32 @vreduce_smin_v64i32(<64 x i32>* %x) { ; RV32-LABEL: vreduce_smin_v64i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 @@ -3371,7 +3371,7 @@ ; ; RV64-LABEL: vreduce_smin_v64i32: ; RV64: # %bb.0: -; RV64-NEXT: addi a1, 
zero, 32 +; RV64-NEXT: li a1, 32 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 @@ -3397,7 +3397,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -3423,7 +3423,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, -1 +; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -3434,7 +3434,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -3445,7 +3445,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 @@ -3467,7 +3467,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, -1 +; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -3478,7 +3478,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -3489,7 +3489,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 @@ -3511,7 +3511,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, -1 +; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -3522,7 +3522,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -3533,7 +3533,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 @@ -3555,7 +3555,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, -1 +; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -3566,7 +3566,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -3577,7 +3577,7 @@ ; RV64: # %bb.0: ; 
RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 @@ -3601,7 +3601,7 @@ ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) -; RV32-NEXT: addi a0, zero, -1 +; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -3613,7 +3613,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -3627,7 +3627,7 @@ ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vmin.vv v8, v8, v16 -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 @@ -3654,7 +3654,7 @@ ; RV32-NEXT: vle64.v v24, (a1) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v0, (a0) -; RV32-NEXT: addi a0, zero, -1 +; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -3668,7 +3668,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -3688,7 +3688,7 @@ ; RV64-NEXT: vmin.vv v16, v24, v16 ; RV64-NEXT: vmin.vv v8, v8, v0 ; RV64-NEXT: vmin.vv v8, v8, v16 -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 @@ -3722,7 +3722,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu @@ -3741,7 +3741,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu @@ -3760,7 +3760,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -3779,7 +3779,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu @@ -3796,10 +3796,10 @@ define i8 @vreduce_smax_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu 
@@ -3816,10 +3816,10 @@ define i8 @vreduce_smax_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu @@ -3836,10 +3836,10 @@ define i8 @vreduce_smax_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -3856,13 +3856,13 @@ define i8 @vreduce_smax_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v256i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmax.vv v8, v8, v16 -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu @@ -3969,7 +3969,7 @@ define i16 @vreduce_smax_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_smax_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 @@ -3989,7 +3989,7 @@ define i16 @vreduce_smax_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_smax_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 @@ -4009,7 +4009,7 @@ define i16 @vreduce_smax_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_smax_v128i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -4122,7 +4122,7 @@ define i32 @vreduce_smax_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_smax_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 @@ -4142,7 +4142,7 @@ define i32 @vreduce_smax_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_smax_v64i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -4167,7 +4167,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -4202,7 +4202,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -4213,7 +4213,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; 
RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 @@ -4244,7 +4244,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -4255,7 +4255,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 @@ -4286,7 +4286,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -4297,7 +4297,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 @@ -4328,7 +4328,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -4339,7 +4339,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 @@ -4373,7 +4373,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -4387,7 +4387,7 @@ ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vmax.vv v8, v8, v16 -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 @@ -4426,7 +4426,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -4446,7 +4446,7 @@ ; RV64-NEXT: vmax.vv v16, v24, v16 ; RV64-NEXT: vmax.vv v8, v8, v0 ; RV64-NEXT: vmax.vv v8, v8, v16 -; RV64-NEXT: addi a0, zero, -1 +; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 @@ -4550,7 +4550,7 @@ define i8 @vreduce_umin_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -4569,7 +4569,7 @@ define i8 @vreduce_umin_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; 
CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -4588,7 +4588,7 @@ define i8 @vreduce_umin_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -4607,7 +4607,7 @@ define i8 @vreduce_umin_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v256i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -4715,7 +4715,7 @@ define i16 @vreduce_umin_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_umin_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -4734,7 +4734,7 @@ define i16 @vreduce_umin_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_umin_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -4753,7 +4753,7 @@ define i16 @vreduce_umin_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_umin_v128i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -4861,7 +4861,7 @@ define i32 @vreduce_umin_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu @@ -4880,7 +4880,7 @@ define i32 @vreduce_umin_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v64i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -4904,7 +4904,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -4933,7 +4933,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -4966,7 +4966,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -4999,7 +4999,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5032,7 +5032,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi 
a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5068,7 +5068,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5113,7 +5113,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5234,7 +5234,7 @@ define i8 @vreduce_umax_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -5253,7 +5253,7 @@ define i8 @vreduce_umax_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -5272,7 +5272,7 @@ define i8 @vreduce_umax_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu @@ -5291,7 +5291,7 @@ define i8 @vreduce_umax_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v256i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -5399,7 +5399,7 @@ define i16 @vreduce_umax_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_umax_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -5418,7 +5418,7 @@ define i16 @vreduce_umax_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_umax_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu @@ -5437,7 +5437,7 @@ define i16 @vreduce_umax_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_umax_v128i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 64 +; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -5545,7 +5545,7 @@ define i32 @vreduce_umax_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_umax_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu @@ -5564,7 +5564,7 @@ define i32 @vreduce_umax_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_umax_v64i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 @@ -5588,7 +5588,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, 
ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -5617,7 +5617,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5650,7 +5650,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5683,7 +5683,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5716,7 +5716,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5752,7 +5752,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -5797,7 +5797,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -401,12 +401,12 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v258i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 128 +; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: addi a3, a1, -128 ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: mv a0, zero +; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: bltu a1, a3, .LBB31_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a3 @@ -416,7 +416,7 @@ ; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t ; CHECK-NEXT: bltu a1, a2, .LBB31_4 ; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: .LBB31_4: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 @@ -432,17 +432,17 @@ ; CHECK-LABEL: vadd_vi_v258i8_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, -128 -; CHECK-NEXT: mv a2, zero +; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: bltu a0, a1, .LBB32_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: bltu a0, a1, .LBB32_4 ; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: addi a0, zero, 128 +; CHECK-NEXT: li a0, 128 ; CHECK-NEXT: .LBB32_4: ; CHECK-NEXT: vsetvli 
zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 @@ -460,7 +460,7 @@ define <256 x i8> @vadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) { ; CHECK-LABEL: vadd_vi_v258i8_evl129: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 128 +; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vlm.v v24, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t @@ -479,7 +479,7 @@ define <256 x i8> @vadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ; CHECK-LABEL: vadd_vi_v258i8_evl128: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 128 +; CHECK-NEXT: li a0, 128 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret @@ -1521,10 +1521,10 @@ ; RV32-LABEL: vadd_vx_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: mv a1, zero +; RV32-NEXT: li a1, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: addi a2, zero, 32 +; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; RV32-NEXT: addi a2, a0, -16 ; RV32-NEXT: vmv.v.i v24, -1 @@ -1533,11 +1533,11 @@ ; RV32-NEXT: mv a1, a2 ; RV32-NEXT: .LBB107_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; RV32-NEXT: addi a1, zero, 16 +; RV32-NEXT: li a1, 16 ; RV32-NEXT: vadd.vv v16, v16, v24, v0.t ; RV32-NEXT: bltu a0, a1, .LBB107_4 ; RV32-NEXT: # %bb.3: -; RV32-NEXT: addi a0, zero, 16 +; RV32-NEXT: li a0, 16 ; RV32-NEXT: .LBB107_4: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 @@ -1547,7 +1547,7 @@ ; RV64-LABEL: vadd_vx_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: mv a1, zero +; RV64-NEXT: li a1, 0 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV64-NEXT: addi a2, a0, -16 ; RV64-NEXT: vslidedown.vi v0, v0, 2 @@ -1556,11 +1556,11 @@ ; RV64-NEXT: mv a1, a2 ; RV64-NEXT: .LBB107_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; RV64-NEXT: addi a1, zero, 16 +; RV64-NEXT: li a1, 16 ; RV64-NEXT: vadd.vi v16, v16, -1, v0.t ; RV64-NEXT: bltu a0, a1, .LBB107_4 ; RV64-NEXT: # %bb.3: -; RV64-NEXT: addi a0, zero, 16 +; RV64-NEXT: li a0, 16 ; RV64-NEXT: .LBB107_4: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 @@ -1575,8 +1575,8 @@ define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; RV32-LABEL: vadd_vi_v32i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: mv a1, zero -; RV32-NEXT: addi a2, zero, 32 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; RV32-NEXT: addi a2, a0, -16 ; RV32-NEXT: vmv.v.i v24, -1 @@ -1585,11 +1585,11 @@ ; RV32-NEXT: mv a1, a2 ; RV32-NEXT: .LBB108_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; RV32-NEXT: addi a1, zero, 16 +; RV32-NEXT: li a1, 16 ; RV32-NEXT: vadd.vv v16, v16, v24 ; RV32-NEXT: bltu a0, a1, .LBB108_4 ; RV32-NEXT: # %bb.3: -; RV32-NEXT: addi a0, zero, 16 +; RV32-NEXT: li a0, 16 ; RV32-NEXT: .LBB108_4: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v24 @@ -1598,17 +1598,17 @@ ; RV64-LABEL: vadd_vi_v32i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: addi a1, a0, -16 -; RV64-NEXT: mv a2, zero +; RV64-NEXT: li a2, 0 ; RV64-NEXT: bltu a0, a1, .LBB108_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: .LBB108_2: ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV64-NEXT: addi a1, zero, 16 +; RV64-NEXT: li a1, 16 ; RV64-NEXT: vadd.vi v16, v16, -1 ; RV64-NEXT: bltu a0, a1, .LBB108_4 ; RV64-NEXT: # %bb.3: -; RV64-NEXT: addi a0, zero, 16 +; RV64-NEXT: li a0, 16 ; RV64-NEXT: .LBB108_4: ; 
RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vadd.vi v8, v8, -1 @@ -1626,7 +1626,7 @@ define <32 x i64> @vadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) { ; RV32-LABEL: vadd_vx_v32i64_evl12: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; RV32-NEXT: vmv.v.i v16, -1 ; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, mu @@ -1649,7 +1649,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v1, v0, 2 -; RV32-NEXT: addi a0, zero, 32 +; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; RV32-NEXT: vmv.v.i v24, -1 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll @@ -1268,7 +1268,7 @@ ; RV32-LABEL: vand_vx_v11i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v16, v0 -; RV32-NEXT: addi a3, zero, 32 +; RV32-NEXT: li a3, 32 ; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: lui a1, 341 @@ -1296,7 +1296,7 @@ define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vand_vx_v11i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: addi a3, zero, 32 +; RV32-NEXT: li a3, 32 ; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; RV32-NEXT: vmv.v.x v16, a1 ; RV32-NEXT: lui a1, 341 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vdivu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 127 +; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll @@ -238,7 +238,7 @@ ; ; LMULMAX8-LABEL: vreduce_or_v32i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; LMULMAX8-NEXT: vpopc.m a0, v0 ; LMULMAX8-NEXT: snez a0, a0 @@ -262,7 +262,7 @@ ; ; LMULMAX8-LABEL: vreduce_xor_v32i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; LMULMAX8-NEXT: vpopc.m a0, v0 ; LMULMAX8-NEXT: andi a0, a0, 1 @@ -286,7 +286,7 @@ ; ; LMULMAX8-LABEL: vreduce_and_v32i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a0, zero, 32 +; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; LMULMAX8-NEXT: vmnand.mm v8, v0, v0 ; LMULMAX8-NEXT: vpopc.m a0, v8 @@ -313,7 +313,7 @@ ; ; LMULMAX8-LABEL: vreduce_or_v64i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a0, zero, 64 +; LMULMAX8-NEXT: li a0, 64 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; LMULMAX8-NEXT: vpopc.m a0, v0 ; LMULMAX8-NEXT: snez a0, a0 @@ -339,7 +339,7 @@ ; ; LMULMAX8-LABEL: vreduce_xor_v64i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a0, zero, 64 +; LMULMAX8-NEXT: li a0, 64 ; 
LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; LMULMAX8-NEXT: vpopc.m a0, v0 ; LMULMAX8-NEXT: andi a0, a0, 1 @@ -365,7 +365,7 @@ ; ; LMULMAX8-LABEL: vreduce_and_v64i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi a0, zero, 64 +; LMULMAX8-NEXT: li a0, 64 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; LMULMAX8-NEXT: vmnand.mm v8, v0, v0 ; LMULMAX8-NEXT: vpopc.m a0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vremu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 127 +; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll @@ -167,7 +167,7 @@ define void @vselect_vv_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) { ; CHECK-LABEL: vselect_vv_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a4, zero, 32 +; CHECK-NEXT: li a4, 32 ; CHECK-NEXT: vsetvli zero, a4, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vlm.v v0, (a2) @@ -186,7 +186,7 @@ define void @vselect_vx_v32f16(half %a, <32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) { ; CHECK-LABEL: vselect_vx_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle16.v v8, (a0) @@ -205,7 +205,7 @@ define void @vselect_vfpzero_v32f16(<32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) { ; CHECK-LABEL: vselect_vfpzero_v32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle16.v v8, (a0) @@ -272,7 +272,7 @@ define <32 x i1> @vselect_v32i1(<32 x i1> %a, <32 x i1> %b, <32 x i1> %cc) { ; CHECK-LABEL: vselect_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 @@ -285,7 +285,7 @@ define <64 x i1> @vselect_v64i1(<64 x i1> %a, <64 x i1> %b, <64 x i1> %cc) { ; CHECK-LABEL: vselect_v64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 64 +; CHECK-NEXT: li a0, 64 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vsll_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 127 +; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 127 +; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vadd.vv v8, v8, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vsrl_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 127 +; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll @@ -31,7 +31,7 @@ define <2 x i8> @ssub_v2i8_vi(<2 x i8> %va) { ; CHECK-LABEL: ssub_v2i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define <4 x i8> @ssub_v4i8_vi(<4 x i8> %va) { ; CHECK-LABEL: ssub_v4i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ define <8 x i8> @ssub_v8i8_vi(<8 x i8> %va) { ; CHECK-LABEL: ssub_v8i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -142,7 +142,7 @@ define <16 x i8> @ssub_v16i8_vi(<16 x i8> %va) { ; CHECK-LABEL: ssub_v16i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -179,7 +179,7 @@ define <2 x i16> @ssub_v2i16_vi(<2 x i16> %va) { ; CHECK-LABEL: ssub_v2i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -216,7 +216,7 @@ define <4 x i16> @ssub_v4i16_vi(<4 x i16> %va) { ; CHECK-LABEL: ssub_v4i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define <8 x i16> @ssub_v8i16_vi(<8 x i16> %va) { ; CHECK-LABEL: ssub_v8i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -290,7 +290,7 @@ define <16 x i16> @ssub_v16i16_vi(<16 x i16> %va) { ; CHECK-LABEL: ssub_v16i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -327,7 +327,7 @@ define <2 x i32> @ssub_v2i32_vi(<2 x i32> %va) { ; CHECK-LABEL: ssub_v2i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, 
zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -364,7 +364,7 @@ define <4 x i32> @ssub_v4i32_vi(<4 x i32> %va) { ; CHECK-LABEL: ssub_v4i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define <8 x i32> @ssub_v8i32_vi(<8 x i32> %va) { ; CHECK-LABEL: ssub_v8i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -438,7 +438,7 @@ define <16 x i32> @ssub_v16i32_vi(<16 x i32> %va) { ; CHECK-LABEL: ssub_v16i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -488,7 +488,7 @@ define <2 x i64> @ssub_v2i64_vi(<2 x i64> %va) { ; CHECK-LABEL: ssub_v2i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define <4 x i64> @ssub_v4i64_vi(<4 x i64> %va) { ; CHECK-LABEL: ssub_v4i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -588,7 +588,7 @@ define <8 x i64> @ssub_v8i64_vi(<8 x i64> %va) { ; CHECK-LABEL: ssub_v8i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -638,7 +638,7 @@ define <16 x i64> @ssub_v16i64_vi(<16 x i64> %va) { ; CHECK-LABEL: ssub_v16i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll @@ -31,7 +31,7 @@ define <2 x i8> @usub_v2i8_vi(<2 x i8> %va) { ; CHECK-LABEL: usub_v2i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define <4 x i8> @usub_v4i8_vi(<4 x i8> %va) { ; CHECK-LABEL: usub_v4i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ define <8 x i8> @usub_v8i8_vi(<8 x i8> %va) { ; CHECK-LABEL: usub_v8i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -142,7 +142,7 @@ define <16 x i8> @usub_v16i8_vi(<16 x i8> %va) { ; CHECK-LABEL: usub_v16i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -179,7 +179,7 @@ define <2 x i16> @usub_v2i16_vi(<2 x i16> %va) { ; CHECK-LABEL: usub_v2i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; 
CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -216,7 +216,7 @@ define <4 x i16> @usub_v4i16_vi(<4 x i16> %va) { ; CHECK-LABEL: usub_v4i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define <8 x i16> @usub_v8i16_vi(<8 x i16> %va) { ; CHECK-LABEL: usub_v8i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -290,7 +290,7 @@ define <16 x i16> @usub_v16i16_vi(<16 x i16> %va) { ; CHECK-LABEL: usub_v16i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -327,7 +327,7 @@ define <2 x i32> @usub_v2i32_vi(<2 x i32> %va) { ; CHECK-LABEL: usub_v2i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -364,7 +364,7 @@ define <4 x i32> @usub_v4i32_vi(<4 x i32> %va) { ; CHECK-LABEL: usub_v4i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define <8 x i32> @usub_v8i32_vi(<8 x i32> %va) { ; CHECK-LABEL: usub_v8i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -438,7 +438,7 @@ define <16 x i32> @usub_v16i32_vi(<16 x i32> %va) { ; CHECK-LABEL: usub_v16i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -488,7 +488,7 @@ define <2 x i64> @usub_v2i64_vi(<2 x i64> %va) { ; CHECK-LABEL: usub_v2i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define <4 x i64> @usub_v4i64_vi(<4 x i64> %va) { ; CHECK-LABEL: usub_v4i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -588,7 +588,7 @@ define <8 x i64> @usub_v8i64_vi(<8 x i64> %va) { ; CHECK-LABEL: usub_v8i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -638,7 +638,7 @@ define <16 x i64> @usub_v16i64_vi(<16 x i64> %va) { ; CHECK-LABEL: usub_v16i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll @@ -158,7 +158,7 @@ define <32 x i16> @vwmacc_v32i16(<32 x i8>* %x, <32 x i8>* %y, <32 x i16> %z) { ; CHECK-LABEL: vwmacc_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; 
CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) @@ -210,7 +210,7 @@ define <64 x i16> @vwmacc_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) { ; CHECK-LABEL: vwmacc_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) @@ -228,7 +228,7 @@ define <32 x i32> @vwmacc_v32i32(<32 x i16>* %x, <32 x i16>* %y, <32 x i32> %z) { ; CHECK-LABEL: vwmacc_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) @@ -416,7 +416,7 @@ define <32 x i16> @vwmacc_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) { ; CHECK-LABEL: vwmacc_vx_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v12 @@ -468,7 +468,7 @@ define <64 x i16> @vwmacc_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) { ; CHECK-LABEL: vwmacc_vx_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v16 @@ -486,7 +486,7 @@ define <32 x i32> @vwmacc_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) { ; CHECK-LABEL: vwmacc_vx_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll @@ -158,7 +158,7 @@ define <32 x i16> @vwmaccu_v32i16(<32 x i8>* %x, <32 x i8>* %y, <32 x i16> %z) { ; CHECK-LABEL: vwmaccu_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) @@ -210,7 +210,7 @@ define <64 x i16> @vwmaccu_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) { ; CHECK-LABEL: vwmaccu_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) @@ -228,7 +228,7 @@ define <32 x i32> @vwmaccu_v32i32(<32 x i16>* %x, <32 x i16>* %y, <32 x i32> %z) { ; CHECK-LABEL: vwmaccu_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) @@ -416,7 +416,7 @@ define <32 x i16> @vwmaccu_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) { ; CHECK-LABEL: vwmaccu_vx_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v12 @@ -468,7 +468,7 @@ define <64 x i16> @vwmaccu_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) { ; CHECK-LABEL: vwmaccu_vx_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmaccu.vx v8, 
a1, v16 @@ -486,7 +486,7 @@ define <32 x i32> @vwmaccu_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) { ; CHECK-LABEL: vwmaccu_vx_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll @@ -149,7 +149,7 @@ define <32 x i16> @vwmul_v32i16(<32 x i8>* %x, <32 x i8>* %y) { ; CHECK-LABEL: vwmul_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) @@ -198,7 +198,7 @@ define <64 x i16> @vwmul_v64i16(<64 x i8>* %x, <64 x i8>* %y) { ; CHECK-LABEL: vwmul_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) @@ -215,7 +215,7 @@ define <32 x i32> @vwmul_v32i32(<32 x i16>* %x, <32 x i16>* %y) { ; CHECK-LABEL: vwmul_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) @@ -253,11 +253,11 @@ ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 -; CHECK-NEXT: addi a2, zero, 128 +; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v24, (a1) -; CHECK-NEXT: addi a0, zero, 64 +; CHECK-NEXT: li a0, 64 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 @@ -289,11 +289,11 @@ ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 @@ -325,7 +325,7 @@ ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) @@ -553,7 +553,7 @@ define <32 x i16> @vwmul_vx_v32i16(<32 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwmul_vx_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmul.vx v8, v12, a1 @@ -602,7 +602,7 @@ define <64 x i16> @vwmul_vx_v64i16(<64 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwmul_vx_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmul.vx v8, v16, a1 @@ -619,7 +619,7 @@ define <32 x i32> @vwmul_vx_v32i32(<32 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwmul_vx_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmul.vx v8, v16, a1 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll @@ -149,7 +149,7 @@ define <32 x i16> @vwmulu_v32i16(<32 x i8>* %x, <32 x i8>* %y) { ; CHECK-LABEL: vwmulu_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) @@ -198,7 +198,7 @@ define <64 x i16> @vwmulu_v64i16(<64 x i8>* %x, <64 x i8>* %y) { ; CHECK-LABEL: vwmulu_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) @@ -215,7 +215,7 @@ define <32 x i32> @vwmulu_v32i32(<32 x i16>* %x, <32 x i16>* %y) { ; CHECK-LABEL: vwmulu_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) @@ -253,11 +253,11 @@ ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 -; CHECK-NEXT: addi a2, zero, 128 +; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v24, (a1) -; CHECK-NEXT: addi a0, zero, 64 +; CHECK-NEXT: li a0, 64 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 @@ -289,11 +289,11 @@ ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 @@ -325,7 +325,7 @@ ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) @@ -553,7 +553,7 @@ define <32 x i16> @vwmulu_vx_v32i16(<32 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwmulu_vx_v32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmulu.vx v8, v12, a1 @@ -602,7 +602,7 @@ define <64 x i16> @vwmulu_vx_v64i16(<64 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwmulu_vx_v64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 64 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmulu.vx v8, v16, a1 @@ -619,7 +619,7 @@ define <32 x i32> @vwmulu_vx_v32i32(<32 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwmulu_vx_v32i32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmulu.vx v8, v16, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll @@ -808,7 +808,7 @@ define @insertelt_nxv2i64_0_c10( %v) { ; CHECK-LABEL: 
insertelt_nxv2i64_0_c10: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 10 +; CHECK-NEXT: li a0, 10 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret @@ -819,7 +819,7 @@ define @insertelt_nxv2i64_imm_c10( %v) { ; CHECK-LABEL: insertelt_nxv2i64_imm_c10: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 10 +; CHECK-NEXT: li a0, 10 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu @@ -832,7 +832,7 @@ define @insertelt_nxv2i64_idx_c10( %v, i32 %idx) { ; CHECK-LABEL: insertelt_nxv2i64_idx_c10: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, 10 +; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a1 ; CHECK-NEXT: addi a1, a0, 1 @@ -846,7 +846,7 @@ define @insertelt_nxv2i64_0_cn1( %v) { ; CHECK-LABEL: insertelt_nxv2i64_0_cn1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret @@ -857,7 +857,7 @@ define @insertelt_nxv2i64_imm_cn1( %v) { ; CHECK-LABEL: insertelt_nxv2i64_imm_cn1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu @@ -870,7 +870,7 @@ define @insertelt_nxv2i64_idx_cn1( %v, i32 %idx) { ; CHECK-LABEL: insertelt_nxv2i64_idx_cn1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, zero, -1 +; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a1 ; CHECK-NEXT: addi a1, a0, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll --- a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll @@ -5,11 +5,11 @@ define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16>* %1) { ; RV64-1024-LABEL: interleave256: ; RV64-1024: # %bb.0: # %entry -; RV64-1024-NEXT: addi a3, zero, 128 +; RV64-1024-NEXT: li a3, 128 ; RV64-1024-NEXT: vsetvli zero, a3, e16, m2, ta, mu ; RV64-1024-NEXT: vle16.v v12, (a1) ; RV64-1024-NEXT: vle16.v v16, (a2) -; RV64-1024-NEXT: addi a1, zero, 256 +; RV64-1024-NEXT: li a1, 256 ; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV64-1024-NEXT: vmv.v.i v8, 0 ; RV64-1024-NEXT: vsetvli zero, a3, e16, m4, tu, mu @@ -53,11 +53,11 @@ ; ; RV64-2048-LABEL: interleave256: ; RV64-2048: # %bb.0: # %entry -; RV64-2048-NEXT: addi a3, zero, 128 +; RV64-2048-NEXT: li a3, 128 ; RV64-2048-NEXT: vsetvli zero, a3, e16, m1, ta, mu ; RV64-2048-NEXT: vle16.v v10, (a1) ; RV64-2048-NEXT: vle16.v v12, (a2) -; RV64-2048-NEXT: addi a1, zero, 256 +; RV64-2048-NEXT: li a1, 256 ; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-2048-NEXT: vmv.v.i v8, 0 ; RV64-2048-NEXT: vsetvli zero, a3, e16, m2, tu, mu @@ -114,10 +114,10 @@ ; RV64-1024-NEXT: addi sp, sp, -16 ; RV64-1024-NEXT: .cfi_def_cfa_offset 16 ; RV64-1024-NEXT: csrr a3, vlenb -; RV64-1024-NEXT: addi a4, zero, 40 +; RV64-1024-NEXT: li a4, 40 ; RV64-1024-NEXT: mul a3, a3, a4 ; RV64-1024-NEXT: sub sp, sp, a3 -; RV64-1024-NEXT: addi a3, zero, 256 +; RV64-1024-NEXT: li a3, 256 ; RV64-1024-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; RV64-1024-NEXT: vle16.v v16, (a1) ; RV64-1024-NEXT: vle16.v v8, (a2) @@ -126,7 +126,7 @@ ; RV64-1024-NEXT: add a1, sp, a1 ; RV64-1024-NEXT: addi a1, a1, 16 ; RV64-1024-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; 
RV64-1024-NEXT: addi a1, zero, 512 +; RV64-1024-NEXT: li a1, 512 ; RV64-1024-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-1024-NEXT: vmv.v.i v8, 0 ; RV64-1024-NEXT: csrr a2, vlenb @@ -146,7 +146,7 @@ ; RV64-1024-NEXT: vid.v v24 ; RV64-1024-NEXT: vsrl.vi v16, v24, 1 ; RV64-1024-NEXT: csrr a2, vlenb -; RV64-1024-NEXT: addi a4, zero, 24 +; RV64-1024-NEXT: li a4, 24 ; RV64-1024-NEXT: mul a2, a2, a4 ; RV64-1024-NEXT: add a2, sp, a2 ; RV64-1024-NEXT: addi a2, a2, 16 @@ -212,7 +212,7 @@ ; RV64-1024-NEXT: vslideup.vi v0, v16, 7 ; RV64-1024-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-1024-NEXT: csrr a1, vlenb -; RV64-1024-NEXT: addi a2, zero, 24 +; RV64-1024-NEXT: li a2, 24 ; RV64-1024-NEXT: mul a1, a1, a2 ; RV64-1024-NEXT: add a1, sp, a1 ; RV64-1024-NEXT: addi a1, a1, 16 @@ -225,7 +225,7 @@ ; RV64-1024-NEXT: vrgather.vv v8, v24, v16, v0.t ; RV64-1024-NEXT: vse16.v v8, (a0) ; RV64-1024-NEXT: csrr a0, vlenb -; RV64-1024-NEXT: addi a1, zero, 40 +; RV64-1024-NEXT: li a1, 40 ; RV64-1024-NEXT: mul a0, a0, a1 ; RV64-1024-NEXT: add sp, sp, a0 ; RV64-1024-NEXT: addi sp, sp, 16 @@ -233,11 +233,11 @@ ; ; RV64-2048-LABEL: interleave512: ; RV64-2048: # %bb.0: # %entry -; RV64-2048-NEXT: addi a3, zero, 256 +; RV64-2048-NEXT: li a3, 256 ; RV64-2048-NEXT: vsetvli zero, a3, e16, m2, ta, mu ; RV64-2048-NEXT: vle16.v v12, (a1) ; RV64-2048-NEXT: vle16.v v16, (a2) -; RV64-2048-NEXT: addi a1, zero, 512 +; RV64-2048-NEXT: li a1, 512 ; RV64-2048-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV64-2048-NEXT: vmv.v.i v8, 0 ; RV64-2048-NEXT: vsetvli zero, a3, e16, m4, tu, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll --- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll @@ -12,7 +12,7 @@ ; RV64IV-LABEL: callee: ; RV64IV: # %bb.0: ; RV64IV-NEXT: vl8r.v v24, (a0) -; RV64IV-NEXT: addi a0, zero, 1024 +; RV64IV-NEXT: li a0, 1024 ; RV64IV-NEXT: vsetvli zero, a0, e8, m8, tu, mu ; RV64IV-NEXT: vmacc.vv v8, v16, v24 ; RV64IV-NEXT: ret @@ -39,7 +39,7 @@ ; RV64IV-NEXT: sub sp, sp, a0 ; RV64IV-NEXT: andi sp, sp, -64 ; RV64IV-NEXT: csrr a0, vlenb -; RV64IV-NEXT: addi a1, zero, 24 +; RV64IV-NEXT: li a1, 24 ; RV64IV-NEXT: mul a0, a0, a1 ; RV64IV-NEXT: add a0, sp, a0 ; RV64IV-NEXT: addi a0, a0, 48 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll @@ -254,7 +254,7 @@ ; SPILL-O2: # %bb.0: # %entry ; SPILL-O2-NEXT: addi sp, sp, -16 ; SPILL-O2-NEXT: csrr a2, vlenb -; SPILL-O2-NEXT: addi a3, zero, 6 +; SPILL-O2-NEXT: li a3, 6 ; SPILL-O2-NEXT: mul a2, a2, a3 ; SPILL-O2-NEXT: sub sp, sp, a2 ; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -279,7 +279,7 @@ ; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload ; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; SPILL-O2-NEXT: csrr a0, vlenb -; SPILL-O2-NEXT: addi a1, zero, 6 +; SPILL-O2-NEXT: li a1, 6 ; SPILL-O2-NEXT: mul a0, a0, a1 ; SPILL-O2-NEXT: add sp, sp, a0 ; SPILL-O2-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll @@ -254,7 +254,7 @@ ; SPILL-O2: # %bb.0: # %entry ; SPILL-O2-NEXT: addi sp, sp, -16 ; SPILL-O2-NEXT: csrr a2, vlenb -; SPILL-O2-NEXT: addi a3, zero, 6 +; SPILL-O2-NEXT: li 
a3, 6 ; SPILL-O2-NEXT: mul a2, a2, a3 ; SPILL-O2-NEXT: sub sp, sp, a2 ; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -279,7 +279,7 @@ ; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload ; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; SPILL-O2-NEXT: csrr a0, vlenb -; SPILL-O2-NEXT: addi a1, zero, 6 +; SPILL-O2-NEXT: li a1, 6 ; SPILL-O2-NEXT: mul a0, a0, a1 ; SPILL-O2-NEXT: add sp, sp, a0 ; SPILL-O2-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll @@ -86,7 +86,7 @@ ; CHECK-NEXT: sub a0, s0, a0 ; CHECK-NEXT: addi a0, a0, -112 ; CHECK-NEXT: vs8r.v v8, (a0) -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: sw a0, -68(s0) ; CHECK-NEXT: sw a0, -72(s0) ; CHECK-NEXT: sw a0, -76(s0) @@ -137,7 +137,7 @@ ; CHECK-NEXT: sd t0, 0(sp) ; CHECK-NEXT: call lots_args ; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: mv a0, zero +; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add sp, sp, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll @@ -5,7 +5,7 @@ define i32 @vscale_zero() nounwind { ; CHECK-LABEL: vscale_zero: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: mv a0, zero +; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: ret entry: %0 = call i32 @llvm.vscale.i32() diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll @@ -8,13 +8,13 @@ define i64 @vscale_zero() nounwind { ; RV64-LABEL: vscale_zero: ; RV64: # %bb.0: # %entry -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; ; RV32-LABEL: vscale_zero: ; RV32: # %bb.0: # %entry -; RV32-NEXT: mv a0, zero -; RV32-NEXT: mv a1, zero +; RV32-NEXT: li a0, 0 +; RV32-NEXT: li a1, 0 ; RV32-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -33,7 +33,7 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: srli a0, a0, 3 -; RV32-NEXT: mv a1, zero +; RV32-NEXT: li a1, 0 ; RV32-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -52,7 +52,7 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 -; RV32-NEXT: mv a1, zero +; RV32-NEXT: li a1, 0 ; RV32-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -73,7 +73,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a1, a0, 1 ; RV32-NEXT: add a0, a1, a0 -; RV32-NEXT: mv a1, zero +; RV32-NEXT: li a1, 0 ; RV32-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -97,7 +97,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: srli a0, a0, 3 -; RV32-NEXT: mv a1, zero +; RV32-NEXT: li a1, 0 ; RV32-NEXT: ret %a = call i64 @llvm.vscale.i64() %b = and i64 %a, 4294967295 diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll @@ -325,7 +325,7 @@ define @icmp_ult_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret @@ 
-638,7 +638,7 @@ define @icmp_slt_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -1081,7 +1081,7 @@ define @icmp_ult_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -1380,7 +1380,7 @@ define @icmp_slt_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -1809,7 +1809,7 @@ define @icmp_ult_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -2108,7 +2108,7 @@ define @icmp_slt_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -2601,7 +2601,7 @@ define @icmp_ult_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -2951,7 +2951,7 @@ define @icmp_slt_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll @@ -339,7 +339,7 @@ define @icmp_ult_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -652,7 +652,7 @@ define @icmp_slt_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -1081,7 +1081,7 @@ define @icmp_ult_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -1380,7 +1380,7 @@ define @icmp_slt_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -1809,7 +1809,7 @@ define @icmp_ult_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -2108,7 +2108,7 @@ 
define @icmp_slt_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -2537,7 +2537,7 @@ define @icmp_ult_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret @@ -2836,7 +2836,7 @@ define @icmp_slt_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: li a0, -16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -5,7 +5,7 @@ define void @sink_splat_mul(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_mul: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB0_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -41,7 +41,7 @@ define void @sink_splat_add(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_add: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB1_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -77,7 +77,7 @@ define void @sink_splat_sub(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_sub: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB2_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -113,7 +113,7 @@ define void @sink_splat_rsub(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_rsub: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB3_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -149,7 +149,7 @@ define void @sink_splat_and(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_and: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB4_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -185,7 +185,7 @@ define void @sink_splat_or(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_or: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB5_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -221,7 +221,7 @@ define void @sink_splat_xor(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_xor: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB6_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -259,13 +259,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 
1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB7_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB7_5 ; CHECK-NEXT: .LBB7_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -352,13 +352,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB8_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB8_5 ; CHECK-NEXT: .LBB8_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -445,13 +445,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB9_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB9_5 ; CHECK-NEXT: .LBB9_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -538,13 +538,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB10_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB10_5 ; CHECK-NEXT: .LBB10_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -631,13 +631,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB11_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB11_5 ; CHECK-NEXT: .LBB11_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -724,13 +724,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB12_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB12_5 ; CHECK-NEXT: .LBB12_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -817,13 +817,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB13_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB13_5 ; CHECK-NEXT: .LBB13_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -908,7 +908,7 @@ define void @sink_splat_shl(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_shl: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB14_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, 
mu @@ -944,7 +944,7 @@ define void @sink_splat_lshr(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_lshr: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB15_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -980,7 +980,7 @@ define void @sink_splat_ashr(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_ashr: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB16_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1018,13 +1018,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB17_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB17_5 ; CHECK-NEXT: .LBB17_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -1111,13 +1111,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a7, a2, 1 -; CHECK-NEXT: addi a3, zero, 1024 +; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB18_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB18_5 ; CHECK-NEXT: .LBB18_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a3, a7 ; CHECK-NEXT: sub t0, a3, a6 ; CHECK-NEXT: slli a4, a2, 1 @@ -1204,13 +1204,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a7, a3, 1 -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: bgeu a2, a7, .LBB19_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, zero +; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: j .LBB19_5 ; CHECK-NEXT: .LBB19_2: # %vector.ph -; CHECK-NEXT: mv a4, zero +; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: remu a6, a2, a7 ; CHECK-NEXT: sub a2, a2, a6 ; CHECK-NEXT: slli a5, a3, 1 @@ -1296,7 +1296,7 @@ ; CHECK-LABEL: sink_splat_fmul: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: li a1, 1024 ; CHECK-NEXT: .LBB20_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1333,7 +1333,7 @@ ; CHECK-LABEL: sink_splat_fdiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: li a1, 1024 ; CHECK-NEXT: .LBB21_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1370,7 +1370,7 @@ ; CHECK-LABEL: sink_splat_frdiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: li a1, 1024 ; CHECK-NEXT: .LBB22_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1407,7 +1407,7 @@ ; CHECK-LABEL: sink_splat_fadd: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: li a1, 1024 ; CHECK-NEXT: .LBB23_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1444,7 +1444,7 @@ ; CHECK-LABEL: sink_splat_fsub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: li a1, 1024 ; 
CHECK-NEXT: .LBB24_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1481,7 +1481,7 @@ ; CHECK-LABEL: sink_splat_frsub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: li a1, 1024 ; CHECK-NEXT: .LBB25_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1519,14 +1519,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a7, vlenb ; CHECK-NEXT: srli a3, a7, 2 -; CHECK-NEXT: addi a4, zero, 1024 +; CHECK-NEXT: li a4, 1024 ; CHECK-NEXT: fmv.w.x ft0, a1 ; CHECK-NEXT: bgeu a4, a3, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, zero +; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: j .LBB26_5 ; CHECK-NEXT: .LBB26_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a4, a3 ; CHECK-NEXT: sub a1, a4, a6 ; CHECK-NEXT: mv a4, a0 @@ -1612,14 +1612,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a7, vlenb ; CHECK-NEXT: srli a3, a7, 2 -; CHECK-NEXT: addi a4, zero, 1024 +; CHECK-NEXT: li a4, 1024 ; CHECK-NEXT: fmv.w.x ft0, a1 ; CHECK-NEXT: bgeu a4, a3, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, zero +; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: j .LBB27_5 ; CHECK-NEXT: .LBB27_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a4, a3 ; CHECK-NEXT: sub a1, a4, a6 ; CHECK-NEXT: mv a4, a0 @@ -1705,14 +1705,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a7, vlenb ; CHECK-NEXT: srli a3, a7, 2 -; CHECK-NEXT: addi a4, zero, 1024 +; CHECK-NEXT: li a4, 1024 ; CHECK-NEXT: fmv.w.x ft0, a1 ; CHECK-NEXT: bgeu a4, a3, .LBB28_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, zero +; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: j .LBB28_5 ; CHECK-NEXT: .LBB28_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a4, a3 ; CHECK-NEXT: sub a1, a4, a6 ; CHECK-NEXT: mv a4, a0 @@ -1798,14 +1798,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a7, vlenb ; CHECK-NEXT: srli a3, a7, 2 -; CHECK-NEXT: addi a4, zero, 1024 +; CHECK-NEXT: li a4, 1024 ; CHECK-NEXT: fmv.w.x ft0, a1 ; CHECK-NEXT: bgeu a4, a3, .LBB29_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, zero +; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: j .LBB29_5 ; CHECK-NEXT: .LBB29_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a4, a3 ; CHECK-NEXT: sub a1, a4, a6 ; CHECK-NEXT: mv a4, a0 @@ -1891,14 +1891,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a7, vlenb ; CHECK-NEXT: srli a3, a7, 2 -; CHECK-NEXT: addi a4, zero, 1024 +; CHECK-NEXT: li a4, 1024 ; CHECK-NEXT: fmv.w.x ft0, a1 ; CHECK-NEXT: bgeu a4, a3, .LBB30_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, zero +; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: j .LBB30_5 ; CHECK-NEXT: .LBB30_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a4, a3 ; CHECK-NEXT: sub a1, a4, a6 ; CHECK-NEXT: mv a4, a0 @@ -1984,14 +1984,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a7, vlenb ; CHECK-NEXT: srli a3, a7, 2 -; CHECK-NEXT: addi a4, zero, 1024 +; CHECK-NEXT: li a4, 1024 ; CHECK-NEXT: fmv.w.x ft0, a1 ; CHECK-NEXT: bgeu a4, a3, .LBB31_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, zero +; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: j .LBB31_5 ; CHECK-NEXT: .LBB31_2: # %vector.ph -; CHECK-NEXT: mv a5, zero +; CHECK-NEXT: li a5, 0 ; CHECK-NEXT: remu a6, a4, a3 ; CHECK-NEXT: sub a1, a4, a6 ; CHECK-NEXT: mv a4, a0 @@ -2076,7 +2076,7 @@ ; CHECK-LABEL: sink_splat_fma: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a2 -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB32_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -2118,7 +2118,7 @@ ; CHECK-LABEL: sink_splat_fma_commute: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a2 -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: .LBB33_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -2161,15 +2161,15 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a7, vlenb ; CHECK-NEXT: srli t1, a7, 2 -; CHECK-NEXT: addi t0, zero, 1024 +; CHECK-NEXT: li t0, 1024 ; CHECK-NEXT: fmv.w.x ft0, a2 ; CHECK-NEXT: bgeu t0, t1, .LBB34_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB34_5 ; CHECK-NEXT: .LBB34_2: # %vector.ph -; CHECK-NEXT: mv a5, zero -; CHECK-NEXT: mv a3, zero +; CHECK-NEXT: li a5, 0 +; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: remu a6, t0, t1 ; CHECK-NEXT: sub t0, t0, a6 ; CHECK-NEXT: .LBB34_3: # %vector.body @@ -2265,15 +2265,15 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a7, vlenb ; CHECK-NEXT: srli t1, a7, 2 -; CHECK-NEXT: addi t0, zero, 1024 +; CHECK-NEXT: li t0, 1024 ; CHECK-NEXT: fmv.w.x ft0, a2 ; CHECK-NEXT: bgeu t0, t1, .LBB35_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, zero +; CHECK-NEXT: li t0, 0 ; CHECK-NEXT: j .LBB35_5 ; CHECK-NEXT: .LBB35_2: # %vector.ph -; CHECK-NEXT: mv a5, zero -; CHECK-NEXT: mv a3, zero +; CHECK-NEXT: li a5, 0 +; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: remu a6, t0, t1 ; CHECK-NEXT: sub t0, t0, a6 ; CHECK-NEXT: .LBB35_3: # %vector.body @@ -2372,7 +2372,7 @@ define void @sink_splat_icmp(i32* nocapture %x, i32 signext %y) { ; CHECK-LABEL: sink_splat_icmp: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a2, zero, 1024 +; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB36_1: # %vector.body @@ -2411,7 +2411,7 @@ ; CHECK-LABEL: sink_splat_fcmp: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: li a1, 1024 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB37_1: # %vector.body diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll @@ -69,7 +69,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -210,7 +210,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -327,7 +327,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: addi a0, zero, 3 +; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; 
RV32-NEXT: addi a0, zero, 7 +; RV32-NEXT: li a0, 7 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: lui a0, 797989 ; RV32-NEXT: addi a0, a0, -683 @@ -560,7 +560,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vid.v v8 -; RV32-NEXT: addi a0, zero, 3 +; RV32-NEXT: li a0, 3 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vadd.vv v16, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -570,7 +570,7 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vid.v v8 -; RV64-NEXT: addi a0, zero, 3 +; RV64-NEXT: li a0, 3 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a1, a0, 1 @@ -590,7 +590,7 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: addi a0, zero, 7 +; RV32-NEXT: li a0, 7 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: lui a0, 797989 ; RV32-NEXT: addi a0, a0, -683 @@ -601,7 +601,7 @@ ; RV32-NEXT: mul a1, a0, a1 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: srli a0, a0, 3 -; RV32-NEXT: addi a1, zero, 62 +; RV32-NEXT: li a1, 62 ; RV32-NEXT: mul a1, a0, a1 ; RV32-NEXT: lui a2, 92455 ; RV32-NEXT: addi a2, a2, -1368 diff --git a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll --- a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll @@ -83,7 +83,7 @@ define @test_urem_vec_even_divisor_eq1( %x) nounwind { ; RV32-LABEL: test_urem_vec_even_divisor_eq1: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV32-NEXT: vsub.vx v8, v8, a0 ; RV32-NEXT: lui a0, 1048571 @@ -101,7 +101,7 @@ ; ; RV64-LABEL: test_urem_vec_even_divisor_eq1: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: lui a0, 1048571 @@ -129,7 +129,7 @@ define @test_urem_vec_odd_divisor_eq1( %x) nounwind { ; RV32-LABEL: test_urem_vec_odd_divisor_eq1: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV32-NEXT: vsub.vx v8, v8, a0 ; RV32-NEXT: lui a0, 1048573 @@ -144,7 +144,7 @@ ; ; RV64-LABEL: test_urem_vec_odd_divisor_eq1: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: lui a0, 1048573 diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -640,7 +640,7 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: .LBB49_2: -; CHECK-NEXT: mv a4, zero +; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, mu ; CHECK-NEXT: vlm.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu @@ -670,7 +670,7 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB50_2: -; CHECK-NEXT: mv a3, zero +; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vadd.vi v8, v8, -1 @@ -1533,7 +1533,7 @@ ; CHECK-LABEL: vadd_vi_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: mv a2, zero +; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu @@ -1565,7 +1565,7 @@ define @vadd_vi_nxv32i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv32i32_unmasked: ; CHECK: # %bb.0: 
-; CHECK-NEXT: mv a2, zero +; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, m4, ta, mu @@ -1607,7 +1607,7 @@ ; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: mv a2, zero +; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a4, a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll @@ -51,7 +51,7 @@ define @vand_vi_nxv1i8_2( %va) { ; CHECK-LABEL: vand_vi_nxv1i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -110,7 +110,7 @@ define @vand_vi_nxv2i8_2( %va) { ; CHECK-LABEL: vand_vi_nxv2i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -169,7 +169,7 @@ define @vand_vi_nxv4i8_2( %va) { ; CHECK-LABEL: vand_vi_nxv4i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -228,7 +228,7 @@ define @vand_vi_nxv8i8_2( %va) { ; CHECK-LABEL: vand_vi_nxv8i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -287,7 +287,7 @@ define @vand_vi_nxv16i8_2( %va) { ; CHECK-LABEL: vand_vi_nxv16i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @vand_vi_nxv32i8_2( %va) { ; CHECK-LABEL: vand_vi_nxv32i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -405,7 +405,7 @@ define @vand_vi_nxv64i8_2( %va) { ; CHECK-LABEL: vand_vi_nxv64i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -464,7 +464,7 @@ define @vand_vi_nxv1i16_2( %va) { ; CHECK-LABEL: vand_vi_nxv1i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -523,7 +523,7 @@ define @vand_vi_nxv2i16_2( %va) { ; CHECK-LABEL: vand_vi_nxv2i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -582,7 +582,7 @@ define @vand_vi_nxv4i16_2( %va) { ; CHECK-LABEL: vand_vi_nxv4i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -641,7 +641,7 @@ define @vand_vi_nxv8i16_2( %va) { ; CHECK-LABEL: vand_vi_nxv8i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -700,7 +700,7 @@ define @vand_vi_nxv16i16_2( 
%va) { ; CHECK-LABEL: vand_vi_nxv16i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -759,7 +759,7 @@ define @vand_vi_nxv32i16_2( %va) { ; CHECK-LABEL: vand_vi_nxv32i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -818,7 +818,7 @@ define @vand_vi_nxv1i32_2( %va) { ; CHECK-LABEL: vand_vi_nxv1i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -877,7 +877,7 @@ define @vand_vi_nxv2i32_2( %va) { ; CHECK-LABEL: vand_vi_nxv2i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -936,7 +936,7 @@ define @vand_vi_nxv4i32_2( %va) { ; CHECK-LABEL: vand_vi_nxv4i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -995,7 +995,7 @@ define @vand_vi_nxv8i32_2( %va) { ; CHECK-LABEL: vand_vi_nxv8i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1054,7 +1054,7 @@ define @vand_vi_nxv16i32_2( %va) { ; CHECK-LABEL: vand_vi_nxv16i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1126,7 +1126,7 @@ define @vand_vi_nxv1i64_2( %va) { ; CHECK-LABEL: vand_vi_nxv1i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1198,7 +1198,7 @@ define @vand_vi_nxv2i64_2( %va) { ; CHECK-LABEL: vand_vi_nxv2i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1270,7 +1270,7 @@ define @vand_vi_nxv4i64_2( %va) { ; CHECK-LABEL: vand_vi_nxv4i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1342,7 +1342,7 @@ define @vand_vi_nxv8i64_2( %va) { ; CHECK-LABEL: vand_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll @@ -27,7 +27,7 @@ define @vdiv_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vdiv_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v8, v9, v8 @@ -90,7 +90,7 @@ define @vdiv_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vdiv_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; 
CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v8, v9, v8 @@ -129,7 +129,7 @@ define @vdiv_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vdiv_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v8, v9, v8 @@ -168,7 +168,7 @@ define @vdiv_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vdiv_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v8, v9, v8 @@ -207,7 +207,7 @@ define @vdiv_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vdiv_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmulh.vx v10, v8, a0 ; CHECK-NEXT: vsub.vv v8, v10, v8 @@ -246,7 +246,7 @@ define @vdiv_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vdiv_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmulh.vx v12, v8, a0 ; CHECK-NEXT: vsub.vv v8, v12, v8 @@ -285,7 +285,7 @@ define @vdiv_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vdiv_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmulh.vx v16, v8, a0 ; CHECK-NEXT: vsub.vv v8, v16, v8 @@ -909,7 +909,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v9 -; RV32-NEXT: addi a0, zero, 63 +; RV32-NEXT: li a0, 63 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v9 @@ -928,7 +928,7 @@ ; RV64-NEXT: addi a0, a0, 1755 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmulh.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 63 +; RV64-NEXT: li a0, 63 ; RV64-NEXT: vsrl.vx v9, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vadd.vv v8, v8, v9 @@ -989,7 +989,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v10 -; RV32-NEXT: addi a0, zero, 63 +; RV32-NEXT: li a0, 63 ; RV32-NEXT: vsrl.vx v10, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v10 @@ -1008,7 +1008,7 @@ ; RV64-NEXT: addi a0, a0, 1755 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmulh.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 63 +; RV64-NEXT: li a0, 63 ; RV64-NEXT: vsrl.vx v10, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vadd.vv v8, v8, v10 @@ -1069,7 +1069,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v12 -; RV32-NEXT: addi a0, zero, 63 +; RV32-NEXT: li a0, 63 ; RV32-NEXT: vsrl.vx v12, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v12 @@ -1088,7 +1088,7 @@ ; RV64-NEXT: addi a0, a0, 1755 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmulh.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 63 +; RV64-NEXT: li a0, 63 ; RV64-NEXT: vsrl.vx v12, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vadd.vv v8, v8, v12 @@ -1149,7 +1149,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v16 -; RV32-NEXT: addi a0, zero, 63 +; RV32-NEXT: li a0, 63 ; RV32-NEXT: vsrl.vx v16, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v16 @@ -1168,7 +1168,7 @@ ; RV64-NEXT: addi a0, a0, 1755 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmulh.vx v8, v8, a0 -; 
RV64-NEXT: addi a0, zero, 63 +; RV64-NEXT: li a0, 63 ; RV64-NEXT: vsrl.vx v16, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vadd.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll @@ -27,7 +27,7 @@ define @vdivu_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vdivu_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 @@ -87,7 +87,7 @@ define @vdivu_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vdivu_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 @@ -123,7 +123,7 @@ define @vdivu_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vdivu_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 @@ -159,7 +159,7 @@ define @vdivu_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vdivu_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 @@ -195,7 +195,7 @@ define @vdivu_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vdivu_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 @@ -231,7 +231,7 @@ define @vdivu_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vdivu_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 @@ -267,7 +267,7 @@ define @vdivu_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vdivu_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 @@ -826,25 +826,25 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v9 -; RV32-NEXT: addi a0, zero, 61 +; RV32-NEXT: li a0, 61 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv1i64_0: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 61 ; RV64-NEXT: addi a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 61 +; RV64-NEXT: li a0, 61 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -922,25 +922,25 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v10 -; RV32-NEXT: addi a0, zero, 61 +; 
RV32-NEXT: li a0, 61 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv2i64_0: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 61 ; RV64-NEXT: addi a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 61 +; RV64-NEXT: li a0, 61 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -1018,25 +1018,25 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v12 -; RV32-NEXT: addi a0, zero, 61 +; RV32-NEXT: li a0, 61 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv4i64_0: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 61 ; RV64-NEXT: addi a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 61 +; RV64-NEXT: li a0, 61 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -1114,25 +1114,25 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v16 -; RV32-NEXT: addi a0, zero, 61 +; RV32-NEXT: li a0, 61 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv8i64_0: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 61 ; RV64-NEXT: addi a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 -; RV64-NEXT: addi a0, zero, 61 +; RV64-NEXT: li a0, 61 ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll @@ -9,7 +9,7 @@ define @vdivu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 127 +; CHECK-NEXT: li a2, 127 ; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll @@ -29,7 +29,7 @@ define @vmax_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define @vmax_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ define @vmax_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv4i8_0: ; CHECK: # %bb.0: -; 
CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @vmax_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -181,7 +181,7 @@ define @vmax_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -219,7 +219,7 @@ define @vmax_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -257,7 +257,7 @@ define @vmax_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define @vmax_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -333,7 +333,7 @@ define @vmax_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -371,7 +371,7 @@ define @vmax_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -409,7 +409,7 @@ define @vmax_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @vmax_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @vmax_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -523,7 +523,7 @@ define @vmax_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -561,7 +561,7 @@ define @vmax_vi_nxv2i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -599,7 +599,7 @@ define @vmax_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, 
a0 ; CHECK-NEXT: ret @@ -637,7 +637,7 @@ define @vmax_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -675,7 +675,7 @@ define @vmax_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -726,7 +726,7 @@ define @vmax_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vmax_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -777,7 +777,7 @@ define @vmax_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vmax_vi_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @vmax_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vmax_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -879,7 +879,7 @@ define @vmax_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vmax_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll @@ -29,7 +29,7 @@ define @vmax_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define @vmax_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ define @vmax_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @vmax_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -181,7 +181,7 @@ define @vmax_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -219,7 +219,7 @@ define @vmax_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -257,7 +257,7 @@ define @vmax_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vmax_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: 
vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define @vmax_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -333,7 +333,7 @@ define @vmax_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -371,7 +371,7 @@ define @vmax_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -409,7 +409,7 @@ define @vmax_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @vmax_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @vmax_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vmax_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -523,7 +523,7 @@ define @vmax_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -561,7 +561,7 @@ define @vmax_vi_nxv2i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -599,7 +599,7 @@ define @vmax_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -637,7 +637,7 @@ define @vmax_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -675,7 +675,7 @@ define @vmax_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vmax_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -726,7 +726,7 @@ define @vmax_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vmax_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -777,7 +777,7 @@ define @vmax_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vmax_vi_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define 
@vmax_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vmax_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -879,7 +879,7 @@ define @vmax_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vmax_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll @@ -29,7 +29,7 @@ define @vmin_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define @vmin_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ define @vmin_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @vmin_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -181,7 +181,7 @@ define @vmin_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -219,7 +219,7 @@ define @vmin_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -257,7 +257,7 @@ define @vmin_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define @vmin_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -333,7 +333,7 @@ define @vmin_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -371,7 +371,7 @@ define @vmin_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -409,7 +409,7 @@ define @vmin_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, 
v8, a0 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @vmin_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @vmin_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -523,7 +523,7 @@ define @vmin_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -561,7 +561,7 @@ define @vmin_vi_nxv2i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -599,7 +599,7 @@ define @vmin_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -637,7 +637,7 @@ define @vmin_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -675,7 +675,7 @@ define @vmin_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -726,7 +726,7 @@ define @vmin_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vmin_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -777,7 +777,7 @@ define @vmin_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vmin_vi_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @vmin_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vmin_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -879,7 +879,7 @@ define @vmin_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vmin_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll @@ -29,7 +29,7 @@ define @vmin_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define @vmin_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; 
CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ define @vmin_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @vmin_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -181,7 +181,7 @@ define @vmin_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -219,7 +219,7 @@ define @vmin_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -257,7 +257,7 @@ define @vmin_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vmin_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define @vmin_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -333,7 +333,7 @@ define @vmin_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -371,7 +371,7 @@ define @vmin_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -409,7 +409,7 @@ define @vmin_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @vmin_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @vmin_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vmin_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -523,7 +523,7 @@ define @vmin_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -561,7 +561,7 @@ define @vmin_vi_nxv2i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -599,7 +599,7 @@ 
define @vmin_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -637,7 +637,7 @@ define @vmin_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -675,7 +675,7 @@ define @vmin_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vmin_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -726,7 +726,7 @@ define @vmin_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vmin_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -777,7 +777,7 @@ define @vmin_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vmin_vi_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @vmin_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vmin_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -879,7 +879,7 @@ define @vmin_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vmin_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: li a0, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll @@ -27,7 +27,7 @@ define @vmul_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vmul_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -62,7 +62,7 @@ define @vmul_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vmul_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -97,7 +97,7 @@ define @vmul_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vmul_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -132,7 +132,7 @@ define @vmul_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vmul_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -167,7 +167,7 @@ define @vmul_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vmul_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @vmul_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vmul_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: 
vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -237,7 +237,7 @@ define @vmul_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vmul_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -272,7 +272,7 @@ define @vmul_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vmul_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -307,7 +307,7 @@ define @vmul_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vmul_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -342,7 +342,7 @@ define @vmul_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vmul_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -377,7 +377,7 @@ define @vmul_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vmul_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -412,7 +412,7 @@ define @vmul_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vmul_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @vmul_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vmul_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -482,7 +482,7 @@ define @vmul_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vmul_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -517,7 +517,7 @@ define @vmul_vi_nxv2i32_0( %va) { ; CHECK-LABEL: vmul_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -552,7 +552,7 @@ define @vmul_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vmul_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -587,7 +587,7 @@ define @vmul_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vmul_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -622,7 +622,7 @@ define @vmul_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vmul_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -670,7 +670,7 @@ define @vmul_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vmul_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -742,7 +742,7 @@ define @vmul_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vmul_vi_nxv2i64_0: ; 
CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -814,7 +814,7 @@ define @vmul_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vmul_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -886,7 +886,7 @@ define @vmul_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vmul_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll @@ -7,10 +7,10 @@ define @srem_eq_fold_nxv4i8( %va) { ; CHECK-LABEL: srem_eq_fold_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 42 +; CHECK-NEXT: li a0, 42 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 -; CHECK-NEXT: addi a1, zero, -85 +; CHECK-NEXT: li a1, -85 ; CHECK-NEXT: vmacc.vx v9, a1, v8 ; CHECK-NEXT: vsll.vi v8, v9, 7 ; CHECK-NEXT: vsrl.vi v9, v9, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll @@ -240,7 +240,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv1i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v9, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v9 @@ -256,7 +256,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv2i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vx v10, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v10 @@ -272,7 +272,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv4i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vx v12, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v12 @@ -288,7 +288,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv8i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vx v16, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll @@ -41,7 +41,7 @@ define @vor_vx_nxv1i8_2( %va) { ; CHECK-LABEL: vor_vx_nxv1i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ define @vor_vx_nxv2i8_2( %va) { ; CHECK-LABEL: vor_vx_nxv2i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -139,7 +139,7 @@ define @vor_vx_nxv4i8_2( %va) { ; CHECK-LABEL: 
vor_vx_nxv4i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -188,7 +188,7 @@ define @vor_vx_nxv8i8_2( %va) { ; CHECK-LABEL: vor_vx_nxv8i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -237,7 +237,7 @@ define @vor_vx_nxv16i8_2( %va) { ; CHECK-LABEL: vor_vx_nxv16i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -286,7 +286,7 @@ define @vor_vx_nxv32i8_2( %va) { ; CHECK-LABEL: vor_vx_nxv32i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -335,7 +335,7 @@ define @vor_vx_nxv64i8_2( %va) { ; CHECK-LABEL: vor_vx_nxv64i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -384,7 +384,7 @@ define @vor_vx_nxv1i16_2( %va) { ; CHECK-LABEL: vor_vx_nxv1i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -433,7 +433,7 @@ define @vor_vx_nxv2i16_2( %va) { ; CHECK-LABEL: vor_vx_nxv2i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -482,7 +482,7 @@ define @vor_vx_nxv4i16_2( %va) { ; CHECK-LABEL: vor_vx_nxv4i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -531,7 +531,7 @@ define @vor_vx_nxv8i16_2( %va) { ; CHECK-LABEL: vor_vx_nxv8i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -580,7 +580,7 @@ define @vor_vx_nxv16i16_2( %va) { ; CHECK-LABEL: vor_vx_nxv16i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -629,7 +629,7 @@ define @vor_vx_nxv32i16_2( %va) { ; CHECK-LABEL: vor_vx_nxv32i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -678,7 +678,7 @@ define @vor_vx_nxv1i32_2( %va) { ; CHECK-LABEL: vor_vx_nxv1i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -727,7 +727,7 @@ define @vor_vx_nxv2i32_2( %va) { ; CHECK-LABEL: vor_vx_nxv2i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -776,7 +776,7 @@ define @vor_vx_nxv4i32_2( %va) { ; CHECK-LABEL: vor_vx_nxv4i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 
; CHECK-NEXT: ret @@ -825,7 +825,7 @@ define @vor_vx_nxv8i32_2( %va) { ; CHECK-LABEL: vor_vx_nxv8i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @vor_vx_nxv16i32_2( %va) { ; CHECK-LABEL: vor_vx_nxv16i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -936,7 +936,7 @@ define @vor_vx_nxv1i64_2( %va) { ; CHECK-LABEL: vor_vx_nxv1i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -998,7 +998,7 @@ define @vor_vx_nxv2i64_2( %va) { ; CHECK-LABEL: vor_vx_nxv2i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1060,7 +1060,7 @@ define @vor_vx_nxv4i64_2( %va) { ; CHECK-LABEL: vor_vx_nxv4i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1122,7 +1122,7 @@ define @vor_vx_nxv8i64_2( %va) { ; CHECK-LABEL: vor_vx_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll @@ -36,7 +36,7 @@ define signext i8 @vreduce_smax_nxv1i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu @@ -67,7 +67,7 @@ define signext i8 @vreduce_smin_nxv1i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu @@ -158,7 +158,7 @@ define signext i8 @vreduce_smax_nxv2i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu @@ -189,7 +189,7 @@ define signext i8 @vreduce_smin_nxv2i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu @@ -280,7 +280,7 @@ define signext i8 @vreduce_smax_nxv4i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu @@ -311,7 +311,7 @@ define signext i8 @vreduce_smin_nxv4i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, 
e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu @@ -1098,7 +1098,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1116,7 +1116,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1140,7 +1140,7 @@ ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1159,7 +1159,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1175,7 +1175,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: addi a0, a0, -1 @@ -1185,7 +1185,7 @@ ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1204,7 +1204,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1222,7 +1222,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1240,7 +1240,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1259,7 +1259,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1278,7 +1278,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1303,7 +1303,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1323,7 
+1323,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1339,7 +1339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: addi a0, a0, -1 @@ -1350,7 +1350,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1370,7 +1370,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1389,7 +1389,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1408,7 +1408,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1427,7 +1427,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1446,7 +1446,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1471,7 +1471,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1491,7 +1491,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1507,7 +1507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: addi a0, a0, -1 @@ -1518,7 +1518,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1538,7 +1538,7 
@@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1557,7 +1557,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 @@ -1576,7 +1576,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 -; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll @@ -36,7 +36,7 @@ define signext i8 @vreduce_smax_nxv1i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu @@ -67,7 +67,7 @@ define signext i8 @vreduce_smin_nxv1i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu @@ -158,7 +158,7 @@ define signext i8 @vreduce_smax_nxv2i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu @@ -189,7 +189,7 @@ define signext i8 @vreduce_smin_nxv2i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu @@ -280,7 +280,7 @@ define signext i8 @vreduce_smax_nxv4i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -128 +; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu @@ -311,7 +311,7 @@ define signext i8 @vreduce_smin_nxv4i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu @@ -1122,7 +1122,7 @@ define i64 @vreduce_smax_nxv1i64( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: slli a0, a0, 63 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 @@ -1152,7 +1152,7 @@ define i64 @vreduce_smin_nxv1i64( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 @@ -1240,7 +1240,7 @@ define i64 
@vreduce_smax_nxv2i64( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: slli a0, a0, 63 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 @@ -1272,7 +1272,7 @@ define i64 @vreduce_smin_nxv2i64( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 @@ -1364,7 +1364,7 @@ define i64 @vreduce_smax_nxv4i64( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: slli a0, a0, 63 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 @@ -1396,7 +1396,7 @@ define i64 @vreduce_smin_nxv4i64( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -1 +; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -1261,7 +1261,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1295,7 +1295,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1329,7 +1329,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1363,7 +1363,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1397,7 +1397,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1431,7 +1431,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredand.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1465,7 +1465,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1499,7 +1499,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV32-NEXT: 
vmv.x.s a0, v9 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1533,7 +1533,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1567,7 +1567,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1601,7 +1601,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1635,7 +1635,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1669,7 +1669,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1703,7 +1703,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredand.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1737,7 +1737,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1771,7 +1771,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1805,7 +1805,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredsum.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1839,7 +1839,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredmaxu.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1873,7 +1873,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredmax.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1907,7 +1907,7 
@@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredminu.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1941,7 +1941,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredmin.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -1975,7 +1975,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredand.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2009,7 +2009,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredor.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 @@ -2043,7 +2043,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredxor.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 -; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll @@ -27,14 +27,14 @@ define @vrem_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vrem_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v9, v9, v8 ; CHECK-NEXT: vsra.vi v9, v9, 2 ; CHECK-NEXT: vsrl.vi v10, v9, 7 ; CHECK-NEXT: vadd.vv v9, v9, v10 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -68,14 +68,14 @@ define @vrem_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vrem_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v9, v9, v8 ; CHECK-NEXT: vsra.vi v9, v9, 2 ; CHECK-NEXT: vsrl.vi v10, v9, 7 ; CHECK-NEXT: vadd.vv v9, v9, v10 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -109,14 +109,14 @@ define @vrem_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vrem_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v9, v9, v8 ; CHECK-NEXT: vsra.vi v9, v9, 2 ; CHECK-NEXT: vsrl.vi v10, v9, 7 ; CHECK-NEXT: vadd.vv v9, v9, v10 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -150,14 +150,14 @@ define @vrem_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vrem_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; 
CHECK-NEXT: vsub.vv v9, v9, v8 ; CHECK-NEXT: vsra.vi v9, v9, 2 ; CHECK-NEXT: vsrl.vi v10, v9, 7 ; CHECK-NEXT: vadd.vv v9, v9, v10 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -191,14 +191,14 @@ define @vrem_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vrem_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmulh.vx v10, v8, a0 ; CHECK-NEXT: vsub.vv v10, v10, v8 ; CHECK-NEXT: vsra.vi v10, v10, 2 ; CHECK-NEXT: vsrl.vi v12, v10, 7 ; CHECK-NEXT: vadd.vv v10, v10, v12 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -232,14 +232,14 @@ define @vrem_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vrem_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmulh.vx v12, v8, a0 ; CHECK-NEXT: vsub.vv v12, v12, v8 ; CHECK-NEXT: vsra.vi v12, v12, 2 ; CHECK-NEXT: vsrl.vi v16, v12, 7 ; CHECK-NEXT: vadd.vv v12, v12, v16 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -273,14 +273,14 @@ define @vrem_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vrem_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 109 +; CHECK-NEXT: li a0, 109 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmulh.vx v16, v8, a0 ; CHECK-NEXT: vsub.vv v16, v16, v8 ; CHECK-NEXT: vsra.vi v16, v16, 2 ; CHECK-NEXT: vsrl.vi v24, v16, 7 ; CHECK-NEXT: vadd.vv v16, v16, v24 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v16 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -321,7 +321,7 @@ ; RV32-NEXT: vsra.vi v9, v9, 1 ; RV32-NEXT: vsrl.vi v10, v9, 15 ; RV32-NEXT: vadd.vv v9, v9, v10 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -334,7 +334,7 @@ ; RV64-NEXT: vsra.vi v9, v9, 1 ; RV64-NEXT: vsrl.vi v10, v9, 15 ; RV64-NEXT: vadd.vv v9, v9, v10 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -375,7 +375,7 @@ ; RV32-NEXT: vsra.vi v9, v9, 1 ; RV32-NEXT: vsrl.vi v10, v9, 15 ; RV32-NEXT: vadd.vv v9, v9, v10 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -388,7 +388,7 @@ ; RV64-NEXT: vsra.vi v9, v9, 1 ; RV64-NEXT: vsrl.vi v10, v9, 15 ; RV64-NEXT: vadd.vv v9, v9, v10 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -429,7 +429,7 @@ ; RV32-NEXT: vsra.vi v9, v9, 1 ; RV32-NEXT: vsrl.vi v10, v9, 15 ; RV32-NEXT: vadd.vv v9, v9, v10 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -442,7 +442,7 @@ ; RV64-NEXT: vsra.vi v9, v9, 1 ; RV64-NEXT: vsrl.vi v10, v9, 15 ; RV64-NEXT: vadd.vv v9, v9, v10 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -483,7 +483,7 @@ ; RV32-NEXT: vsra.vi v10, v10, 1 ; RV32-NEXT: vsrl.vi v12, v10, 15 ; RV32-NEXT: vadd.vv v10, v10, v12 -; RV32-NEXT: addi a0, zero, -7 +; 
RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 ; RV32-NEXT: ret ; @@ -496,7 +496,7 @@ ; RV64-NEXT: vsra.vi v10, v10, 1 ; RV64-NEXT: vsrl.vi v12, v10, 15 ; RV64-NEXT: vadd.vv v10, v10, v12 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -537,7 +537,7 @@ ; RV32-NEXT: vsra.vi v12, v12, 1 ; RV32-NEXT: vsrl.vi v16, v12, 15 ; RV32-NEXT: vadd.vv v12, v12, v16 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 ; RV32-NEXT: ret ; @@ -550,7 +550,7 @@ ; RV64-NEXT: vsra.vi v12, v12, 1 ; RV64-NEXT: vsrl.vi v16, v12, 15 ; RV64-NEXT: vadd.vv v12, v12, v16 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -591,7 +591,7 @@ ; RV32-NEXT: vsra.vi v16, v16, 1 ; RV32-NEXT: vsrl.vi v24, v16, 15 ; RV32-NEXT: vadd.vv v16, v16, v24 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 ; RV32-NEXT: ret ; @@ -604,7 +604,7 @@ ; RV64-NEXT: vsra.vi v16, v16, 1 ; RV64-NEXT: vsrl.vi v24, v16, 15 ; RV64-NEXT: vadd.vv v16, v16, v24 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -646,7 +646,7 @@ ; RV32-NEXT: vsrl.vi v10, v9, 31 ; RV32-NEXT: vsra.vi v9, v9, 2 ; RV32-NEXT: vadd.vv v9, v9, v10 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -660,7 +660,7 @@ ; RV64-NEXT: vsra.vi v9, v9, 2 ; RV64-NEXT: vsrl.vi v10, v9, 31 ; RV64-NEXT: vadd.vv v9, v9, v10 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -702,7 +702,7 @@ ; RV32-NEXT: vsrl.vi v10, v9, 31 ; RV32-NEXT: vsra.vi v9, v9, 2 ; RV32-NEXT: vadd.vv v9, v9, v10 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -716,7 +716,7 @@ ; RV64-NEXT: vsra.vi v9, v9, 2 ; RV64-NEXT: vsrl.vi v10, v9, 31 ; RV64-NEXT: vadd.vv v9, v9, v10 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -758,7 +758,7 @@ ; RV32-NEXT: vsrl.vi v12, v10, 31 ; RV32-NEXT: vsra.vi v10, v10, 2 ; RV32-NEXT: vadd.vv v10, v10, v12 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 ; RV32-NEXT: ret ; @@ -772,7 +772,7 @@ ; RV64-NEXT: vsra.vi v10, v10, 2 ; RV64-NEXT: vsrl.vi v12, v10, 31 ; RV64-NEXT: vadd.vv v10, v10, v12 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -814,7 +814,7 @@ ; RV32-NEXT: vsrl.vi v16, v12, 31 ; RV32-NEXT: vsra.vi v12, v12, 2 ; RV32-NEXT: vadd.vv v12, v12, v16 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 ; RV32-NEXT: ret ; @@ -828,7 +828,7 @@ ; RV64-NEXT: vsra.vi v12, v12, 2 ; RV64-NEXT: vsrl.vi v16, v12, 31 ; RV64-NEXT: vadd.vv v12, v12, v16 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -870,7 +870,7 @@ ; RV32-NEXT: vsrl.vi v24, v16, 31 ; RV32-NEXT: vsra.vi v16, v16, 2 ; RV32-NEXT: vadd.vv v16, v16, v24 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx 
v8, a0, v16 ; RV32-NEXT: ret ; @@ -884,7 +884,7 @@ ; RV64-NEXT: vsra.vi v16, v16, 2 ; RV64-NEXT: vsrl.vi v24, v16, 31 ; RV64-NEXT: vadd.vv v16, v16, v24 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -943,11 +943,11 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulh.vv v9, v8, v9 -; RV32-NEXT: addi a0, zero, 63 +; RV32-NEXT: li a0, 63 ; RV32-NEXT: vsrl.vx v10, v9, a0 ; RV32-NEXT: vsra.vi v9, v9, 1 ; RV32-NEXT: vadd.vv v9, v9, v10 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -964,11 +964,11 @@ ; RV64-NEXT: addi a0, a0, 1755 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmulh.vx v9, v8, a0 -; RV64-NEXT: addi a0, zero, 63 +; RV64-NEXT: li a0, 63 ; RV64-NEXT: vsrl.vx v10, v9, a0 ; RV64-NEXT: vsra.vi v9, v9, 1 ; RV64-NEXT: vadd.vv v9, v9, v10 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -1027,11 +1027,11 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulh.vv v10, v8, v10 -; RV32-NEXT: addi a0, zero, 63 +; RV32-NEXT: li a0, 63 ; RV32-NEXT: vsrl.vx v12, v10, a0 ; RV32-NEXT: vsra.vi v10, v10, 1 ; RV32-NEXT: vadd.vv v10, v10, v12 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1048,11 +1048,11 @@ ; RV64-NEXT: addi a0, a0, 1755 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmulh.vx v10, v8, a0 -; RV64-NEXT: addi a0, zero, 63 +; RV64-NEXT: li a0, 63 ; RV64-NEXT: vsrl.vx v12, v10, a0 ; RV64-NEXT: vsra.vi v10, v10, 1 ; RV64-NEXT: vadd.vv v10, v10, v12 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -1111,11 +1111,11 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulh.vv v12, v8, v12 -; RV32-NEXT: addi a0, zero, 63 +; RV32-NEXT: li a0, 63 ; RV32-NEXT: vsrl.vx v16, v12, a0 ; RV32-NEXT: vsra.vi v12, v12, 1 ; RV32-NEXT: vadd.vv v12, v12, v16 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1132,11 +1132,11 @@ ; RV64-NEXT: addi a0, a0, 1755 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmulh.vx v12, v8, a0 -; RV64-NEXT: addi a0, zero, 63 +; RV64-NEXT: li a0, 63 ; RV64-NEXT: vsrl.vx v16, v12, a0 ; RV64-NEXT: vsra.vi v12, v12, 1 ; RV64-NEXT: vadd.vv v12, v12, v16 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -1195,11 +1195,11 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulh.vv v16, v8, v16 -; RV32-NEXT: addi a0, zero, 63 +; RV32-NEXT: li a0, 63 ; RV32-NEXT: vsrl.vx v24, v16, a0 ; RV32-NEXT: vsra.vi v16, v16, 1 ; RV32-NEXT: vadd.vv v16, v16, v24 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1216,11 +1216,11 @@ ; RV64-NEXT: addi a0, a0, 1755 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmulh.vx v16, v8, a0 -; RV64-NEXT: addi a0, zero, 63 +; RV64-NEXT: li a0, 63 ; RV64-NEXT: vsrl.vx v24, v16, a0 ; RV64-NEXT: vsra.vi v16, 
v16, 1 ; RV64-NEXT: vadd.vv v16, v16, v24 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll @@ -27,11 +27,11 @@ define @vremu_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vremu_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmulhu.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v9, v9, 5 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -65,11 +65,11 @@ define @vremu_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vremu_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmulhu.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v9, v9, 5 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -103,11 +103,11 @@ define @vremu_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vremu_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmulhu.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v9, v9, 5 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -141,11 +141,11 @@ define @vremu_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vremu_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v9, v9, 5 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -179,11 +179,11 @@ define @vremu_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vremu_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v10, v8, a0 ; CHECK-NEXT: vsrl.vi v10, v10, 5 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -217,11 +217,11 @@ define @vremu_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vremu_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v12, v8, a0 ; CHECK-NEXT: vsrl.vi v12, v12, 5 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -255,11 +255,11 @@ define @vremu_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vremu_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 33 +; CHECK-NEXT: li a0, 33 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v16, v8, a0 ; CHECK-NEXT: vsrl.vi v16, v16, 5 -; CHECK-NEXT: addi a0, zero, -7 +; CHECK-NEXT: li a0, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v16 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 @@ -298,7 +298,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; 
RV32-NEXT: vsrl.vi v9, v9, 13 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -309,7 +309,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 13 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -348,7 +348,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 13 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -359,7 +359,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 13 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -398,7 +398,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 13 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -409,7 +409,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 13 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -448,7 +448,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV32-NEXT: vmulhu.vx v10, v8, a0 ; RV32-NEXT: vsrl.vi v10, v10, 13 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 ; RV32-NEXT: ret ; @@ -459,7 +459,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV64-NEXT: vmulhu.vx v10, v8, a0 ; RV64-NEXT: vsrl.vi v10, v10, 13 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -498,7 +498,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV32-NEXT: vmulhu.vx v12, v8, a0 ; RV32-NEXT: vsrl.vi v12, v12, 13 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 ; RV32-NEXT: ret ; @@ -509,7 +509,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV64-NEXT: vmulhu.vx v12, v8, a0 ; RV64-NEXT: vsrl.vi v12, v12, 13 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -548,7 +548,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV32-NEXT: vmulhu.vx v16, v8, a0 ; RV32-NEXT: vsrl.vi v16, v16, 13 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 ; RV32-NEXT: ret ; @@ -559,7 +559,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV64-NEXT: vmulhu.vx v16, v8, a0 ; RV64-NEXT: vsrl.vi v16, v16, 13 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 @@ -598,7 +598,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 29 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -609,7 +609,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 29 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: 
ret %head = insertelement undef, i32 -7, i32 0 @@ -648,7 +648,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 29 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: ret ; @@ -659,7 +659,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 29 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -698,7 +698,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV32-NEXT: vmulhu.vx v10, v8, a0 ; RV32-NEXT: vsrl.vi v10, v10, 29 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 ; RV32-NEXT: ret ; @@ -709,7 +709,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV64-NEXT: vmulhu.vx v10, v8, a0 ; RV64-NEXT: vsrl.vi v10, v10, 29 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -748,7 +748,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vmulhu.vx v12, v8, a0 ; RV32-NEXT: vsrl.vi v12, v12, 29 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 ; RV32-NEXT: ret ; @@ -759,7 +759,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV64-NEXT: vmulhu.vx v12, v8, a0 ; RV64-NEXT: vsrl.vi v12, v12, 29 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -798,7 +798,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV32-NEXT: vmulhu.vx v16, v8, a0 ; RV32-NEXT: vsrl.vi v16, v16, 29 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 ; RV32-NEXT: ret ; @@ -809,7 +809,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV64-NEXT: vmulhu.vx v16, v8, a0 ; RV64-NEXT: vsrl.vi v16, v16, 29 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 @@ -860,29 +860,29 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulhu.vv v9, v8, v9 -; RV32-NEXT: addi a0, zero, 61 +; RV32-NEXT: li a0, 61 ; RV32-NEXT: vsrl.vx v9, v9, a0 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv1i64_0: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 61 ; RV64-NEXT: addi a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmulhu.vx v9, v8, a0 -; RV64-NEXT: addi a0, zero, 61 +; RV64-NEXT: li a0, 61 ; RV64-NEXT: vsrl.vx v9, v9, a0 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -908,7 +908,7 @@ define @vremu_vi_nxv1i64_2( %va, %vb) { ; CHECK-LABEL: vremu_vi_nxv1i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vsll.vv v9, v10, v9 @@ -964,29 +964,29 @@ ; RV32-NEXT: .cfi_def_cfa_offset 
16 ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulhu.vv v10, v8, v10 -; RV32-NEXT: addi a0, zero, 61 +; RV32-NEXT: li a0, 61 ; RV32-NEXT: vsrl.vx v10, v10, a0 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv2i64_0: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 61 ; RV64-NEXT: addi a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmulhu.vx v10, v8, a0 -; RV64-NEXT: addi a0, zero, 61 +; RV64-NEXT: li a0, 61 ; RV64-NEXT: vsrl.vx v10, v10, a0 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -1012,7 +1012,7 @@ define @vremu_vi_nxv2i64_2( %va, %vb) { ; CHECK-LABEL: vremu_vi_nxv2i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsll.vv v10, v12, v10 @@ -1068,29 +1068,29 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulhu.vv v12, v8, v12 -; RV32-NEXT: addi a0, zero, 61 +; RV32-NEXT: li a0, 61 ; RV32-NEXT: vsrl.vx v12, v12, a0 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv4i64_0: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 61 ; RV64-NEXT: addi a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmulhu.vx v12, v8, a0 -; RV64-NEXT: addi a0, zero, 61 +; RV64-NEXT: li a0, 61 ; RV64-NEXT: vsrl.vx v12, v12, a0 -; RV64-NEXT: addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -1116,7 +1116,7 @@ define @vremu_vi_nxv4i64_2( %va, %vb) { ; CHECK-LABEL: vremu_vi_nxv4i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsll.vv v12, v16, v12 @@ -1172,29 +1172,29 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulhu.vv v16, v8, v16 -; RV32-NEXT: addi a0, zero, 61 +; RV32-NEXT: li a0, 61 ; RV32-NEXT: vsrl.vx v16, v16, a0 -; RV32-NEXT: addi a0, zero, -7 +; RV32-NEXT: li a0, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv8i64_0: ; RV64: # %bb.0: -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 61 ; RV64-NEXT: addi a0, a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmulhu.vx v16, v8, a0 -; RV64-NEXT: addi a0, zero, 61 +; RV64-NEXT: li a0, 61 ; RV64-NEXT: vsrl.vx v16, v16, a0 -; RV64-NEXT: 
addi a0, zero, -7 +; RV64-NEXT: li a0, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 @@ -1220,7 +1220,7 @@ define @vremu_vi_nxv8i64_2( %va, %vb) { ; CHECK-LABEL: vremu_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vsll.vv v16, v24, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll @@ -9,7 +9,7 @@ define @vremu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 127 +; CHECK-NEXT: li a2, 127 ; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll @@ -461,7 +461,7 @@ define @vshl_vx_nxv1i64_1( %va) { ; CHECK-LABEL: vshl_vx_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -510,7 +510,7 @@ define @vshl_vx_nxv2i64_1( %va) { ; CHECK-LABEL: vshl_vx_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -559,7 +559,7 @@ define @vshl_vx_nxv4i64_1( %va) { ; CHECK-LABEL: vshl_vx_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -608,7 +608,7 @@ define @vshl_vx_nxv8i64_1( %va) { ; CHECK-LABEL: vshl_vx_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll @@ -11,7 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll @@ -41,14 +41,14 @@ define @vsplat_nxv8i64_3() { ; RV32V-LABEL: vsplat_nxv8i64_3: ; RV32V: # %bb.0: -; RV32V-NEXT: addi a0, zero, 255 +; RV32V-NEXT: li a0, 255 ; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vmv.v.x v8, a0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_3: ; RV64V: # %bb.0: -; RV64V-NEXT: addi a0, zero, 255 +; RV64V-NEXT: li a0, 255 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.x v8, a0 ; RV64V-NEXT: ret @@ -74,7 +74,7 @@ ; ; RV64V-LABEL: vsplat_nxv8i64_4: ; RV64V: # %bb.0: -; RV64V-NEXT: addi a0, zero, 251 +; RV64V-NEXT: li a0, 251 ; RV64V-NEXT: slli a0, a0, 24 ; RV64V-NEXT: addi a0, a0, -1281 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu @@ 
-147,14 +147,14 @@ define @vadd_vx_nxv8i64_8( %v) { ; RV32V-LABEL: vadd_vx_nxv8i64_8: ; RV32V: # %bb.0: -; RV32V-NEXT: addi a0, zero, 255 +; RV32V-NEXT: li a0, 255 ; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vadd.vx v8, v8, a0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vadd_vx_nxv8i64_8: ; RV64V: # %bb.0: -; RV64V-NEXT: addi a0, zero, 255 +; RV64V-NEXT: li a0, 255 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vadd.vx v8, v8, a0 ; RV64V-NEXT: ret @@ -204,7 +204,7 @@ ; ; RV64V-LABEL: vadd_vx_nxv8i64_10: ; RV64V: # %bb.0: -; RV64V-NEXT: addi a0, zero, 251 +; RV64V-NEXT: li a0, 251 ; RV64V-NEXT: slli a0, a0, 24 ; RV64V-NEXT: addi a0, a0, -1281 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu @@ -221,7 +221,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: addi sp, sp, -16 ; RV32V-NEXT: .cfi_def_cfa_offset 16 -; RV32V-NEXT: addi a0, zero, 1 +; RV32V-NEXT: li a0, 1 ; RV32V-NEXT: sw a0, 12(sp) ; RV32V-NEXT: lui a0, 1028096 ; RV32V-NEXT: addi a0, a0, -1281 @@ -235,7 +235,7 @@ ; ; RV64V-LABEL: vadd_vx_nxv8i64_11: ; RV64V: # %bb.0: -; RV64V-NEXT: addi a0, zero, 507 +; RV64V-NEXT: li a0, 507 ; RV64V-NEXT: slli a0, a0, 24 ; RV64V-NEXT: addi a0, a0, -1281 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll @@ -651,7 +651,7 @@ define @vsra_vi_nxv1i64_1( %va) { ; CHECK-LABEL: vsra_vi_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -698,7 +698,7 @@ define @vsra_vi_nxv2i64_1( %va) { ; CHECK-LABEL: vsra_vi_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -745,7 +745,7 @@ define @vsra_vi_nxv4i64_1( %va) { ; CHECK-LABEL: vsra_vi_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -792,7 +792,7 @@ define @vsra_vi_nxv8i64_1( %va) { ; CHECK-LABEL: vsra_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v8, v8, 1 ; CHECK-NEXT: vmv.v.x v9, a0 -; CHECK-NEXT: addi a0, zero, 127 +; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll @@ -461,7 +461,7 @@ define @vsrl_vx_nxv1i64_1( %va) { ; CHECK-LABEL: vsrl_vx_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -498,7 +498,7 @@ define @vsrl_vx_nxv2i64_1( %va) { ; CHECK-LABEL: vsrl_vx_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; 
CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -535,7 +535,7 @@ define @vsrl_vx_nxv4i64_1( %va) { ; CHECK-LABEL: vsrl_vx_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -572,7 +572,7 @@ define @vsrl_vx_nxv8i64_1( %va) { ; CHECK-LABEL: vsrl_vx_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll @@ -9,7 +9,7 @@ define @vsrl_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, zero, 127 +; CHECK-NEXT: li a2, 127 ; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll @@ -31,7 +31,7 @@ define @ssub_nxv1i8_vi( %va) { ; CHECK-LABEL: ssub_nxv1i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define @ssub_nxv2i8_vi( %va) { ; CHECK-LABEL: ssub_nxv2i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ define @ssub_nxv4i8_vi( %va) { ; CHECK-LABEL: ssub_nxv4i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -142,7 +142,7 @@ define @ssub_nxv8i8_vi( %va) { ; CHECK-LABEL: ssub_nxv8i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -179,7 +179,7 @@ define @ssub_nxv16i8_vi( %va) { ; CHECK-LABEL: ssub_nxv16i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -216,7 +216,7 @@ define @ssub_nxv32i8_vi( %va) { ; CHECK-LABEL: ssub_nxv32i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define @ssub_nxv64i8_vi( %va) { ; CHECK-LABEL: ssub_nxv64i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -290,7 +290,7 @@ define @ssub_nxv1i16_vi( %va) { ; CHECK-LABEL: ssub_nxv1i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -327,7 +327,7 @@ define @ssub_nxv2i16_vi( %va) { ; CHECK-LABEL: ssub_nxv2i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; 
CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -364,7 +364,7 @@ define @ssub_nxv4i16_vi( %va) { ; CHECK-LABEL: ssub_nxv4i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define @ssub_nxv8i16_vi( %va) { ; CHECK-LABEL: ssub_nxv8i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -438,7 +438,7 @@ define @ssub_nxv16i16_vi( %va) { ; CHECK-LABEL: ssub_nxv16i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -475,7 +475,7 @@ define @ssub_nxv32i16_vi( %va) { ; CHECK-LABEL: ssub_nxv32i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -512,7 +512,7 @@ define @ssub_nxv1i32_vi( %va) { ; CHECK-LABEL: ssub_nxv1i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ define @ssub_nxv2i32_vi( %va) { ; CHECK-LABEL: ssub_nxv2i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @ssub_nxv4i32_vi( %va) { ; CHECK-LABEL: ssub_nxv4i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -623,7 +623,7 @@ define @ssub_nxv8i32_vi( %va) { ; CHECK-LABEL: ssub_nxv8i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -660,7 +660,7 @@ define @ssub_nxv16i32_vi( %va) { ; CHECK-LABEL: ssub_nxv16i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -710,7 +710,7 @@ define @ssub_nxv1i64_vi( %va) { ; CHECK-LABEL: ssub_nxv1i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -760,7 +760,7 @@ define @ssub_nxv2i64_vi( %va) { ; CHECK-LABEL: ssub_nxv2i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -810,7 +810,7 @@ define @ssub_nxv4i64_vi( %va) { ; CHECK-LABEL: ssub_nxv4i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -860,7 +860,7 @@ define @ssub_nxv8i64_vi( %va) { ; CHECK-LABEL: ssub_nxv8i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll @@ -31,7 +31,7 @@ define @usub_nxv1i8_vi( %va) { ; CHECK-LABEL: usub_nxv1i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define @usub_nxv2i8_vi( %va) { ; CHECK-LABEL: usub_nxv2i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ define @usub_nxv4i8_vi( %va) { ; CHECK-LABEL: usub_nxv4i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -142,7 +142,7 @@ define @usub_nxv8i8_vi( %va) { ; CHECK-LABEL: usub_nxv8i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -179,7 +179,7 @@ define @usub_nxv16i8_vi( %va) { ; CHECK-LABEL: usub_nxv16i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -216,7 +216,7 @@ define @usub_nxv32i8_vi( %va) { ; CHECK-LABEL: usub_nxv32i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define @usub_nxv64i8_vi( %va) { ; CHECK-LABEL: usub_nxv64i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -290,7 +290,7 @@ define @usub_nxv1i16_vi( %va) { ; CHECK-LABEL: usub_nxv1i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -327,7 +327,7 @@ define @usub_nxv2i16_vi( %va) { ; CHECK-LABEL: usub_nxv2i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -364,7 +364,7 @@ define @usub_nxv4i16_vi( %va) { ; CHECK-LABEL: usub_nxv4i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define @usub_nxv8i16_vi( %va) { ; CHECK-LABEL: usub_nxv8i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -438,7 +438,7 @@ define @usub_nxv16i16_vi( %va) { ; CHECK-LABEL: usub_nxv16i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -475,7 +475,7 @@ define @usub_nxv32i16_vi( %va) { ; CHECK-LABEL: usub_nxv32i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -512,7 +512,7 @@ define @usub_nxv1i32_vi( 
%va) { ; CHECK-LABEL: usub_nxv1i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ define @usub_nxv2i32_vi( %va) { ; CHECK-LABEL: usub_nxv2i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @usub_nxv4i32_vi( %va) { ; CHECK-LABEL: usub_nxv4i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -623,7 +623,7 @@ define @usub_nxv8i32_vi( %va) { ; CHECK-LABEL: usub_nxv8i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -660,7 +660,7 @@ define @usub_nxv16i32_vi( %va) { ; CHECK-LABEL: usub_nxv16i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -710,7 +710,7 @@ define @usub_nxv1i64_vi( %va) { ; CHECK-LABEL: usub_nxv1i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -760,7 +760,7 @@ define @usub_nxv2i64_vi( %va) { ; CHECK-LABEL: usub_nxv2i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -810,7 +810,7 @@ define @usub_nxv4i64_vi( %va) { ; CHECK-LABEL: usub_nxv4i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -860,7 +860,7 @@ define @usub_nxv8i64_vi( %va) { ; CHECK-LABEL: usub_nxv8i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll @@ -27,7 +27,7 @@ define @vsub_vx_nxv1i8_0( %va) { ; CHECK-LABEL: vsub_vx_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -77,7 +77,7 @@ define @vsub_vx_nxv2i8_0( %va) { ; CHECK-LABEL: vsub_vx_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -112,7 +112,7 @@ define @vsub_vx_nxv4i8_0( %va) { ; CHECK-LABEL: vsub_vx_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -147,7 +147,7 @@ define @vsub_vx_nxv8i8_0( %va) { ; CHECK-LABEL: vsub_vx_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -182,7 +182,7 @@ define 
@vsub_vx_nxv16i8_0( %va) { ; CHECK-LABEL: vsub_vx_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -217,7 +217,7 @@ define @vsub_vx_nxv32i8_0( %va) { ; CHECK-LABEL: vsub_vx_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ define @vsub_vx_nxv64i8_0( %va) { ; CHECK-LABEL: vsub_vx_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -287,7 +287,7 @@ define @vsub_vx_nxv1i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -322,7 +322,7 @@ define @vsub_vx_nxv2i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -357,7 +357,7 @@ define @vsub_vx_nxv4i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -392,7 +392,7 @@ define @vsub_vx_nxv8i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -427,7 +427,7 @@ define @vsub_vx_nxv16i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -462,7 +462,7 @@ define @vsub_vx_nxv32i16_0( %va) { ; CHECK-LABEL: vsub_vx_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -497,7 +497,7 @@ define @vsub_vx_nxv1i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -532,7 +532,7 @@ define @vsub_vx_nxv2i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ define @vsub_vx_nxv4i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -602,7 +602,7 @@ define @vsub_vx_nxv8i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -637,7 +637,7 @@ define @vsub_vx_nxv16i32_0( %va) { ; CHECK-LABEL: vsub_vx_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: 
vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -685,7 +685,7 @@ define @vsub_vx_nxv1i64_0( %va) { ; CHECK-LABEL: vsub_vx_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -733,7 +733,7 @@ define @vsub_vx_nxv2i64_0( %va) { ; CHECK-LABEL: vsub_vx_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -781,7 +781,7 @@ define @vsub_vx_nxv4i64_0( %va) { ; CHECK-LABEL: vsub_vx_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -829,7 +829,7 @@ define @vsub_vx_nxv8i64_0( %va) { ; CHECK-LABEL: vsub_vx_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll @@ -51,7 +51,7 @@ define @vxor_vi_nxv1i8_2( %va) { ; CHECK-LABEL: vxor_vi_nxv1i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -110,7 +110,7 @@ define @vxor_vi_nxv2i8_2( %va) { ; CHECK-LABEL: vxor_vi_nxv2i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -169,7 +169,7 @@ define @vxor_vi_nxv4i8_2( %va) { ; CHECK-LABEL: vxor_vi_nxv4i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -228,7 +228,7 @@ define @vxor_vi_nxv8i8_2( %va) { ; CHECK-LABEL: vxor_vi_nxv8i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -287,7 +287,7 @@ define @vxor_vi_nxv16i8_2( %va) { ; CHECK-LABEL: vxor_vi_nxv16i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @vxor_vi_nxv32i8_2( %va) { ; CHECK-LABEL: vxor_vi_nxv32i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -405,7 +405,7 @@ define @vxor_vi_nxv64i8_2( %va) { ; CHECK-LABEL: vxor_vi_nxv64i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -464,7 +464,7 @@ define @vxor_vi_nxv1i16_2( %va) { ; CHECK-LABEL: vxor_vi_nxv1i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -523,7 +523,7 @@ define @vxor_vi_nxv2i16_2( %va) { ; CHECK-LABEL: vxor_vi_nxv2i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 
16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -582,7 +582,7 @@ define @vxor_vi_nxv4i16_2( %va) { ; CHECK-LABEL: vxor_vi_nxv4i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -641,7 +641,7 @@ define @vxor_vi_nxv8i16_2( %va) { ; CHECK-LABEL: vxor_vi_nxv8i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -700,7 +700,7 @@ define @vxor_vi_nxv16i16_2( %va) { ; CHECK-LABEL: vxor_vi_nxv16i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -759,7 +759,7 @@ define @vxor_vi_nxv32i16_2( %va) { ; CHECK-LABEL: vxor_vi_nxv32i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -818,7 +818,7 @@ define @vxor_vi_nxv1i32_2( %va) { ; CHECK-LABEL: vxor_vi_nxv1i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -877,7 +877,7 @@ define @vxor_vi_nxv2i32_2( %va) { ; CHECK-LABEL: vxor_vi_nxv2i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -936,7 +936,7 @@ define @vxor_vi_nxv4i32_2( %va) { ; CHECK-LABEL: vxor_vi_nxv4i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -995,7 +995,7 @@ define @vxor_vi_nxv8i32_2( %va) { ; CHECK-LABEL: vxor_vi_nxv8i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1054,7 +1054,7 @@ define @vxor_vi_nxv16i32_2( %va) { ; CHECK-LABEL: vxor_vi_nxv16i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1126,7 +1126,7 @@ define @vxor_vi_nxv1i64_2( %va) { ; CHECK-LABEL: vxor_vi_nxv1i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1198,7 +1198,7 @@ define @vxor_vi_nxv2i64_2( %va) { ; CHECK-LABEL: vxor_vi_nxv2i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1270,7 +1270,7 @@ define @vxor_vi_nxv4i64_2( %va) { ; CHECK-LABEL: vxor_vi_nxv4i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret @@ -1342,7 +1342,7 @@ define @vxor_vi_nxv8i64_2( %va) { ; CHECK-LABEL: vxor_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 ; 
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll
--- a/llvm/test/CodeGen/RISCV/sadd_sat.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll
@@ -112,7 +112,7 @@
; RV64I-NEXT: beq a1, a2, .LBB1_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: srai a0, a0, 63
-; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: .LBB1_2:
@@ -146,7 +146,7 @@
; RV64IZbbNOZbt-NEXT: beq a1, a2, .LBB1_2
; RV64IZbbNOZbt-NEXT: # %bb.1:
; RV64IZbbNOZbt-NEXT: srai a0, a0, 63
-; RV64IZbbNOZbt-NEXT: addi a1, zero, -1
+; RV64IZbbNOZbt-NEXT: li a1, -1
; RV64IZbbNOZbt-NEXT: slli a1, a1, 63
; RV64IZbbNOZbt-NEXT: xor a0, a0, a1
; RV64IZbbNOZbt-NEXT: .LBB1_2:
@@ -176,7 +176,7 @@
; RV64IZbbZbt-NEXT: slti a1, a1, 0
; RV64IZbbZbt-NEXT: xor a0, a1, a0
; RV64IZbbZbt-NEXT: srai a1, a2, 63
-; RV64IZbbZbt-NEXT: addi a3, zero, -1
+; RV64IZbbZbt-NEXT: li a3, -1
; RV64IZbbZbt-NEXT: slli a3, a3, 63
; RV64IZbbZbt-NEXT: xor a1, a1, a3
; RV64IZbbZbt-NEXT: cmov a0, a0, a1, a2
@@ -251,54 +251,54 @@
; RV32I-LABEL: func8:
; RV32I: # %bb.0:
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: addi a1, zero, 127
+; RV32I-NEXT: li a1, 127
; RV32I-NEXT: bge a0, a1, .LBB3_3
; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a1, zero, -128
+; RV32I-NEXT: li a1, -128
; RV32I-NEXT: bge a1, a0, .LBB3_4
; RV32I-NEXT: .LBB3_2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB3_3:
-; RV32I-NEXT: addi a0, zero, 127
-; RV32I-NEXT: addi a1, zero, -128
+; RV32I-NEXT: li a0, 127
+; RV32I-NEXT: li a1, -128
; RV32I-NEXT: blt a1, a0, .LBB3_2
; RV32I-NEXT: .LBB3_4:
-; RV32I-NEXT: addi a0, zero, -128
+; RV32I-NEXT: li a0, -128
; RV32I-NEXT: ret
;
; RV64I-LABEL: func8:
; RV64I: # %bb.0:
; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: addi a1, zero, 127
+; RV64I-NEXT: li a1, 127
; RV64I-NEXT: bge a0, a1, .LBB3_3
; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: addi a1, zero, -128
+; RV64I-NEXT: li a1, -128
; RV64I-NEXT: bge a1, a0, .LBB3_4
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB3_3:
-; RV64I-NEXT: addi a0, zero, 127
-; RV64I-NEXT: addi a1, zero, -128
+; RV64I-NEXT: li a0, 127
+; RV64I-NEXT: li a1, -128
; RV64I-NEXT: blt a1, a0, .LBB3_2
; RV64I-NEXT: .LBB3_4:
-; RV64I-NEXT: addi a0, zero, -128
+; RV64I-NEXT: li a0, -128
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a0, a0, a1
-; RV32IZbb-NEXT: addi a1, zero, 127
+; RV32IZbb-NEXT: li a1, 127
; RV32IZbb-NEXT: min a0, a0, a1
-; RV32IZbb-NEXT: addi a1, zero, -128
+; RV32IZbb-NEXT: li a1, -128
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
-; RV64IZbb-NEXT: addi a1, zero, 127
+; RV64IZbb-NEXT: li a1, 127
; RV64IZbb-NEXT: min a0, a0, a1
-; RV64IZbb-NEXT: addi a1, zero, -128
+; RV64IZbb-NEXT: li a1, -128
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %y);
@@ -309,54 +309,54 @@
; RV32I-LABEL: func3:
; RV32I: # %bb.0:
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: addi a1, zero, 7
+; RV32I-NEXT: li a1, 7
; RV32I-NEXT: bge a0, a1, .LBB4_3
; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a1, zero, -8
+; RV32I-NEXT: li a1, -8
; RV32I-NEXT: bge a1, a0, .LBB4_4
; RV32I-NEXT: .LBB4_2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB4_3:
-; RV32I-NEXT: addi a0, zero, 7
-; RV32I-NEXT: addi a1, zero, -8
+; RV32I-NEXT: li a0, 7
+; RV32I-NEXT: li a1, -8
; RV32I-NEXT: blt a1, a0, .LBB4_2
; RV32I-NEXT: .LBB4_4:
-; RV32I-NEXT: addi a0, zero, -8
+; RV32I-NEXT: li a0, -8
; RV32I-NEXT: ret
;
;
RV64I-LABEL: func3: ; RV64I: # %bb.0: ; RV64I-NEXT: add a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 7 +; RV64I-NEXT: li a1, 7 ; RV64I-NEXT: bge a0, a1, .LBB4_3 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a1, zero, -8 +; RV64I-NEXT: li a1, -8 ; RV64I-NEXT: bge a1, a0, .LBB4_4 ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB4_3: -; RV64I-NEXT: addi a0, zero, 7 -; RV64I-NEXT: addi a1, zero, -8 +; RV64I-NEXT: li a0, 7 +; RV64I-NEXT: li a1, -8 ; RV64I-NEXT: blt a1, a0, .LBB4_2 ; RV64I-NEXT: .LBB4_4: -; RV64I-NEXT: addi a0, zero, -8 +; RV64I-NEXT: li a0, -8 ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func3: ; RV32IZbb: # %bb.0: ; RV32IZbb-NEXT: add a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 7 +; RV32IZbb-NEXT: li a1, 7 ; RV32IZbb-NEXT: min a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, -8 +; RV32IZbb-NEXT: li a1, -8 ; RV32IZbb-NEXT: max a0, a0, a1 ; RV32IZbb-NEXT: ret ; ; RV64IZbb-LABEL: func3: ; RV64IZbb: # %bb.0: ; RV64IZbb-NEXT: add a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 7 +; RV64IZbb-NEXT: li a1, 7 ; RV64IZbb-NEXT: min a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, -8 +; RV64IZbb-NEXT: li a1, -8 ; RV64IZbb-NEXT: max a0, a0, a1 ; RV64IZbb-NEXT: ret %tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %y); diff --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll --- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll @@ -120,7 +120,7 @@ ; RV64I-NEXT: beq a2, a1, .LBB1_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: srai a0, a0, 63 -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: .LBB1_2: @@ -154,7 +154,7 @@ ; RV64IZbbNOZbt-NEXT: beq a2, a1, .LBB1_2 ; RV64IZbbNOZbt-NEXT: # %bb.1: ; RV64IZbbNOZbt-NEXT: srai a0, a0, 63 -; RV64IZbbNOZbt-NEXT: addi a1, zero, -1 +; RV64IZbbNOZbt-NEXT: li a1, -1 ; RV64IZbbNOZbt-NEXT: slli a1, a1, 63 ; RV64IZbbNOZbt-NEXT: xor a0, a0, a1 ; RV64IZbbNOZbt-NEXT: .LBB1_2: @@ -184,7 +184,7 @@ ; RV64IZbbZbt-NEXT: slti a2, a2, 0 ; RV64IZbbZbt-NEXT: xor a0, a2, a0 ; RV64IZbbZbt-NEXT: srai a2, a1, 63 -; RV64IZbbZbt-NEXT: addi a3, zero, -1 +; RV64IZbbZbt-NEXT: li a3, -1 ; RV64IZbbZbt-NEXT: slli a3, a3, 63 ; RV64IZbbZbt-NEXT: xor a2, a2, a3 ; RV64IZbbZbt-NEXT: cmov a0, a0, a2, a1 @@ -282,19 +282,19 @@ ; RV32I-NEXT: slli a1, a1, 24 ; RV32I-NEXT: srai a1, a1, 24 ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 127 +; RV32I-NEXT: li a1, 127 ; RV32I-NEXT: bge a0, a1, .LBB3_3 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a1, zero, -128 +; RV32I-NEXT: li a1, -128 ; RV32I-NEXT: bge a1, a0, .LBB3_4 ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB3_3: -; RV32I-NEXT: addi a0, zero, 127 -; RV32I-NEXT: addi a1, zero, -128 +; RV32I-NEXT: li a0, 127 +; RV32I-NEXT: li a1, -128 ; RV32I-NEXT: blt a1, a0, .LBB3_2 ; RV32I-NEXT: .LBB3_4: -; RV32I-NEXT: addi a0, zero, -128 +; RV32I-NEXT: li a0, -128 ; RV32I-NEXT: ret ; ; RV64I-LABEL: func8: @@ -305,19 +305,19 @@ ; RV64I-NEXT: slli a1, a1, 56 ; RV64I-NEXT: srai a1, a1, 56 ; RV64I-NEXT: add a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 127 +; RV64I-NEXT: li a1, 127 ; RV64I-NEXT: bge a0, a1, .LBB3_3 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a1, zero, -128 +; RV64I-NEXT: li a1, -128 ; RV64I-NEXT: bge a1, a0, .LBB3_4 ; RV64I-NEXT: .LBB3_2: ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB3_3: -; RV64I-NEXT: addi a0, zero, 127 -; RV64I-NEXT: addi a1, zero, -128 +; RV64I-NEXT: li a0, 127 +; RV64I-NEXT: li a1, -128 ; RV64I-NEXT: blt a1, a0, .LBB3_2 ; RV64I-NEXT: .LBB3_4: -; RV64I-NEXT: addi a0, zero, -128 +; 
RV64I-NEXT: li a0, -128 ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func8: @@ -326,9 +326,9 @@ ; RV32IZbb-NEXT: mul a1, a1, a2 ; RV32IZbb-NEXT: sext.b a1, a1 ; RV32IZbb-NEXT: add a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 127 +; RV32IZbb-NEXT: li a1, 127 ; RV32IZbb-NEXT: min a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, -128 +; RV32IZbb-NEXT: li a1, -128 ; RV32IZbb-NEXT: max a0, a0, a1 ; RV32IZbb-NEXT: ret ; @@ -338,9 +338,9 @@ ; RV64IZbb-NEXT: mul a1, a1, a2 ; RV64IZbb-NEXT: sext.b a1, a1 ; RV64IZbb-NEXT: add a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 127 +; RV64IZbb-NEXT: li a1, 127 ; RV64IZbb-NEXT: min a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, -128 +; RV64IZbb-NEXT: li a1, -128 ; RV64IZbb-NEXT: max a0, a0, a1 ; RV64IZbb-NEXT: ret %a = mul i8 %y, %z @@ -357,19 +357,19 @@ ; RV32I-NEXT: slli a1, a1, 28 ; RV32I-NEXT: srai a1, a1, 28 ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 7 +; RV32I-NEXT: li a1, 7 ; RV32I-NEXT: bge a0, a1, .LBB4_3 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a1, zero, -8 +; RV32I-NEXT: li a1, -8 ; RV32I-NEXT: bge a1, a0, .LBB4_4 ; RV32I-NEXT: .LBB4_2: ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_3: -; RV32I-NEXT: addi a0, zero, 7 -; RV32I-NEXT: addi a1, zero, -8 +; RV32I-NEXT: li a0, 7 +; RV32I-NEXT: li a1, -8 ; RV32I-NEXT: blt a1, a0, .LBB4_2 ; RV32I-NEXT: .LBB4_4: -; RV32I-NEXT: addi a0, zero, -8 +; RV32I-NEXT: li a0, -8 ; RV32I-NEXT: ret ; ; RV64I-LABEL: func4: @@ -380,19 +380,19 @@ ; RV64I-NEXT: slli a1, a1, 60 ; RV64I-NEXT: srai a1, a1, 60 ; RV64I-NEXT: add a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 7 +; RV64I-NEXT: li a1, 7 ; RV64I-NEXT: bge a0, a1, .LBB4_3 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a1, zero, -8 +; RV64I-NEXT: li a1, -8 ; RV64I-NEXT: bge a1, a0, .LBB4_4 ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB4_3: -; RV64I-NEXT: addi a0, zero, 7 -; RV64I-NEXT: addi a1, zero, -8 +; RV64I-NEXT: li a0, 7 +; RV64I-NEXT: li a1, -8 ; RV64I-NEXT: blt a1, a0, .LBB4_2 ; RV64I-NEXT: .LBB4_4: -; RV64I-NEXT: addi a0, zero, -8 +; RV64I-NEXT: li a0, -8 ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func4: @@ -403,9 +403,9 @@ ; RV32IZbb-NEXT: slli a1, a1, 28 ; RV32IZbb-NEXT: srai a1, a1, 28 ; RV32IZbb-NEXT: add a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 7 +; RV32IZbb-NEXT: li a1, 7 ; RV32IZbb-NEXT: min a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, -8 +; RV32IZbb-NEXT: li a1, -8 ; RV32IZbb-NEXT: max a0, a0, a1 ; RV32IZbb-NEXT: ret ; @@ -417,9 +417,9 @@ ; RV64IZbb-NEXT: slli a1, a1, 60 ; RV64IZbb-NEXT: srai a1, a1, 60 ; RV64IZbb-NEXT: add a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 7 +; RV64IZbb-NEXT: li a1, 7 ; RV64IZbb-NEXT: min a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, -8 +; RV64IZbb-NEXT: li a1, -8 ; RV64IZbb-NEXT: max a0, a0, a1 ; RV64IZbb-NEXT: ret %a = mul i4 %y, %z diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll --- a/llvm/test/CodeGen/RISCV/select-cc.ll +++ b/llvm/test/CodeGen/RISCV/select-cc.ll @@ -105,7 +105,7 @@ ; RV32IBT-NEXT: slti a2, a5, 1 ; RV32IBT-NEXT: lw a1, 0(a1) ; RV32IBT-NEXT: cmov a0, a2, a0, a5 -; RV32IBT-NEXT: addi a2, zero, -1 +; RV32IBT-NEXT: li a2, -1 ; RV32IBT-NEXT: slt a2, a2, a5 ; RV32IBT-NEXT: cmov a0, a2, a0, a1 ; RV32IBT-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/select-const.ll b/llvm/test/CodeGen/RISCV/select-const.ll --- a/llvm/test/CodeGen/RISCV/select-const.ll +++ b/llvm/test/CodeGen/RISCV/select-const.ll @@ -63,49 +63,49 @@ define signext i32 @select_const_int_one_away(i1 zeroext %a) nounwind { ; RV32I-LABEL: select_const_int_one_away: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 4 
+; RV32I-NEXT: li a1, 4 ; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: ret ; ; RV32IF-LABEL: select_const_int_one_away: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi a1, zero, 4 +; RV32IF-NEXT: li a1, 4 ; RV32IF-NEXT: sub a0, a1, a0 ; RV32IF-NEXT: ret ; ; RV32IBT-LABEL: select_const_int_one_away: ; RV32IBT: # %bb.0: -; RV32IBT-NEXT: addi a1, zero, 4 +; RV32IBT-NEXT: li a1, 4 ; RV32IBT-NEXT: sub a0, a1, a0 ; RV32IBT-NEXT: ret ; ; RV32IFBT-LABEL: select_const_int_one_away: ; RV32IFBT: # %bb.0: -; RV32IFBT-NEXT: addi a1, zero, 4 +; RV32IFBT-NEXT: li a1, 4 ; RV32IFBT-NEXT: sub a0, a1, a0 ; RV32IFBT-NEXT: ret ; ; RV64I-LABEL: select_const_int_one_away: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a1, zero, 4 +; RV64I-NEXT: li a1, 4 ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64IFD-LABEL: select_const_int_one_away: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi a1, zero, 4 +; RV64IFD-NEXT: li a1, 4 ; RV64IFD-NEXT: sub a0, a1, a0 ; RV64IFD-NEXT: ret ; ; RV64IBT-LABEL: select_const_int_one_away: ; RV64IBT: # %bb.0: -; RV64IBT-NEXT: addi a1, zero, 4 +; RV64IBT-NEXT: li a1, 4 ; RV64IBT-NEXT: sub a0, a1, a0 ; RV64IBT-NEXT: ret ; ; RV64IFDBT-LABEL: select_const_int_one_away: ; RV64IFDBT: # %bb.0: -; RV64IFDBT-NEXT: addi a1, zero, 4 +; RV64IFDBT-NEXT: li a1, 4 ; RV64IFDBT-NEXT: sub a0, a1, a0 ; RV64IFDBT-NEXT: ret %1 = select i1 %a, i32 3, i32 4 @@ -160,68 +160,68 @@ ; RV32I-LABEL: select_const_int_harder: ; RV32I: # %bb.0: ; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: addi a0, zero, 6 +; RV32I-NEXT: li a0, 6 ; RV32I-NEXT: bnez a1, .LBB3_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a0, zero, 38 +; RV32I-NEXT: li a0, 38 ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: ret ; ; RV32IF-LABEL: select_const_int_harder: ; RV32IF: # %bb.0: ; RV32IF-NEXT: mv a1, a0 -; RV32IF-NEXT: addi a0, zero, 6 +; RV32IF-NEXT: li a0, 6 ; RV32IF-NEXT: bnez a1, .LBB3_2 ; RV32IF-NEXT: # %bb.1: -; RV32IF-NEXT: addi a0, zero, 38 +; RV32IF-NEXT: li a0, 38 ; RV32IF-NEXT: .LBB3_2: ; RV32IF-NEXT: ret ; ; RV32IBT-LABEL: select_const_int_harder: ; RV32IBT: # %bb.0: -; RV32IBT-NEXT: addi a1, zero, 38 -; RV32IBT-NEXT: addi a2, zero, 6 +; RV32IBT-NEXT: li a1, 38 +; RV32IBT-NEXT: li a2, 6 ; RV32IBT-NEXT: cmov a0, a0, a2, a1 ; RV32IBT-NEXT: ret ; ; RV32IFBT-LABEL: select_const_int_harder: ; RV32IFBT: # %bb.0: -; RV32IFBT-NEXT: addi a1, zero, 38 -; RV32IFBT-NEXT: addi a2, zero, 6 +; RV32IFBT-NEXT: li a1, 38 +; RV32IFBT-NEXT: li a2, 6 ; RV32IFBT-NEXT: cmov a0, a0, a2, a1 ; RV32IFBT-NEXT: ret ; ; RV64I-LABEL: select_const_int_harder: ; RV64I: # %bb.0: ; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: addi a0, zero, 6 +; RV64I-NEXT: li a0, 6 ; RV64I-NEXT: bnez a1, .LBB3_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a0, zero, 38 +; RV64I-NEXT: li a0, 38 ; RV64I-NEXT: .LBB3_2: ; RV64I-NEXT: ret ; ; RV64IFD-LABEL: select_const_int_harder: ; RV64IFD: # %bb.0: ; RV64IFD-NEXT: mv a1, a0 -; RV64IFD-NEXT: addi a0, zero, 6 +; RV64IFD-NEXT: li a0, 6 ; RV64IFD-NEXT: bnez a1, .LBB3_2 ; RV64IFD-NEXT: # %bb.1: -; RV64IFD-NEXT: addi a0, zero, 38 +; RV64IFD-NEXT: li a0, 38 ; RV64IFD-NEXT: .LBB3_2: ; RV64IFD-NEXT: ret ; ; RV64IBT-LABEL: select_const_int_harder: ; RV64IBT: # %bb.0: -; RV64IBT-NEXT: addi a1, zero, 38 -; RV64IBT-NEXT: addi a2, zero, 6 +; RV64IBT-NEXT: li a1, 38 +; RV64IBT-NEXT: li a2, 6 ; RV64IBT-NEXT: cmov a0, a0, a2, a1 ; RV64IBT-NEXT: ret ; ; RV64IFDBT-LABEL: select_const_int_harder: ; RV64IFDBT: # %bb.0: -; RV64IFDBT-NEXT: addi a1, zero, 38 -; RV64IFDBT-NEXT: addi a2, zero, 6 +; RV64IFDBT-NEXT: li a1, 38 +; RV64IFDBT-NEXT: li a2, 6 ; RV64IFDBT-NEXT: cmov a0, a0, a2, a1 ; RV64IFDBT-NEXT: 
ret %1 = select i1 %a, i32 6, i32 38 diff --git a/llvm/test/CodeGen/RISCV/select-constant-xor.ll b/llvm/test/CodeGen/RISCV/select-constant-xor.ll --- a/llvm/test/CodeGen/RISCV/select-constant-xor.ll +++ b/llvm/test/CodeGen/RISCV/select-constant-xor.ll @@ -48,7 +48,7 @@ define i32 @selecti64i32(i64 %a) { ; RV32-LABEL: selecti64i32: ; RV32: # %bb.0: -; RV32-NEXT: addi a0, zero, -1 +; RV32-NEXT: li a0, -1 ; RV32-NEXT: slt a0, a0, a1 ; RV32-NEXT: lui a1, 524288 ; RV32-NEXT: sub a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll --- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll +++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll @@ -15,7 +15,7 @@ define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind { ; RV32I-LABEL: cmovcc64: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: addi a5, zero, 123 +; RV32I-NEXT: li a5, 123 ; RV32I-NEXT: beq a0, a5, .LBB0_2 ; RV32I-NEXT: # %bb.1: # %entry ; RV32I-NEXT: mv a1, a3 @@ -34,7 +34,7 @@ ; ; RV64I-LABEL: cmovcc64: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: addi a3, zero, 123 +; RV64I-NEXT: li a3, 123 ; RV64I-NEXT: beq a0, a3, .LBB0_2 ; RV64I-NEXT: # %bb.1: # %entry ; RV64I-NEXT: mv a1, a2 @@ -119,7 +119,7 @@ ; ; RV64I-LABEL: cmovcc128: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: addi a5, zero, 123 +; RV64I-NEXT: li a5, 123 ; RV64I-NEXT: beq a0, a5, .LBB1_2 ; RV64I-NEXT: # %bb.1: # %entry ; RV64I-NEXT: mv a1, a3 @@ -412,7 +412,7 @@ define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind { ; RV32I-LABEL: cmovccdep: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: addi a4, zero, 123 +; RV32I-NEXT: li a4, 123 ; RV32I-NEXT: bne a0, a4, .LBB6_3 ; RV32I-NEXT: # %bb.1: # %entry ; RV32I-NEXT: mv a2, a1 @@ -439,7 +439,7 @@ ; ; RV64I-LABEL: cmovccdep: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: addi a4, zero, 123 +; RV64I-NEXT: li a4, 123 ; RV64I-NEXT: bne a0, a4, .LBB6_3 ; RV64I-NEXT: # %bb.1: # %entry ; RV64I-NEXT: mv a2, a1 diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll --- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll +++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll @@ -211,7 +211,7 @@ ; RV32I-LABEL: zext_i1_to_i64: ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i1_to_i64: @@ -254,7 +254,7 @@ ; RV32I-LABEL: zext_i8_to_i64: ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 255 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i8_to_i64: @@ -289,7 +289,7 @@ ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i16_to_i64: @@ -305,7 +305,7 @@ define i64 @zext_i32_to_i64(i32 %a) nounwind { ; RV32I-LABEL: zext_i32_to_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i32_to_i64: diff --git a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll --- a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll +++ b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll @@ -166,11 +166,11 @@ ; RV32I-NEXT: bltz a4, .LBB9_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sll a1, a0, a4 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB9_2: ; RV32I-NEXT: sll a1, a1, a2 -; RV32I-NEXT: addi a4, zero, 31 +; RV32I-NEXT: li a4, 31 ; RV32I-NEXT: sub a3, a4, a3 ; RV32I-NEXT: srli a4, 
a0, 1 ; RV32I-NEXT: srl a3, a4, a3 @@ -198,11 +198,11 @@ ; RV32I-NEXT: bltz a4, .LBB10_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srl a0, a1, a4 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB10_2: ; RV32I-NEXT: srl a0, a0, a2 -; RV32I-NEXT: addi a4, zero, 31 +; RV32I-NEXT: li a4, 31 ; RV32I-NEXT: sub a3, a4, a3 ; RV32I-NEXT: slli a4, a1, 1 ; RV32I-NEXT: sll a3, a4, a3 @@ -234,7 +234,7 @@ ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB11_2: ; RV32I-NEXT: srl a0, a0, a2 -; RV32I-NEXT: addi a4, zero, 31 +; RV32I-NEXT: li a4, 31 ; RV32I-NEXT: sub a3, a4, a3 ; RV32I-NEXT: slli a4, a1, 1 ; RV32I-NEXT: sll a3, a4, a3 diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll --- a/llvm/test/CodeGen/RISCV/shifts.ll +++ b/llvm/test/CodeGen/RISCV/shifts.ll @@ -17,11 +17,11 @@ ; RV32I-NEXT: bltz a3, .LBB0_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srl a0, a1, a3 -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB0_2: ; RV32I-NEXT: srl a0, a0, a2 -; RV32I-NEXT: addi a3, zero, 31 +; RV32I-NEXT: li a3, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: slli a4, a1, 1 ; RV32I-NEXT: sll a3, a4, a3 @@ -66,7 +66,7 @@ ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB2_2: ; RV32I-NEXT: srl a0, a0, a2 -; RV32I-NEXT: addi a3, zero, 31 +; RV32I-NEXT: li a3, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: slli a4, a1, 1 ; RV32I-NEXT: sll a3, a4, a3 @@ -107,11 +107,11 @@ ; RV32I-NEXT: bltz a3, .LBB4_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sll a1, a0, a3 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_2: ; RV32I-NEXT: sll a1, a1, a2 -; RV32I-NEXT: addi a3, zero, 31 +; RV32I-NEXT: li a3, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: srli a4, a0, 1 ; RV32I-NEXT: srl a3, a4, a3 @@ -153,11 +153,11 @@ ; RV32I-NEXT: lw a2, 0(a2) ; RV32I-NEXT: lw t0, 8(a1) ; RV32I-NEXT: lw t4, 12(a1) -; RV32I-NEXT: addi a6, zero, 64 +; RV32I-NEXT: li a6, 64 ; RV32I-NEXT: sub t1, a6, a2 -; RV32I-NEXT: addi a3, zero, 32 +; RV32I-NEXT: li a3, 32 ; RV32I-NEXT: sub t5, a3, a2 -; RV32I-NEXT: addi t2, zero, 31 +; RV32I-NEXT: li t2, 31 ; RV32I-NEXT: bltz t5, .LBB6_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sll a3, t0, t5 @@ -180,7 +180,7 @@ ; RV32I-NEXT: addi t3, a2, -64 ; RV32I-NEXT: bltz a4, .LBB6_7 ; RV32I-NEXT: # %bb.6: -; RV32I-NEXT: mv a7, zero +; RV32I-NEXT: li a7, 0 ; RV32I-NEXT: bgeu a2, a6, .LBB6_8 ; RV32I-NEXT: j .LBB6_9 ; RV32I-NEXT: .LBB6_7: @@ -218,7 +218,7 @@ ; RV32I-NEXT: bgeu a2, a6, .LBB6_18 ; RV32I-NEXT: j .LBB6_19 ; RV32I-NEXT: .LBB6_17: -; RV32I-NEXT: addi a4, zero, 95 +; RV32I-NEXT: li a4, 95 ; RV32I-NEXT: sub a4, a4, a2 ; RV32I-NEXT: sll a4, a3, a4 ; RV32I-NEXT: srl a1, t0, t3 @@ -243,18 +243,18 @@ ; RV32I-NEXT: or a3, a1, a3 ; RV32I-NEXT: bltu a2, a6, .LBB6_25 ; RV32I-NEXT: .LBB6_24: -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: .LBB6_25: ; RV32I-NEXT: bltz t6, .LBB6_27 ; RV32I-NEXT: # %bb.26: -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: bgeu a2, a6, .LBB6_28 ; RV32I-NEXT: j .LBB6_29 ; RV32I-NEXT: .LBB6_27: ; RV32I-NEXT: srl a4, t4, a2 ; RV32I-NEXT: bltu a2, a6, .LBB6_29 ; RV32I-NEXT: .LBB6_28: -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: .LBB6_29: ; RV32I-NEXT: sw a4, 12(a0) ; RV32I-NEXT: sw a3, 8(a0) @@ -270,11 +270,11 @@ ; RV64I-NEXT: bltz a3, .LBB6_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: srl a0, a1, a3 -; RV64I-NEXT: mv a1, zero +; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB6_2: ; RV64I-NEXT: srl a0, a0, a2 -; RV64I-NEXT: addi a3, zero, 63 +; 
RV64I-NEXT: li a3, 63 ; RV64I-NEXT: sub a3, a3, a2 ; RV64I-NEXT: slli a4, a1, 1 ; RV64I-NEXT: sll a3, a4, a3 @@ -294,11 +294,11 @@ ; RV32I-NEXT: lw a2, 0(a2) ; RV32I-NEXT: lw t2, 8(a1) ; RV32I-NEXT: lw t5, 12(a1) -; RV32I-NEXT: addi a6, zero, 64 +; RV32I-NEXT: li a6, 64 ; RV32I-NEXT: sub t1, a6, a2 -; RV32I-NEXT: addi a3, zero, 32 +; RV32I-NEXT: li a3, 32 ; RV32I-NEXT: sub t6, a3, a2 -; RV32I-NEXT: addi t4, zero, 31 +; RV32I-NEXT: li t4, 31 ; RV32I-NEXT: bltz t6, .LBB7_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sll s0, t2, t6 @@ -360,7 +360,7 @@ ; RV32I-NEXT: bgeu a2, a6, .LBB7_18 ; RV32I-NEXT: j .LBB7_19 ; RV32I-NEXT: .LBB7_17: -; RV32I-NEXT: addi a4, zero, 95 +; RV32I-NEXT: li a4, 95 ; RV32I-NEXT: sub a4, a4, a2 ; RV32I-NEXT: sll a4, s0, a4 ; RV32I-NEXT: srl a1, t2, t3 @@ -417,7 +417,7 @@ ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB7_2: ; RV64I-NEXT: srl a0, a0, a2 -; RV64I-NEXT: addi a3, zero, 63 +; RV64I-NEXT: li a3, 63 ; RV64I-NEXT: sub a3, a3, a2 ; RV64I-NEXT: slli a4, a1, 1 ; RV64I-NEXT: sll a3, a4, a3 @@ -436,11 +436,11 @@ ; RV32I-NEXT: lw a2, 0(a2) ; RV32I-NEXT: lw t0, 4(a1) ; RV32I-NEXT: lw t4, 0(a1) -; RV32I-NEXT: addi a6, zero, 64 +; RV32I-NEXT: li a6, 64 ; RV32I-NEXT: sub t1, a6, a2 -; RV32I-NEXT: addi a3, zero, 32 +; RV32I-NEXT: li a3, 32 ; RV32I-NEXT: sub t5, a3, a2 -; RV32I-NEXT: addi t2, zero, 31 +; RV32I-NEXT: li t2, 31 ; RV32I-NEXT: bltz t5, .LBB8_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srl a3, t0, t5 @@ -463,7 +463,7 @@ ; RV32I-NEXT: addi t3, a2, -64 ; RV32I-NEXT: bltz a4, .LBB8_7 ; RV32I-NEXT: # %bb.6: -; RV32I-NEXT: mv a7, zero +; RV32I-NEXT: li a7, 0 ; RV32I-NEXT: bgeu a2, a6, .LBB8_8 ; RV32I-NEXT: j .LBB8_9 ; RV32I-NEXT: .LBB8_7: @@ -501,7 +501,7 @@ ; RV32I-NEXT: bgeu a2, a6, .LBB8_18 ; RV32I-NEXT: j .LBB8_19 ; RV32I-NEXT: .LBB8_17: -; RV32I-NEXT: addi a4, zero, 95 +; RV32I-NEXT: li a4, 95 ; RV32I-NEXT: sub a4, a4, a2 ; RV32I-NEXT: srl a4, a3, a4 ; RV32I-NEXT: sll a1, t0, t3 @@ -526,18 +526,18 @@ ; RV32I-NEXT: or a3, a1, a3 ; RV32I-NEXT: bltu a2, a6, .LBB8_25 ; RV32I-NEXT: .LBB8_24: -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: .LBB8_25: ; RV32I-NEXT: bltz t6, .LBB8_27 ; RV32I-NEXT: # %bb.26: -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: bgeu a2, a6, .LBB8_28 ; RV32I-NEXT: j .LBB8_29 ; RV32I-NEXT: .LBB8_27: ; RV32I-NEXT: sll a4, t4, a2 ; RV32I-NEXT: bltu a2, a6, .LBB8_29 ; RV32I-NEXT: .LBB8_28: -; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: .LBB8_29: ; RV32I-NEXT: sw a4, 0(a0) ; RV32I-NEXT: sw a3, 4(a0) @@ -553,11 +553,11 @@ ; RV64I-NEXT: bltz a3, .LBB8_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: sll a1, a0, a3 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB8_2: ; RV64I-NEXT: sll a1, a1, a2 -; RV64I-NEXT: addi a3, zero, 63 +; RV64I-NEXT: li a3, 63 ; RV64I-NEXT: sub a3, a3, a2 ; RV64I-NEXT: srli a4, a0, 1 ; RV64I-NEXT: srl a3, a4, a3 @@ -648,7 +648,7 @@ ; RV32I-NEXT: call __lshrdi3@plt ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: sw a1, 0(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi s9, zero, 64 +; RV32I-NEXT: li s9, 64 ; RV32I-NEXT: sub a2, s9, s0 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap.ll b/llvm/test/CodeGen/RISCV/shrinkwrap.ll --- a/llvm/test/CodeGen/RISCV/shrinkwrap.ll +++ b/llvm/test/CodeGen/RISCV/shrinkwrap.ll @@ -15,7 +15,7 @@ ; RV32I-SW-NO: # %bb.0: ; RV32I-SW-NO-NEXT: addi sp, sp, -16 ; RV32I-SW-NO-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-SW-NO-NEXT: addi a1, zero, 32 +; RV32I-SW-NO-NEXT: li a1, 32 ; RV32I-SW-NO-NEXT: bgeu 
a1, a0, .LBB0_2 ; RV32I-SW-NO-NEXT: # %bb.1: # %if.end ; RV32I-SW-NO-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -26,7 +26,7 @@ ; ; RV32I-SW-LABEL: eliminate_restore: ; RV32I-SW: # %bb.0: -; RV32I-SW-NEXT: addi a1, zero, 32 +; RV32I-SW-NEXT: li a1, 32 ; RV32I-SW-NEXT: bgeu a1, a0, .LBB0_2 ; RV32I-SW-NEXT: # %bb.1: # %if.end ; RV32I-SW-NEXT: ret @@ -37,7 +37,7 @@ ; ; RV32I-SW-SR-LABEL: eliminate_restore: ; RV32I-SW-SR: # %bb.0: -; RV32I-SW-SR-NEXT: addi a1, zero, 32 +; RV32I-SW-SR-NEXT: li a1, 32 ; RV32I-SW-SR-NEXT: bgeu a1, a0, .LBB0_2 ; RV32I-SW-SR-NEXT: # %bb.1: # %if.end ; RV32I-SW-SR-NEXT: ret @@ -48,7 +48,7 @@ ; RV64I-SW-LABEL: eliminate_restore: ; RV64I-SW: # %bb.0: ; RV64I-SW-NEXT: sext.w a0, a0 -; RV64I-SW-NEXT: addi a1, zero, 32 +; RV64I-SW-NEXT: li a1, 32 ; RV64I-SW-NEXT: bgeu a1, a0, .LBB0_2 ; RV64I-SW-NEXT: # %bb.1: # %if.end ; RV64I-SW-NEXT: ret @@ -76,7 +76,7 @@ ; RV32I-SW-NO-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-SW-NO-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-SW-NO-NEXT: addi s0, sp, 16 -; RV32I-SW-NO-NEXT: addi a1, zero, 32 +; RV32I-SW-NO-NEXT: li a1, 32 ; RV32I-SW-NO-NEXT: bltu a1, a0, .LBB1_2 ; RV32I-SW-NO-NEXT: # %bb.1: # %if.then ; RV32I-SW-NO-NEXT: addi a0, a0, 15 @@ -93,7 +93,7 @@ ; ; RV32I-SW-LABEL: conditional_alloca: ; RV32I-SW: # %bb.0: -; RV32I-SW-NEXT: addi a1, zero, 32 +; RV32I-SW-NEXT: li a1, 32 ; RV32I-SW-NEXT: bltu a1, a0, .LBB1_2 ; RV32I-SW-NEXT: # %bb.1: # %if.then ; RV32I-SW-NEXT: addi sp, sp, -16 @@ -114,7 +114,7 @@ ; ; RV32I-SW-SR-LABEL: conditional_alloca: ; RV32I-SW-SR: # %bb.0: -; RV32I-SW-SR-NEXT: addi a1, zero, 32 +; RV32I-SW-SR-NEXT: li a1, 32 ; RV32I-SW-SR-NEXT: bltu a1, a0, .LBB1_2 ; RV32I-SW-SR-NEXT: # %bb.1: # %if.then ; RV32I-SW-SR-NEXT: call t0, __riscv_save_1 @@ -132,7 +132,7 @@ ; RV64I-SW-LABEL: conditional_alloca: ; RV64I-SW: # %bb.0: ; RV64I-SW-NEXT: sext.w a1, a0 -; RV64I-SW-NEXT: addi a2, zero, 32 +; RV64I-SW-NEXT: li a2, 32 ; RV64I-SW-NEXT: bltu a2, a1, .LBB1_2 ; RV64I-SW-NEXT: # %bb.1: # %if.then ; RV64I-SW-NEXT: addi sp, sp, -16 diff --git a/llvm/test/CodeGen/RISCV/split-offsets.ll b/llvm/test/CodeGen/RISCV/split-offsets.ll --- a/llvm/test/CodeGen/RISCV/split-offsets.ll +++ b/llvm/test/CodeGen/RISCV/split-offsets.ll @@ -16,9 +16,9 @@ ; RV32I-NEXT: addi a2, a2, -1920 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: add a0, a0, a2 -; RV32I-NEXT: addi a2, zero, 2 +; RV32I-NEXT: li a2, 2 ; RV32I-NEXT: sw a2, 0(a0) -; RV32I-NEXT: addi a3, zero, 1 +; RV32I-NEXT: li a3, 1 ; RV32I-NEXT: sw a3, 4(a0) ; RV32I-NEXT: sw a3, 0(a1) ; RV32I-NEXT: sw a2, 4(a1) @@ -31,9 +31,9 @@ ; RV64I-NEXT: addiw a2, a2, -1920 ; RV64I-NEXT: add a1, a1, a2 ; RV64I-NEXT: add a0, a0, a2 -; RV64I-NEXT: addi a2, zero, 2 +; RV64I-NEXT: li a2, 2 ; RV64I-NEXT: sw a2, 0(a0) -; RV64I-NEXT: addi a3, zero, 1 +; RV64I-NEXT: li a3, 1 ; RV64I-NEXT: sw a3, 4(a0) ; RV64I-NEXT: sw a3, 0(a1) ; RV64I-NEXT: sw a2, 4(a1) @@ -55,7 +55,7 @@ define void @test2([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) { ; RV32I-LABEL: test2: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: lw a4, 0(a0) ; RV32I-NEXT: lui a0, 20 ; RV32I-NEXT: addi a5, a0, -1920 @@ -76,7 +76,7 @@ ; ; RV64I-LABEL: test2: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: mv a3, zero +; RV64I-NEXT: li a3, 0 ; RV64I-NEXT: ld a4, 0(a0) ; RV64I-NEXT: lui a0, 20 ; RV64I-NEXT: addiw a5, a0, -1920 diff --git a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll --- a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll +++ 
b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll @@ -11,7 +11,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: addi a0, sp, 16 ; RV32I-NEXT: call foo@plt -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 2032 @@ -31,7 +31,7 @@ ; RV32I-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill ; RV32I-NEXT: addi a0, sp, 4 ; RV32I-NEXT: call foo@plt -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 2032 ; RV32I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll @@ -13,7 +13,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -28,7 +28,7 @@ ; RV32IM-NEXT: srli a2, a1, 31 ; RV32IM-NEXT: srai a1, a1, 6 ; RV32IM-NEXT: add a1, a1, a2 -; RV32IM-NEXT: addi a2, zero, 95 +; RV32IM-NEXT: li a2, 95 ; RV32IM-NEXT: mul a1, a1, a2 ; RV32IM-NEXT: sub a0, a0, a1 ; RV32IM-NEXT: ret @@ -38,7 +38,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -55,7 +55,7 @@ ; RV64IM-NEXT: srliw a2, a1, 31 ; RV64IM-NEXT: sraiw a1, a1, 6 ; RV64IM-NEXT: addw a1, a1, a2 -; RV64IM-NEXT: addi a2, zero, 95 +; RV64IM-NEXT: li a2, 95 ; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -69,7 +69,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 1060 +; RV32I-NEXT: li a1, 1060 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -83,7 +83,7 @@ ; RV32IM-NEXT: srli a2, a1, 31 ; RV32IM-NEXT: srai a1, a1, 8 ; RV32IM-NEXT: add a1, a1, a2 -; RV32IM-NEXT: addi a2, zero, 1060 +; RV32IM-NEXT: li a2, 1060 ; RV32IM-NEXT: mul a1, a1, a2 ; RV32IM-NEXT: sub a0, a0, a1 ; RV32IM-NEXT: ret @@ -93,7 +93,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: addi a1, zero, 1060 +; RV64I-NEXT: li a1, 1060 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -108,7 +108,7 @@ ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 40 ; RV64IM-NEXT: addw a1, a1, a2 -; RV64IM-NEXT: addi a2, zero, 1060 +; RV64IM-NEXT: li a2, 1060 ; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -122,7 +122,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, -723 +; RV32I-NEXT: li a1, -723 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -136,7 +136,7 @@ ; RV32IM-NEXT: srli a2, a1, 31 ; RV32IM-NEXT: srai a1, a1, 8 ; RV32IM-NEXT: add a1, a1, a2 -; RV32IM-NEXT: addi a2, zero, -723 +; RV32IM-NEXT: li a2, -723 ; RV32IM-NEXT: mul a1, a1, a2 ; RV32IM-NEXT: sub a0, a0, a1 ; RV32IM-NEXT: ret @@ -146,7 +146,7 @@ ; RV64I-NEXT: addi sp, sp, -16 
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: addi a1, zero, -723 +; RV64I-NEXT: li a1, -723 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -161,7 +161,7 @@ ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 40 ; RV64IM-NEXT: addw a1, a1, a2 -; RV64IM-NEXT: addi a2, zero, -723 +; RV64IM-NEXT: li a2, -723 ; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -236,10 +236,10 @@ ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: add a0, s1, a0 @@ -258,7 +258,7 @@ ; RV32IM-NEXT: srli a2, a1, 31 ; RV32IM-NEXT: srai a1, a1, 6 ; RV32IM-NEXT: add a1, a1, a2 -; RV32IM-NEXT: addi a2, zero, 95 +; RV32IM-NEXT: li a2, 95 ; RV32IM-NEXT: mul a2, a1, a2 ; RV32IM-NEXT: sub a0, a0, a2 ; RV32IM-NEXT: add a0, a0, a1 @@ -271,11 +271,11 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w s0, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: addw a0, s1, a0 @@ -296,7 +296,7 @@ ; RV64IM-NEXT: srliw a2, a1, 31 ; RV64IM-NEXT: sraiw a1, a1, 6 ; RV64IM-NEXT: addw a1, a1, a2 -; RV64IM-NEXT: addi a2, zero, 95 +; RV64IM-NEXT: li a2, 95 ; RV64IM-NEXT: mulw a2, a1, a2 ; RV64IM-NEXT: subw a0, a0, a2 ; RV64IM-NEXT: addw a0, a0, a1 @@ -352,7 +352,7 @@ define i32 @dont_fold_srem_one(i32 %x) nounwind { ; CHECK-LABEL: dont_fold_srem_one: ; CHECK: # %bb.0: -; CHECK-NEXT: mv a0, zero +; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: ret %1 = srem i32 %x, 1 ret i32 %1 @@ -409,8 +409,8 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 98 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a2, 98 +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __moddi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -420,8 +420,8 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: addi sp, sp, -16 ; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IM-NEXT: addi a2, zero, 98 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a2, 98 +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __moddi3@plt ; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -431,7 +431,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 98 +; RV64I-NEXT: li a1, 98 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -451,7 +451,7 @@ ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 5 ; RV64IM-NEXT: add a1, a1, a2 -; RV64IM-NEXT: addi a2, zero, 98 +; RV64IM-NEXT: li a2, 98 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: sub a0, a0, a1 ; RV64IM-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ 
b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -122,7 +122,7 @@ ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32-NEXT: slli a0, a0, 28 ; RV32-NEXT: srai a0, a0, 28 -; RV32-NEXT: addi a1, zero, 6 +; RV32-NEXT: li a1, 6 ; RV32-NEXT: call __modsi3@plt ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: seqz a0, a0 @@ -136,7 +136,7 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: slli a0, a0, 60 ; RV64-NEXT: srai a0, a0, 60 -; RV64-NEXT: addi a1, zero, 6 +; RV64-NEXT: li a1, 6 ; RV64-NEXT: call __moddi3@plt ; RV64-NEXT: addi a0, a0, -1 ; RV64-NEXT: seqz a0, a0 @@ -153,7 +153,7 @@ ; RV32M-NEXT: mulh a1, a0, a1 ; RV32M-NEXT: srli a2, a1, 31 ; RV32M-NEXT: add a1, a1, a2 -; RV32M-NEXT: addi a2, zero, 6 +; RV32M-NEXT: li a2, 6 ; RV32M-NEXT: mul a1, a1, a2 ; RV32M-NEXT: sub a0, a0, a1 ; RV32M-NEXT: addi a0, a0, -1 @@ -175,7 +175,7 @@ ; RV64M-NEXT: mulh a1, a0, a1 ; RV64M-NEXT: srli a2, a1, 63 ; RV64M-NEXT: add a1, a1, a2 -; RV64M-NEXT: addi a2, zero, 6 +; RV64M-NEXT: li a2, 6 ; RV64M-NEXT: mul a1, a1, a2 ; RV64M-NEXT: sub a0, a0, a1 ; RV64M-NEXT: addi a0, a0, -1 @@ -191,7 +191,7 @@ ; RV32MV-NEXT: mulh a1, a0, a1 ; RV32MV-NEXT: srli a2, a1, 31 ; RV32MV-NEXT: add a1, a1, a2 -; RV32MV-NEXT: addi a2, zero, 6 +; RV32MV-NEXT: li a2, 6 ; RV32MV-NEXT: mul a1, a1, a2 ; RV32MV-NEXT: sub a0, a0, a1 ; RV32MV-NEXT: addi a0, a0, -1 @@ -213,7 +213,7 @@ ; RV64MV-NEXT: mulh a1, a0, a1 ; RV64MV-NEXT: srli a2, a1, 63 ; RV64MV-NEXT: add a1, a1, a2 -; RV64MV-NEXT: addi a2, zero, 6 +; RV64MV-NEXT: li a2, 6 ; RV64MV-NEXT: mul a1, a1, a2 ; RV64MV-NEXT: sub a0, a0, a1 ; RV64MV-NEXT: addi a0, a0, -1 @@ -338,22 +338,22 @@ ; RV32-NEXT: srli a1, a2, 1 ; RV32-NEXT: andi a1, a1, 1 ; RV32-NEXT: neg a1, a1 -; RV32-NEXT: addi a2, zero, 7 -; RV32-NEXT: mv a3, zero +; RV32-NEXT: li a2, 7 +; RV32-NEXT: li a3, 0 ; RV32-NEXT: call __moddi3@plt ; RV32-NEXT: mv s5, a0 ; RV32-NEXT: mv s6, a1 -; RV32-NEXT: addi a2, zero, -5 -; RV32-NEXT: addi a3, zero, -1 +; RV32-NEXT: li a2, -5 +; RV32-NEXT: li a3, -1 ; RV32-NEXT: mv a0, s3 ; RV32-NEXT: mv a1, s1 ; RV32-NEXT: call __moddi3@plt ; RV32-NEXT: mv s1, a0 ; RV32-NEXT: mv s3, a1 -; RV32-NEXT: addi a2, zero, 6 +; RV32-NEXT: li a2, 6 ; RV32-NEXT: mv a0, s4 ; RV32-NEXT: mv a1, s2 -; RV32-NEXT: mv a3, zero +; RV32-NEXT: li a3, 0 ; RV32-NEXT: call __moddi3@plt ; RV32-NEXT: xori a2, s1, 2 ; RV32-NEXT: or a2, a2, s3 @@ -407,7 +407,7 @@ ; RV64-NEXT: lwu a1, 8(s0) ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: or a0, a1, a0 -; RV64-NEXT: addi s4, zero, -1 +; RV64-NEXT: li s4, -1 ; RV64-NEXT: srli a1, s4, 24 ; RV64-NEXT: and a0, a0, a1 ; RV64-NEXT: ld a1, 0(s0) @@ -420,11 +420,11 @@ ; RV64-NEXT: srai a0, a0, 31 ; RV64-NEXT: slli a1, a1, 31 ; RV64-NEXT: srai s2, a1, 31 -; RV64-NEXT: addi a1, zero, 7 -; RV64-NEXT: addi s5, zero, 7 +; RV64-NEXT: li a1, 7 +; RV64-NEXT: li s5, 7 ; RV64-NEXT: call __moddi3@plt ; RV64-NEXT: mv s3, a0 -; RV64-NEXT: addi a1, zero, -5 +; RV64-NEXT: li a1, -5 ; RV64-NEXT: mv a0, s1 ; RV64-NEXT: call __moddi3@plt ; RV64-NEXT: mv s1, a0 @@ -512,22 +512,22 @@ ; RV32M-NEXT: srli a1, a2, 1 ; RV32M-NEXT: andi a1, a1, 1 ; RV32M-NEXT: neg a1, a1 -; RV32M-NEXT: addi a2, zero, 7 -; RV32M-NEXT: mv a3, zero +; RV32M-NEXT: li a2, 7 +; RV32M-NEXT: li a3, 0 ; RV32M-NEXT: call __moddi3@plt ; RV32M-NEXT: mv s5, a0 ; RV32M-NEXT: mv s6, a1 -; RV32M-NEXT: addi a2, zero, -5 -; RV32M-NEXT: addi a3, zero, -1 +; RV32M-NEXT: li a2, -5 +; RV32M-NEXT: li a3, -1 ; RV32M-NEXT: mv a0, s3 ; RV32M-NEXT: mv a1, s1 ; RV32M-NEXT: call __moddi3@plt ; RV32M-NEXT: mv s1, a0 ; RV32M-NEXT: mv s3, a1 -; 
RV32M-NEXT: addi a2, zero, 6 +; RV32M-NEXT: li a2, 6 ; RV32M-NEXT: mv a0, s4 ; RV32M-NEXT: mv a1, s2 -; RV32M-NEXT: mv a3, zero +; RV32M-NEXT: li a3, 0 ; RV32M-NEXT: call __moddi3@plt ; RV32M-NEXT: xori a2, s1, 2 ; RV32M-NEXT: or a2, a2, s3 @@ -572,7 +572,7 @@ ; RV64M-NEXT: lwu a2, 8(a0) ; RV64M-NEXT: slli a1, a1, 32 ; RV64M-NEXT: or a2, a2, a1 -; RV64M-NEXT: addi a6, zero, -1 +; RV64M-NEXT: li a6, -1 ; RV64M-NEXT: srli a3, a6, 24 ; RV64M-NEXT: and a2, a2, a3 ; RV64M-NEXT: ld a3, 0(a0) @@ -644,7 +644,7 @@ ; RV64M-NEXT: neg a1, a1 ; RV64M-NEXT: neg a4, a2 ; RV64M-NEXT: neg a3, a3 -; RV64M-NEXT: addi a5, zero, 7 +; RV64M-NEXT: li a5, 7 ; RV64M-NEXT: slli a5, a5, 32 ; RV64M-NEXT: and a4, a4, a5 ; RV64M-NEXT: srli a4, a4, 32 @@ -692,26 +692,26 @@ ; RV32MV-NEXT: neg s5, a2 ; RV32MV-NEXT: andi a1, a1, 1 ; RV32MV-NEXT: neg a1, a1 -; RV32MV-NEXT: addi a2, zero, 6 -; RV32MV-NEXT: mv a3, zero +; RV32MV-NEXT: li a2, 6 +; RV32MV-NEXT: li a3, 0 ; RV32MV-NEXT: call __moddi3@plt ; RV32MV-NEXT: sw a1, 36(sp) ; RV32MV-NEXT: sw a0, 32(sp) -; RV32MV-NEXT: addi a2, zero, -5 -; RV32MV-NEXT: addi a3, zero, -1 +; RV32MV-NEXT: li a2, -5 +; RV32MV-NEXT: li a3, -1 ; RV32MV-NEXT: mv a0, s4 ; RV32MV-NEXT: mv a1, s5 ; RV32MV-NEXT: call __moddi3@plt ; RV32MV-NEXT: sw a1, 52(sp) ; RV32MV-NEXT: sw a0, 48(sp) -; RV32MV-NEXT: addi a2, zero, 7 +; RV32MV-NEXT: li a2, 7 ; RV32MV-NEXT: mv a0, s2 ; RV32MV-NEXT: mv a1, s3 -; RV32MV-NEXT: mv a3, zero +; RV32MV-NEXT: li a3, 0 ; RV32MV-NEXT: call __moddi3@plt ; RV32MV-NEXT: sw a1, 44(sp) ; RV32MV-NEXT: sw a0, 40(sp) -; RV32MV-NEXT: addi a0, zero, 85 +; RV32MV-NEXT: li a0, 85 ; RV32MV-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32MV-NEXT: vmv.s.x v0, a0 ; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -776,7 +776,7 @@ ; RV64MV-NEXT: lwu a2, 8(a0) ; RV64MV-NEXT: slli a1, a1, 32 ; RV64MV-NEXT: or a2, a2, a1 -; RV64MV-NEXT: addi a6, zero, -1 +; RV64MV-NEXT: li a6, -1 ; RV64MV-NEXT: ld a3, 0(a0) ; RV64MV-NEXT: srli a4, a6, 24 ; RV64MV-NEXT: and a2, a2, a4 @@ -800,7 +800,7 @@ ; RV64MV-NEXT: mulh a5, a3, a5 ; RV64MV-NEXT: srli a1, a5, 63 ; RV64MV-NEXT: add a1, a5, a1 -; RV64MV-NEXT: addi a5, zero, 6 +; RV64MV-NEXT: li a5, 6 ; RV64MV-NEXT: mul a1, a1, a5 ; RV64MV-NEXT: sub a1, a3, a1 ; RV64MV-NEXT: sd a1, 32(sp) diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll @@ -24,19 +24,19 @@ ; RV32I-NEXT: lh s0, 4(a1) ; RV32I-NEXT: lh a2, 0(a1) ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: addi a1, zero, -124 +; RV32I-NEXT: li a1, -124 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s5, a0 -; RV32I-NEXT: addi a1, zero, 98 +; RV32I-NEXT: li a1, 98 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, -1003 +; RV32I-NEXT: li a1, -1003 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: sh a0, 6(s1) @@ -66,7 +66,7 @@ ; RV32IM-NEXT: srli a2, a5, 31 ; RV32IM-NEXT: srli a5, a5, 6 ; RV32IM-NEXT: add a2, a5, a2 -; RV32IM-NEXT: addi a5, zero, 95 +; RV32IM-NEXT: li a5, 95 ; RV32IM-NEXT: mul a2, a2, a5 ; RV32IM-NEXT: sub a2, a4, a2 ; RV32IM-NEXT: lui a4, 507375 @@ -76,7 +76,7 @@ ; RV32IM-NEXT: srli a5, a4, 31 ; RV32IM-NEXT: srli a4, a4, 6 ; RV32IM-NEXT: add a4, a4, a5 -; RV32IM-NEXT: addi a5, zero, -124 +; RV32IM-NEXT: li a5, -124 ; 
RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a1, a1, a4 ; RV32IM-NEXT: lui a4, 342392 @@ -85,7 +85,7 @@ ; RV32IM-NEXT: srli a5, a4, 31 ; RV32IM-NEXT: srli a4, a4, 5 ; RV32IM-NEXT: add a4, a4, a5 -; RV32IM-NEXT: addi a5, zero, 98 +; RV32IM-NEXT: li a5, 98 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a3, a3, a4 ; RV32IM-NEXT: lui a4, 780943 @@ -94,7 +94,7 @@ ; RV32IM-NEXT: srli a5, a4, 31 ; RV32IM-NEXT: srli a4, a4, 8 ; RV32IM-NEXT: add a4, a4, a5 -; RV32IM-NEXT: addi a5, zero, -1003 +; RV32IM-NEXT: li a5, -1003 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a4, a6, a4 ; RV32IM-NEXT: sh a4, 6(a0) @@ -118,19 +118,19 @@ ; RV64I-NEXT: lh s0, 8(a1) ; RV64I-NEXT: lh a2, 0(a1) ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: addi a1, zero, -124 +; RV64I-NEXT: li a1, -124 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s5, a0 -; RV64I-NEXT: addi a1, zero, 98 +; RV64I-NEXT: li a1, 98 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, -1003 +; RV64I-NEXT: li a1, -1003 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: sh a0, 6(s1) @@ -166,7 +166,7 @@ ; RV64IM-NEXT: srli a2, a5, 63 ; RV64IM-NEXT: srli a5, a5, 6 ; RV64IM-NEXT: addw a2, a5, a2 -; RV64IM-NEXT: addi a5, zero, 95 +; RV64IM-NEXT: li a5, 95 ; RV64IM-NEXT: mulw a2, a2, a5 ; RV64IM-NEXT: subw a1, a1, a2 ; RV64IM-NEXT: lui a2, 777976 @@ -181,7 +181,7 @@ ; RV64IM-NEXT: srli a5, a2, 63 ; RV64IM-NEXT: srli a2, a2, 6 ; RV64IM-NEXT: addw a2, a2, a5 -; RV64IM-NEXT: addi a5, zero, -124 +; RV64IM-NEXT: li a5, -124 ; RV64IM-NEXT: mulw a2, a2, a5 ; RV64IM-NEXT: subw a2, a4, a2 ; RV64IM-NEXT: lui a4, 2675 @@ -196,7 +196,7 @@ ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 5 ; RV64IM-NEXT: addw a4, a4, a5 -; RV64IM-NEXT: addi a5, zero, 98 +; RV64IM-NEXT: li a5, 98 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: lui a4, 1040212 @@ -211,7 +211,7 @@ ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 7 ; RV64IM-NEXT: addw a4, a4, a5 -; RV64IM-NEXT: addi a5, zero, -1003 +; RV64IM-NEXT: li a5, -1003 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a4, a6, a4 ; RV64IM-NEXT: sh a4, 6(a0) @@ -239,19 +239,19 @@ ; RV32I-NEXT: lh s0, 4(a1) ; RV32I-NEXT: lh a2, 0(a1) ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s5, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: sh a0, 6(s1) @@ -281,7 +281,7 @@ ; RV32IM-NEXT: srli a7, a2, 31 ; RV32IM-NEXT: srli a2, a2, 6 ; RV32IM-NEXT: add a2, a2, a7 -; RV32IM-NEXT: addi a7, zero, 95 +; RV32IM-NEXT: li a7, 95 ; RV32IM-NEXT: mul a2, a2, a7 ; RV32IM-NEXT: sub t0, a4, a2 ; RV32IM-NEXT: mulh a4, a1, a5 @@ -326,19 +326,19 @@ ; RV64I-NEXT: lh s0, 8(a1) ; RV64I-NEXT: lh a2, 0(a1) ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; 
RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s5, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: sh a0, 6(s1) @@ -374,7 +374,7 @@ ; RV64IM-NEXT: srli a3, a2, 63 ; RV64IM-NEXT: srli a2, a2, 6 ; RV64IM-NEXT: addw a2, a2, a3 -; RV64IM-NEXT: addi a3, zero, 95 +; RV64IM-NEXT: li a3, 95 ; RV64IM-NEXT: mulw a2, a2, a3 ; RV64IM-NEXT: subw t0, a1, a2 ; RV64IM-NEXT: mulh a2, a4, a5 @@ -429,35 +429,35 @@ ; RV32I-NEXT: lh s4, 8(a1) ; RV32I-NEXT: lh s1, 12(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s5, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s6, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s7, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s8, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: mv s9, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __divsi3@plt ; RV32I-NEXT: add a0, s8, a0 @@ -495,7 +495,7 @@ ; RV32IM-NEXT: srli a7, a2, 31 ; RV32IM-NEXT: srai a2, a2, 6 ; RV32IM-NEXT: add t0, a2, a7 -; RV32IM-NEXT: addi a7, zero, 95 +; RV32IM-NEXT: li a7, 95 ; RV32IM-NEXT: mul a2, t0, a7 ; RV32IM-NEXT: sub t1, a4, a2 ; RV32IM-NEXT: mulh a4, a1, a5 @@ -548,35 +548,35 @@ ; RV64I-NEXT: lh s4, 16(a1) ; RV64I-NEXT: lh s1, 24(a1) ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s5, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s6, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s7, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s8, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: mv s9, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __divdi3@plt ; RV64I-NEXT: addw a0, s8, a0 @@ -620,7 +620,7 @@ ; RV64IM-NEXT: srli a3, a2, 63 ; RV64IM-NEXT: srai a2, a2, 6 ; RV64IM-NEXT: addw t3, a2, a3 -; RV64IM-NEXT: addi t0, zero, 95 +; RV64IM-NEXT: li t0, 95 ; RV64IM-NEXT: mulw a3, t3, t0 ; RV64IM-NEXT: subw t1, a1, a3 ; RV64IM-NEXT: 
mulh a3, a4, a5 @@ -686,7 +686,7 @@ ; RV32I-NEXT: add a1, a3, a1 ; RV32I-NEXT: andi a1, a1, -8 ; RV32I-NEXT: sub s1, a3, a1 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: sh a0, 6(s0) ; RV32I-NEXT: sh s1, 4(s0) @@ -713,7 +713,7 @@ ; RV32IM-NEXT: srli a6, a5, 31 ; RV32IM-NEXT: srli a5, a5, 6 ; RV32IM-NEXT: add a6, a5, a6 -; RV32IM-NEXT: addi a5, zero, 95 +; RV32IM-NEXT: li a5, 95 ; RV32IM-NEXT: mul a5, a6, a5 ; RV32IM-NEXT: sub a4, a4, a5 ; RV32IM-NEXT: srli a5, a1, 26 @@ -759,7 +759,7 @@ ; RV64I-NEXT: add a1, a3, a1 ; RV64I-NEXT: andi a1, a1, -8 ; RV64I-NEXT: subw s1, a3, a1 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: sh a0, 6(s0) ; RV64I-NEXT: sh s1, 4(s0) @@ -792,7 +792,7 @@ ; RV64IM-NEXT: srli a2, a5, 63 ; RV64IM-NEXT: srli a5, a5, 6 ; RV64IM-NEXT: addw a2, a5, a2 -; RV64IM-NEXT: addi a5, zero, 95 +; RV64IM-NEXT: li a5, 95 ; RV64IM-NEXT: mulw a2, a2, a5 ; RV64IM-NEXT: subw a1, a1, a2 ; RV64IM-NEXT: srli a2, a4, 58 @@ -830,11 +830,11 @@ ; RV32I-NEXT: lh s1, 8(a1) ; RV32I-NEXT: lh a2, 4(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 654 +; RV32I-NEXT: li a1, 654 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s3, a0 -; RV32I-NEXT: addi a1, zero, 23 +; RV32I-NEXT: li a1, 23 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s1, a0 @@ -866,7 +866,7 @@ ; RV32IM-NEXT: srli a5, a4, 31 ; RV32IM-NEXT: srli a4, a4, 9 ; RV32IM-NEXT: add a4, a4, a5 -; RV32IM-NEXT: addi a5, zero, 654 +; RV32IM-NEXT: li a5, 654 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a3, a3, a4 ; RV32IM-NEXT: lui a4, 729444 @@ -876,7 +876,7 @@ ; RV32IM-NEXT: srli a5, a4, 31 ; RV32IM-NEXT: srli a4, a4, 4 ; RV32IM-NEXT: add a4, a4, a5 -; RV32IM-NEXT: addi a5, zero, 23 +; RV32IM-NEXT: li a5, 23 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a1, a1, a4 ; RV32IM-NEXT: lui a4, 395996 @@ -907,11 +907,11 @@ ; RV64I-NEXT: lh s1, 16(a1) ; RV64I-NEXT: lh a2, 8(a1) ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 654 +; RV64I-NEXT: li a1, 654 ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s3, a0 -; RV64I-NEXT: addi a1, zero, 23 +; RV64I-NEXT: li a1, 23 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s1, a0 @@ -949,7 +949,7 @@ ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 4 ; RV64IM-NEXT: addw a4, a4, a5 -; RV64IM-NEXT: addi a5, zero, 23 +; RV64IM-NEXT: li a5, 23 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a1, a1, a4 ; RV64IM-NEXT: lui a4, 6413 @@ -964,7 +964,7 @@ ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 8 ; RV64IM-NEXT: addw a4, a4, a5 -; RV64IM-NEXT: addi a5, zero, 654 +; RV64IM-NEXT: li a5, 654 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: lui a4, 12375 @@ -1011,7 +1011,7 @@ ; RV32I-NEXT: lui a3, 8 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: sub s3, a2, a1 -; RV32I-NEXT: addi a1, zero, 23 +; RV32I-NEXT: li a1, 23 ; RV32I-NEXT: call __modsi3@plt ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, 1 @@ -1042,7 +1042,7 @@ ; RV32IM-NEXT: srli a5, a4, 31 ; RV32IM-NEXT: srli a4, a4, 4 ; RV32IM-NEXT: add a4, a4, a5 -; RV32IM-NEXT: addi a5, zero, 23 +; RV32IM-NEXT: li a5, 23 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a3, a3, a4 ; RV32IM-NEXT: lui a4, 395996 @@ -1083,7 +1083,7 @@ ; RV64I-NEXT: lui a3, 8 ; RV64I-NEXT: and a1, a1, a3 ; RV64I-NEXT: subw s3, a2, a1 -; RV64I-NEXT: addi a1, zero, 23 +; RV64I-NEXT: li a1, 23 ; RV64I-NEXT: call 
__moddi3@plt ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a0, 1 @@ -1120,7 +1120,7 @@ ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 4 ; RV64IM-NEXT: addw a4, a4, a5 -; RV64IM-NEXT: addi a5, zero, 23 +; RV64IM-NEXT: li a5, 23 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a1, a1, a4 ; RV64IM-NEXT: lui a4, 12375 @@ -1178,23 +1178,23 @@ ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: lw a1, 4(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a2, zero, 1 +; RV32I-NEXT: li a2, 1 ; RV32I-NEXT: mv a0, a3 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __moddi3@plt ; RV32I-NEXT: mv s7, a0 ; RV32I-NEXT: mv s8, a1 -; RV32I-NEXT: addi a2, zero, 654 +; RV32I-NEXT: li a2, 654 ; RV32I-NEXT: mv a0, s6 ; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __moddi3@plt ; RV32I-NEXT: mv s6, a0 ; RV32I-NEXT: mv s9, a1 -; RV32I-NEXT: addi a2, zero, 23 +; RV32I-NEXT: li a2, 23 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s5 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __moddi3@plt ; RV32I-NEXT: mv s4, a0 ; RV32I-NEXT: mv s1, a1 @@ -1202,7 +1202,7 @@ ; RV32I-NEXT: addi a2, a0, 1327 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: mv a1, s3 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __moddi3@plt ; RV32I-NEXT: sw a1, 28(s0) ; RV32I-NEXT: sw a0, 24(s0) @@ -1249,23 +1249,23 @@ ; RV32IM-NEXT: lw a3, 0(a1) ; RV32IM-NEXT: lw a1, 4(a1) ; RV32IM-NEXT: mv s0, a0 -; RV32IM-NEXT: addi a2, zero, 1 +; RV32IM-NEXT: li a2, 1 ; RV32IM-NEXT: mv a0, a3 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __moddi3@plt ; RV32IM-NEXT: mv s7, a0 ; RV32IM-NEXT: mv s8, a1 -; RV32IM-NEXT: addi a2, zero, 654 +; RV32IM-NEXT: li a2, 654 ; RV32IM-NEXT: mv a0, s6 ; RV32IM-NEXT: mv a1, s1 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __moddi3@plt ; RV32IM-NEXT: mv s6, a0 ; RV32IM-NEXT: mv s9, a1 -; RV32IM-NEXT: addi a2, zero, 23 +; RV32IM-NEXT: li a2, 23 ; RV32IM-NEXT: mv a0, s4 ; RV32IM-NEXT: mv a1, s5 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __moddi3@plt ; RV32IM-NEXT: mv s4, a0 ; RV32IM-NEXT: mv s1, a1 @@ -1273,7 +1273,7 @@ ; RV32IM-NEXT: addi a2, a0, 1327 ; RV32IM-NEXT: mv a0, s2 ; RV32IM-NEXT: mv a1, s3 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __moddi3@plt ; RV32IM-NEXT: sw a1, 28(s0) ; RV32IM-NEXT: sw a0, 24(s0) @@ -1309,11 +1309,11 @@ ; RV64I-NEXT: ld s1, 16(a1) ; RV64I-NEXT: ld a2, 8(a1) ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 654 +; RV64I-NEXT: li a1, 654 ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s3, a0 -; RV64I-NEXT: addi a1, zero, 23 +; RV64I-NEXT: li a1, 23 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: mv s1, a0 @@ -1351,7 +1351,7 @@ ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srai a4, a4, 4 ; RV64IM-NEXT: add a4, a4, a5 -; RV64IM-NEXT: addi a5, zero, 23 +; RV64IM-NEXT: li a5, 23 ; RV64IM-NEXT: mul a4, a4, a5 ; RV64IM-NEXT: sub a1, a1, a4 ; RV64IM-NEXT: lui a4, 6413 @@ -1366,7 +1366,7 @@ ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srai a4, a4, 8 ; RV64IM-NEXT: add a4, a4, a5 -; RV64IM-NEXT: addi a5, zero, 654 +; RV64IM-NEXT: li a5, 654 ; RV64IM-NEXT: mul a4, a4, a5 ; RV64IM-NEXT: sub a3, a3, a4 ; RV64IM-NEXT: lui a4, 12375 diff --git a/llvm/test/CodeGen/RISCV/ssub_sat.ll b/llvm/test/CodeGen/RISCV/ssub_sat.ll --- a/llvm/test/CodeGen/RISCV/ssub_sat.ll +++ b/llvm/test/CodeGen/RISCV/ssub_sat.ll @@ -111,7 +111,7 @@ ; RV64I-NEXT: beq a3, a1, .LBB1_2 
; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: srai a0, a0, 63 -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: .LBB1_2: @@ -145,7 +145,7 @@ ; RV64IZbbNOZbt-NEXT: beq a3, a1, .LBB1_2 ; RV64IZbbNOZbt-NEXT: # %bb.1: ; RV64IZbbNOZbt-NEXT: srai a0, a0, 63 -; RV64IZbbNOZbt-NEXT: addi a1, zero, -1 +; RV64IZbbNOZbt-NEXT: li a1, -1 ; RV64IZbbNOZbt-NEXT: slli a1, a1, 63 ; RV64IZbbNOZbt-NEXT: xor a0, a0, a1 ; RV64IZbbNOZbt-NEXT: .LBB1_2: @@ -175,7 +175,7 @@ ; RV64IZbbZbt-NEXT: slt a0, a1, a0 ; RV64IZbbZbt-NEXT: xor a0, a2, a0 ; RV64IZbbZbt-NEXT: srai a2, a1, 63 -; RV64IZbbZbt-NEXT: addi a3, zero, -1 +; RV64IZbbZbt-NEXT: li a3, -1 ; RV64IZbbZbt-NEXT: slli a3, a3, 63 ; RV64IZbbZbt-NEXT: xor a2, a2, a3 ; RV64IZbbZbt-NEXT: cmov a0, a0, a2, a1 @@ -250,54 +250,54 @@ ; RV32I-LABEL: func8: ; RV32I: # %bb.0: ; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 127 +; RV32I-NEXT: li a1, 127 ; RV32I-NEXT: bge a0, a1, .LBB3_3 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a1, zero, -128 +; RV32I-NEXT: li a1, -128 ; RV32I-NEXT: bge a1, a0, .LBB3_4 ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB3_3: -; RV32I-NEXT: addi a0, zero, 127 -; RV32I-NEXT: addi a1, zero, -128 +; RV32I-NEXT: li a0, 127 +; RV32I-NEXT: li a1, -128 ; RV32I-NEXT: blt a1, a0, .LBB3_2 ; RV32I-NEXT: .LBB3_4: -; RV32I-NEXT: addi a0, zero, -128 +; RV32I-NEXT: li a0, -128 ; RV32I-NEXT: ret ; ; RV64I-LABEL: func8: ; RV64I: # %bb.0: ; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 127 +; RV64I-NEXT: li a1, 127 ; RV64I-NEXT: bge a0, a1, .LBB3_3 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a1, zero, -128 +; RV64I-NEXT: li a1, -128 ; RV64I-NEXT: bge a1, a0, .LBB3_4 ; RV64I-NEXT: .LBB3_2: ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB3_3: -; RV64I-NEXT: addi a0, zero, 127 -; RV64I-NEXT: addi a1, zero, -128 +; RV64I-NEXT: li a0, 127 +; RV64I-NEXT: li a1, -128 ; RV64I-NEXT: blt a1, a0, .LBB3_2 ; RV64I-NEXT: .LBB3_4: -; RV64I-NEXT: addi a0, zero, -128 +; RV64I-NEXT: li a0, -128 ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func8: ; RV32IZbb: # %bb.0: ; RV32IZbb-NEXT: sub a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 127 +; RV32IZbb-NEXT: li a1, 127 ; RV32IZbb-NEXT: min a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, -128 +; RV32IZbb-NEXT: li a1, -128 ; RV32IZbb-NEXT: max a0, a0, a1 ; RV32IZbb-NEXT: ret ; ; RV64IZbb-LABEL: func8: ; RV64IZbb: # %bb.0: ; RV64IZbb-NEXT: sub a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 127 +; RV64IZbb-NEXT: li a1, 127 ; RV64IZbb-NEXT: min a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, -128 +; RV64IZbb-NEXT: li a1, -128 ; RV64IZbb-NEXT: max a0, a0, a1 ; RV64IZbb-NEXT: ret %tmp = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %y); @@ -308,54 +308,54 @@ ; RV32I-LABEL: func3: ; RV32I: # %bb.0: ; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 7 +; RV32I-NEXT: li a1, 7 ; RV32I-NEXT: bge a0, a1, .LBB4_3 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a1, zero, -8 +; RV32I-NEXT: li a1, -8 ; RV32I-NEXT: bge a1, a0, .LBB4_4 ; RV32I-NEXT: .LBB4_2: ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_3: -; RV32I-NEXT: addi a0, zero, 7 -; RV32I-NEXT: addi a1, zero, -8 +; RV32I-NEXT: li a0, 7 +; RV32I-NEXT: li a1, -8 ; RV32I-NEXT: blt a1, a0, .LBB4_2 ; RV32I-NEXT: .LBB4_4: -; RV32I-NEXT: addi a0, zero, -8 +; RV32I-NEXT: li a0, -8 ; RV32I-NEXT: ret ; ; RV64I-LABEL: func3: ; RV64I: # %bb.0: ; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 7 +; RV64I-NEXT: li a1, 7 ; RV64I-NEXT: bge a0, a1, .LBB4_3 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a1, zero, -8 +; RV64I-NEXT: li a1, -8 ; RV64I-NEXT: 
bge a1, a0, .LBB4_4 ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB4_3: -; RV64I-NEXT: addi a0, zero, 7 -; RV64I-NEXT: addi a1, zero, -8 +; RV64I-NEXT: li a0, 7 +; RV64I-NEXT: li a1, -8 ; RV64I-NEXT: blt a1, a0, .LBB4_2 ; RV64I-NEXT: .LBB4_4: -; RV64I-NEXT: addi a0, zero, -8 +; RV64I-NEXT: li a0, -8 ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func3: ; RV32IZbb: # %bb.0: ; RV32IZbb-NEXT: sub a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 7 +; RV32IZbb-NEXT: li a1, 7 ; RV32IZbb-NEXT: min a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, -8 +; RV32IZbb-NEXT: li a1, -8 ; RV32IZbb-NEXT: max a0, a0, a1 ; RV32IZbb-NEXT: ret ; ; RV64IZbb-LABEL: func3: ; RV64IZbb: # %bb.0: ; RV64IZbb-NEXT: sub a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 7 +; RV64IZbb-NEXT: li a1, 7 ; RV64IZbb-NEXT: min a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, -8 +; RV64IZbb-NEXT: li a1, -8 ; RV64IZbb-NEXT: max a0, a0, a1 ; RV64IZbb-NEXT: ret %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y); diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll --- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll @@ -119,7 +119,7 @@ ; RV64I-NEXT: beq a3, a1, .LBB1_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: srai a0, a0, 63 -; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: li a1, -1 ; RV64I-NEXT: slli a1, a1, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: .LBB1_2: @@ -153,7 +153,7 @@ ; RV64IZbbNOZbt-NEXT: beq a3, a1, .LBB1_2 ; RV64IZbbNOZbt-NEXT: # %bb.1: ; RV64IZbbNOZbt-NEXT: srai a0, a0, 63 -; RV64IZbbNOZbt-NEXT: addi a1, zero, -1 +; RV64IZbbNOZbt-NEXT: li a1, -1 ; RV64IZbbNOZbt-NEXT: slli a1, a1, 63 ; RV64IZbbNOZbt-NEXT: xor a0, a0, a1 ; RV64IZbbNOZbt-NEXT: .LBB1_2: @@ -183,7 +183,7 @@ ; RV64IZbbZbt-NEXT: slt a0, a2, a0 ; RV64IZbbZbt-NEXT: xor a0, a1, a0 ; RV64IZbbZbt-NEXT: srai a1, a2, 63 -; RV64IZbbZbt-NEXT: addi a3, zero, -1 +; RV64IZbbZbt-NEXT: li a3, -1 ; RV64IZbbZbt-NEXT: slli a3, a3, 63 ; RV64IZbbZbt-NEXT: xor a1, a1, a3 ; RV64IZbbZbt-NEXT: cmov a0, a0, a1, a2 @@ -281,19 +281,19 @@ ; RV32I-NEXT: slli a1, a1, 24 ; RV32I-NEXT: srai a1, a1, 24 ; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 127 +; RV32I-NEXT: li a1, 127 ; RV32I-NEXT: bge a0, a1, .LBB3_3 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a1, zero, -128 +; RV32I-NEXT: li a1, -128 ; RV32I-NEXT: bge a1, a0, .LBB3_4 ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB3_3: -; RV32I-NEXT: addi a0, zero, 127 -; RV32I-NEXT: addi a1, zero, -128 +; RV32I-NEXT: li a0, 127 +; RV32I-NEXT: li a1, -128 ; RV32I-NEXT: blt a1, a0, .LBB3_2 ; RV32I-NEXT: .LBB3_4: -; RV32I-NEXT: addi a0, zero, -128 +; RV32I-NEXT: li a0, -128 ; RV32I-NEXT: ret ; ; RV64I-LABEL: func8: @@ -304,19 +304,19 @@ ; RV64I-NEXT: slli a1, a1, 56 ; RV64I-NEXT: srai a1, a1, 56 ; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 127 +; RV64I-NEXT: li a1, 127 ; RV64I-NEXT: bge a0, a1, .LBB3_3 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a1, zero, -128 +; RV64I-NEXT: li a1, -128 ; RV64I-NEXT: bge a1, a0, .LBB3_4 ; RV64I-NEXT: .LBB3_2: ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB3_3: -; RV64I-NEXT: addi a0, zero, 127 -; RV64I-NEXT: addi a1, zero, -128 +; RV64I-NEXT: li a0, 127 +; RV64I-NEXT: li a1, -128 ; RV64I-NEXT: blt a1, a0, .LBB3_2 ; RV64I-NEXT: .LBB3_4: -; RV64I-NEXT: addi a0, zero, -128 +; RV64I-NEXT: li a0, -128 ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func8: @@ -325,9 +325,9 @@ ; RV32IZbb-NEXT: mul a1, a1, a2 ; RV32IZbb-NEXT: sext.b a1, a1 ; RV32IZbb-NEXT: sub a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 127 +; RV32IZbb-NEXT: li a1, 127 ; 
RV32IZbb-NEXT: min a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, -128 +; RV32IZbb-NEXT: li a1, -128 ; RV32IZbb-NEXT: max a0, a0, a1 ; RV32IZbb-NEXT: ret ; @@ -337,9 +337,9 @@ ; RV64IZbb-NEXT: mul a1, a1, a2 ; RV64IZbb-NEXT: sext.b a1, a1 ; RV64IZbb-NEXT: sub a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 127 +; RV64IZbb-NEXT: li a1, 127 ; RV64IZbb-NEXT: min a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, -128 +; RV64IZbb-NEXT: li a1, -128 ; RV64IZbb-NEXT: max a0, a0, a1 ; RV64IZbb-NEXT: ret %a = mul i8 %y, %z @@ -356,19 +356,19 @@ ; RV32I-NEXT: slli a1, a1, 28 ; RV32I-NEXT: srai a1, a1, 28 ; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 7 +; RV32I-NEXT: li a1, 7 ; RV32I-NEXT: bge a0, a1, .LBB4_3 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a1, zero, -8 +; RV32I-NEXT: li a1, -8 ; RV32I-NEXT: bge a1, a0, .LBB4_4 ; RV32I-NEXT: .LBB4_2: ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_3: -; RV32I-NEXT: addi a0, zero, 7 -; RV32I-NEXT: addi a1, zero, -8 +; RV32I-NEXT: li a0, 7 +; RV32I-NEXT: li a1, -8 ; RV32I-NEXT: blt a1, a0, .LBB4_2 ; RV32I-NEXT: .LBB4_4: -; RV32I-NEXT: addi a0, zero, -8 +; RV32I-NEXT: li a0, -8 ; RV32I-NEXT: ret ; ; RV64I-LABEL: func4: @@ -379,19 +379,19 @@ ; RV64I-NEXT: slli a1, a1, 60 ; RV64I-NEXT: srai a1, a1, 60 ; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 7 +; RV64I-NEXT: li a1, 7 ; RV64I-NEXT: bge a0, a1, .LBB4_3 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a1, zero, -8 +; RV64I-NEXT: li a1, -8 ; RV64I-NEXT: bge a1, a0, .LBB4_4 ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB4_3: -; RV64I-NEXT: addi a0, zero, 7 -; RV64I-NEXT: addi a1, zero, -8 +; RV64I-NEXT: li a0, 7 +; RV64I-NEXT: li a1, -8 ; RV64I-NEXT: blt a1, a0, .LBB4_2 ; RV64I-NEXT: .LBB4_4: -; RV64I-NEXT: addi a0, zero, -8 +; RV64I-NEXT: li a0, -8 ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func4: @@ -402,9 +402,9 @@ ; RV32IZbb-NEXT: slli a1, a1, 28 ; RV32IZbb-NEXT: srai a1, a1, 28 ; RV32IZbb-NEXT: sub a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 7 +; RV32IZbb-NEXT: li a1, 7 ; RV32IZbb-NEXT: min a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, -8 +; RV32IZbb-NEXT: li a1, -8 ; RV32IZbb-NEXT: max a0, a0, a1 ; RV32IZbb-NEXT: ret ; @@ -416,9 +416,9 @@ ; RV64IZbb-NEXT: slli a1, a1, 60 ; RV64IZbb-NEXT: srai a1, a1, 60 ; RV64IZbb-NEXT: sub a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 7 +; RV64IZbb-NEXT: li a1, 7 ; RV64IZbb-NEXT: min a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, -8 +; RV64IZbb-NEXT: li a1, -8 ; RV64IZbb-NEXT: max a0, a0, a1 ; RV64IZbb-NEXT: ret %a = mul i4 %y, %z diff --git a/llvm/test/CodeGen/RISCV/stack-slot-size.ll b/llvm/test/CodeGen/RISCV/stack-slot-size.ll --- a/llvm/test/CodeGen/RISCV/stack-slot-size.ll +++ b/llvm/test/CodeGen/RISCV/stack-slot-size.ll @@ -18,7 +18,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a0, zero, 42 +; RV32I-NEXT: li a0, 42 ; RV32I-NEXT: sw a0, 24(sp) ; RV32I-NEXT: sw zero, 16(sp) ; RV32I-NEXT: sw zero, 12(sp) @@ -36,7 +36,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -48 ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 42 +; RV64I-NEXT: li a0, 42 ; RV64I-NEXT: sw a0, 36(sp) ; RV64I-NEXT: sd zero, 16(sp) ; RV64I-NEXT: sd zero, 8(sp) @@ -59,7 +59,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a0, zero, 42 +; RV32I-NEXT: li a0, 42 ; RV32I-NEXT: sw a0, 24(sp) ; RV32I-NEXT: sw zero, 16(sp) ; RV32I-NEXT: sw zero, 12(sp) @@ -77,7 +77,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -48 ; 
RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 42 +; RV64I-NEXT: li a0, 42 ; RV64I-NEXT: sw a0, 36(sp) ; RV64I-NEXT: sd zero, 16(sp) ; RV64I-NEXT: sd zero, 8(sp) @@ -100,7 +100,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a0, zero, 42 +; RV32I-NEXT: li a0, 42 ; RV32I-NEXT: sw a0, 24(sp) ; RV32I-NEXT: sw zero, 20(sp) ; RV32I-NEXT: sw zero, 16(sp) @@ -119,7 +119,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -48 ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a0, zero, 42 +; RV64I-NEXT: li a0, 42 ; RV64I-NEXT: sw a0, 36(sp) ; RV64I-NEXT: sd zero, 16(sp) ; RV64I-NEXT: sd zero, 8(sp) diff --git a/llvm/test/CodeGen/RISCV/uadd_sat.ll b/llvm/test/CodeGen/RISCV/uadd_sat.ll --- a/llvm/test/CodeGen/RISCV/uadd_sat.ll +++ b/llvm/test/CodeGen/RISCV/uadd_sat.ll @@ -15,7 +15,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: add a1, a0, a1 -; RV32I-NEXT: addi a0, zero, -1 +; RV32I-NEXT: li a0, -1 ; RV32I-NEXT: bltu a1, a2, .LBB0_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -26,7 +26,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: addw a1, a0, a1 -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: bltu a1, a2, .LBB0_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -61,8 +61,8 @@ ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu a4, a3, a1 ; RV32I-NEXT: .LBB1_2: -; RV32I-NEXT: addi a0, zero, -1 -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a0, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: bnez a4, .LBB1_4 ; RV32I-NEXT: # %bb.3: ; RV32I-NEXT: mv a0, a2 @@ -74,7 +74,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: add a1, a0, a1 -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: bltu a1, a2, .LBB1_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -91,8 +91,8 @@ ; RV32IZbb-NEXT: # %bb.1: ; RV32IZbb-NEXT: sltu a4, a3, a1 ; RV32IZbb-NEXT: .LBB1_2: -; RV32IZbb-NEXT: addi a0, zero, -1 -; RV32IZbb-NEXT: addi a1, zero, -1 +; RV32IZbb-NEXT: li a0, -1 +; RV32IZbb-NEXT: li a1, -1 ; RV32IZbb-NEXT: bnez a4, .LBB1_4 ; RV32IZbb-NEXT: # %bb.3: ; RV32IZbb-NEXT: mv a0, a2 @@ -156,34 +156,34 @@ ; RV32I-LABEL: func8: ; RV32I: # %bb.0: ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 255 +; RV32I-NEXT: li a1, 255 ; RV32I-NEXT: bltu a0, a1, .LBB3_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a0, zero, 255 +; RV32I-NEXT: li a0, 255 ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: func8: ; RV64I: # %bb.0: ; RV64I-NEXT: add a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 255 +; RV64I-NEXT: li a1, 255 ; RV64I-NEXT: bltu a0, a1, .LBB3_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a0, zero, 255 +; RV64I-NEXT: li a0, 255 ; RV64I-NEXT: .LBB3_2: ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func8: ; RV32IZbb: # %bb.0: ; RV32IZbb-NEXT: add a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 255 +; RV32IZbb-NEXT: li a1, 255 ; RV32IZbb-NEXT: minu a0, a0, a1 ; RV32IZbb-NEXT: ret ; ; RV64IZbb-LABEL: func8: ; RV64IZbb: # %bb.0: ; RV64IZbb-NEXT: add a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 255 +; RV64IZbb-NEXT: li a1, 255 ; RV64IZbb-NEXT: minu a0, a0, a1 ; RV64IZbb-NEXT: ret %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y); @@ -194,34 +194,34 @@ ; RV32I-LABEL: func3: ; RV32I: # %bb.0: ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 15 +; RV32I-NEXT: li a1, 15 ; RV32I-NEXT: bltu a0, a1, .LBB4_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a0, zero, 15 +; RV32I-NEXT: li a0, 15 ; RV32I-NEXT: .LBB4_2: ; RV32I-NEXT: ret ; ; 
RV64I-LABEL: func3: ; RV64I: # %bb.0: ; RV64I-NEXT: add a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 15 +; RV64I-NEXT: li a1, 15 ; RV64I-NEXT: bltu a0, a1, .LBB4_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a0, zero, 15 +; RV64I-NEXT: li a0, 15 ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: ret ; ; RV32IZbb-LABEL: func3: ; RV32IZbb: # %bb.0: ; RV32IZbb-NEXT: add a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 15 +; RV32IZbb-NEXT: li a1, 15 ; RV32IZbb-NEXT: minu a0, a0, a1 ; RV32IZbb-NEXT: ret ; ; RV64IZbb-LABEL: func3: ; RV64IZbb: # %bb.0: ; RV64IZbb-NEXT: add a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 15 +; RV64IZbb-NEXT: li a1, 15 ; RV64IZbb-NEXT: minu a0, a0, a1 ; RV64IZbb-NEXT: ret %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y); diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll --- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll @@ -16,7 +16,7 @@ ; RV32I-NEXT: mv a3, a0 ; RV32I-NEXT: mul a0, a1, a2 ; RV32I-NEXT: add a1, a3, a0 -; RV32I-NEXT: addi a0, zero, -1 +; RV32I-NEXT: li a0, -1 ; RV32I-NEXT: bltu a1, a3, .LBB0_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -28,7 +28,7 @@ ; RV64I-NEXT: mulw a1, a1, a2 ; RV64I-NEXT: addw a1, a0, a1 ; RV64I-NEXT: sext.w a2, a0 -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: bltu a1, a2, .LBB0_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -67,8 +67,8 @@ ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu a4, a3, a1 ; RV32I-NEXT: .LBB1_2: -; RV32I-NEXT: addi a0, zero, -1 -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: li a0, -1 +; RV32I-NEXT: li a1, -1 ; RV32I-NEXT: bnez a4, .LBB1_4 ; RV32I-NEXT: # %bb.3: ; RV32I-NEXT: mv a0, a2 @@ -80,7 +80,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a1, a0 ; RV64I-NEXT: add a2, a0, a2 -; RV64I-NEXT: addi a0, zero, -1 +; RV64I-NEXT: li a0, -1 ; RV64I-NEXT: bltu a2, a1, .LBB1_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a2 @@ -97,8 +97,8 @@ ; RV32IZbb-NEXT: # %bb.1: ; RV32IZbb-NEXT: sltu a4, a3, a1 ; RV32IZbb-NEXT: .LBB1_2: -; RV32IZbb-NEXT: addi a0, zero, -1 -; RV32IZbb-NEXT: addi a1, zero, -1 +; RV32IZbb-NEXT: li a0, -1 +; RV32IZbb-NEXT: li a1, -1 ; RV32IZbb-NEXT: bnez a4, .LBB1_4 ; RV32IZbb-NEXT: # %bb.3: ; RV32IZbb-NEXT: mv a0, a2 @@ -179,10 +179,10 @@ ; RV32I-NEXT: mul a1, a1, a2 ; RV32I-NEXT: andi a1, a1, 255 ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 255 +; RV32I-NEXT: li a1, 255 ; RV32I-NEXT: bltu a0, a1, .LBB3_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a0, zero, 255 +; RV32I-NEXT: li a0, 255 ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: ret ; @@ -192,10 +192,10 @@ ; RV64I-NEXT: mul a1, a1, a2 ; RV64I-NEXT: andi a1, a1, 255 ; RV64I-NEXT: add a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 255 +; RV64I-NEXT: li a1, 255 ; RV64I-NEXT: bltu a0, a1, .LBB3_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a0, zero, 255 +; RV64I-NEXT: li a0, 255 ; RV64I-NEXT: .LBB3_2: ; RV64I-NEXT: ret ; @@ -205,7 +205,7 @@ ; RV32IZbb-NEXT: mul a1, a1, a2 ; RV32IZbb-NEXT: andi a1, a1, 255 ; RV32IZbb-NEXT: add a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 255 +; RV32IZbb-NEXT: li a1, 255 ; RV32IZbb-NEXT: minu a0, a0, a1 ; RV32IZbb-NEXT: ret ; @@ -215,7 +215,7 @@ ; RV64IZbb-NEXT: mul a1, a1, a2 ; RV64IZbb-NEXT: andi a1, a1, 255 ; RV64IZbb-NEXT: add a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 255 +; RV64IZbb-NEXT: li a1, 255 ; RV64IZbb-NEXT: minu a0, a0, a1 ; RV64IZbb-NEXT: ret %a = mul i8 %y, %z @@ -230,10 +230,10 @@ ; RV32I-NEXT: mul a1, a1, a2 ; RV32I-NEXT: andi a1, a1, 15 ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 15 +; 
RV32I-NEXT: li a1, 15 ; RV32I-NEXT: bltu a0, a1, .LBB4_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a0, zero, 15 +; RV32I-NEXT: li a0, 15 ; RV32I-NEXT: .LBB4_2: ; RV32I-NEXT: ret ; @@ -243,10 +243,10 @@ ; RV64I-NEXT: mul a1, a1, a2 ; RV64I-NEXT: andi a1, a1, 15 ; RV64I-NEXT: add a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 15 +; RV64I-NEXT: li a1, 15 ; RV64I-NEXT: bltu a0, a1, .LBB4_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: addi a0, zero, 15 +; RV64I-NEXT: li a0, 15 ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: ret ; @@ -256,7 +256,7 @@ ; RV32IZbb-NEXT: mul a1, a1, a2 ; RV32IZbb-NEXT: andi a1, a1, 15 ; RV32IZbb-NEXT: add a0, a0, a1 -; RV32IZbb-NEXT: addi a1, zero, 15 +; RV32IZbb-NEXT: li a1, 15 ; RV32IZbb-NEXT: minu a0, a0, a1 ; RV32IZbb-NEXT: ret ; @@ -266,7 +266,7 @@ ; RV64IZbb-NEXT: mul a1, a1, a2 ; RV64IZbb-NEXT: andi a1, a1, 15 ; RV64IZbb-NEXT: add a0, a0, a1 -; RV64IZbb-NEXT: addi a1, zero, 15 +; RV64IZbb-NEXT: li a1, 15 ; RV64IZbb-NEXT: minu a0, a0, a1 ; RV64IZbb-NEXT: ret %a = mul i4 %y, %z diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll --- a/llvm/test/CodeGen/RISCV/urem-lkk.ll +++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll @@ -13,7 +13,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -28,7 +28,7 @@ ; RV32IM-NEXT: srli a2, a2, 1 ; RV32IM-NEXT: add a1, a2, a1 ; RV32IM-NEXT: srli a1, a1, 6 -; RV32IM-NEXT: addi a2, zero, 95 +; RV32IM-NEXT: li a2, 95 ; RV32IM-NEXT: mul a1, a1, a2 ; RV32IM-NEXT: sub a0, a0, a1 ; RV32IM-NEXT: ret @@ -39,7 +39,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -57,7 +57,7 @@ ; RV64IM-NEXT: srliw a2, a2, 1 ; RV64IM-NEXT: add a1, a2, a1 ; RV64IM-NEXT: srli a1, a1, 6 -; RV64IM-NEXT: addi a2, zero, 95 +; RV64IM-NEXT: li a2, 95 ; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -71,7 +71,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a1, zero, 1060 +; RV32I-NEXT: li a1, 1060 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -83,7 +83,7 @@ ; RV32IM-NEXT: addi a1, a1, -61 ; RV32IM-NEXT: mulhu a1, a0, a1 ; RV32IM-NEXT: srli a1, a1, 10 -; RV32IM-NEXT: addi a2, zero, 1060 +; RV32IM-NEXT: li a2, 1060 ; RV32IM-NEXT: mul a1, a1, a2 ; RV32IM-NEXT: sub a0, a0, a1 ; RV32IM-NEXT: ret @@ -94,7 +94,7 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 -; RV64I-NEXT: addi a1, zero, 1060 +; RV64I-NEXT: li a1, 1060 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -109,7 +109,7 @@ ; RV64IM-NEXT: addi a2, a2, -61 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 42 -; RV64IM-NEXT: addi a2, zero, 1060 +; RV64IM-NEXT: li a2, 1060 ; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -127,10 +127,10 @@ ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: 
li a1, 95 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: add a0, s1, a0 @@ -149,7 +149,7 @@ ; RV32IM-NEXT: srli a2, a2, 1 ; RV32IM-NEXT: add a1, a2, a1 ; RV32IM-NEXT: srli a1, a1, 6 -; RV32IM-NEXT: addi a2, zero, 95 +; RV32IM-NEXT: li a2, 95 ; RV32IM-NEXT: mul a2, a1, a2 ; RV32IM-NEXT: sub a0, a0, a2 ; RV32IM-NEXT: add a0, a0, a1 @@ -163,11 +163,11 @@ ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli s0, a0, 32 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: add a0, s1, a0 @@ -189,7 +189,7 @@ ; RV64IM-NEXT: srliw a2, a2, 1 ; RV64IM-NEXT: add a1, a2, a1 ; RV64IM-NEXT: srli a1, a1, 6 -; RV64IM-NEXT: addi a2, zero, 95 +; RV64IM-NEXT: li a2, 95 ; RV64IM-NEXT: mulw a2, a1, a2 ; RV64IM-NEXT: subw a0, a0, a2 ; RV64IM-NEXT: addw a0, a0, a1 @@ -214,7 +214,7 @@ define i32 @dont_fold_urem_one(i32 %x) nounwind { ; CHECK-LABEL: dont_fold_urem_one: ; CHECK: # %bb.0: -; CHECK-NEXT: mv a0, zero +; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: ret %1 = urem i32 %x, 1 ret i32 %1 @@ -235,8 +235,8 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: addi a2, zero, 98 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a2, 98 +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __umoddi3@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 @@ -246,8 +246,8 @@ ; RV32IM: # %bb.0: ; RV32IM-NEXT: addi sp, sp, -16 ; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IM-NEXT: addi a2, zero, 98 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a2, 98 +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __umoddi3@plt ; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -257,7 +257,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: addi a1, zero, 98 +; RV64I-NEXT: li a1, 98 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 @@ -276,7 +276,7 @@ ; RV64IM-NEXT: addi a2, a2, 1505 ; RV64IM-NEXT: mulhu a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 4 -; RV64IM-NEXT: addi a2, zero, 98 +; RV64IM-NEXT: li a2, 98 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: sub a0, a0, a1 ; RV64IM-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll @@ -213,7 +213,7 @@ ; RV32-NEXT: add a0, a1, a0 ; RV32-NEXT: neg a0, a0 ; RV32-NEXT: andi a0, a0, 15 -; RV32-NEXT: addi a1, zero, 3 +; RV32-NEXT: li a1, 3 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: ret ; @@ -223,7 +223,7 @@ ; RV64-NEXT: add a0, a1, a0 ; RV64-NEXT: neg a0, a0 ; RV64-NEXT: andi a0, a0, 15 -; RV64-NEXT: addi a1, zero, 3 +; RV64-NEXT: li a1, 3 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: ret ; @@ -233,7 +233,7 @@ ; RV32M-NEXT: add a0, a1, a0 ; RV32M-NEXT: neg a0, a0 ; RV32M-NEXT: andi a0, a0, 15 -; RV32M-NEXT: addi a1, zero, 3 +; RV32M-NEXT: li a1, 3 ; RV32M-NEXT: sltu a0, a1, a0 ; RV32M-NEXT: ret ; @@ -243,7 +243,7 @@ ; RV64M-NEXT: add a0, a1, a0 ; RV64M-NEXT: neg a0, a0 ; RV64M-NEXT: andi 
a0, a0, 15 -; RV64M-NEXT: addi a1, zero, 3 +; RV64M-NEXT: li a1, 3 ; RV64M-NEXT: sltu a0, a1, a0 ; RV64M-NEXT: ret ; @@ -253,7 +253,7 @@ ; RV32MV-NEXT: add a0, a1, a0 ; RV32MV-NEXT: neg a0, a0 ; RV32MV-NEXT: andi a0, a0, 15 -; RV32MV-NEXT: addi a1, zero, 3 +; RV32MV-NEXT: li a1, 3 ; RV32MV-NEXT: sltu a0, a1, a0 ; RV32MV-NEXT: ret ; @@ -263,7 +263,7 @@ ; RV64MV-NEXT: add a0, a1, a0 ; RV64MV-NEXT: neg a0, a0 ; RV64MV-NEXT: andi a0, a0, 15 -; RV64MV-NEXT: addi a1, zero, 3 +; RV64MV-NEXT: li a1, 3 ; RV64MV-NEXT: sltu a0, a1, a0 ; RV64MV-NEXT: ret %urem = urem i4 %X, 5 @@ -276,10 +276,10 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-NEXT: addi a1, zero, 307 +; RV32-NEXT: li a1, 307 ; RV32-NEXT: call __mulsi3@plt ; RV32-NEXT: andi a0, a0, 511 -; RV32-NEXT: addi a1, zero, 1 +; RV32-NEXT: li a1, 1 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 @@ -289,10 +289,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-NEXT: addi a1, zero, 307 +; RV64-NEXT: li a1, 307 ; RV64-NEXT: call __muldi3@plt ; RV64-NEXT: andi a0, a0, 511 -; RV64-NEXT: addi a1, zero, 1 +; RV64-NEXT: li a1, 1 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 @@ -300,37 +300,37 @@ ; ; RV32M-LABEL: test_urem_negative_odd: ; RV32M: # %bb.0: -; RV32M-NEXT: addi a1, zero, 307 +; RV32M-NEXT: li a1, 307 ; RV32M-NEXT: mul a0, a0, a1 ; RV32M-NEXT: andi a0, a0, 511 -; RV32M-NEXT: addi a1, zero, 1 +; RV32M-NEXT: li a1, 1 ; RV32M-NEXT: sltu a0, a1, a0 ; RV32M-NEXT: ret ; ; RV64M-LABEL: test_urem_negative_odd: ; RV64M: # %bb.0: -; RV64M-NEXT: addi a1, zero, 307 +; RV64M-NEXT: li a1, 307 ; RV64M-NEXT: mul a0, a0, a1 ; RV64M-NEXT: andi a0, a0, 511 -; RV64M-NEXT: addi a1, zero, 1 +; RV64M-NEXT: li a1, 1 ; RV64M-NEXT: sltu a0, a1, a0 ; RV64M-NEXT: ret ; ; RV32MV-LABEL: test_urem_negative_odd: ; RV32MV: # %bb.0: -; RV32MV-NEXT: addi a1, zero, 307 +; RV32MV-NEXT: li a1, 307 ; RV32MV-NEXT: mul a0, a0, a1 ; RV32MV-NEXT: andi a0, a0, 511 -; RV32MV-NEXT: addi a1, zero, 1 +; RV32MV-NEXT: li a1, 1 ; RV32MV-NEXT: sltu a0, a1, a0 ; RV32MV-NEXT: ret ; ; RV64MV-LABEL: test_urem_negative_odd: ; RV64MV: # %bb.0: -; RV64MV-NEXT: addi a1, zero, 307 +; RV64MV-NEXT: li a1, 307 ; RV64MV-NEXT: mul a0, a0, a1 ; RV64MV-NEXT: andi a0, a0, 511 -; RV64MV-NEXT: addi a1, zero, 1 +; RV64MV-NEXT: li a1, 1 ; RV64MV-NEXT: sltu a0, a1, a0 ; RV64MV-NEXT: ret %urem = urem i9 %X, -5 @@ -355,28 +355,28 @@ ; RV32-NEXT: or s1, a2, a0 ; RV32-NEXT: srli s2, a1, 11 ; RV32-NEXT: andi a0, a1, 2047 -; RV32-NEXT: addi a1, zero, 683 +; RV32-NEXT: li a1, 683 ; RV32-NEXT: call __mulsi3@plt ; RV32-NEXT: slli a1, a0, 10 ; RV32-NEXT: andi a0, a0, 2046 ; RV32-NEXT: srli a0, a0, 1 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: andi a0, a0, 2047 -; RV32-NEXT: addi a1, zero, 341 +; RV32-NEXT: li a1, 341 ; RV32-NEXT: sltu s3, a1, a0 -; RV32-NEXT: addi a1, zero, 819 +; RV32-NEXT: li a1, 819 ; RV32-NEXT: mv a0, s1 ; RV32-NEXT: call __mulsi3@plt ; RV32-NEXT: addi a0, a0, -1638 ; RV32-NEXT: andi a0, a0, 2047 -; RV32-NEXT: addi a1, zero, 1 +; RV32-NEXT: li a1, 1 ; RV32-NEXT: sltu s1, a1, a0 -; RV32-NEXT: addi a1, zero, 1463 +; RV32-NEXT: li a1, 1463 ; RV32-NEXT: mv a0, s2 ; RV32-NEXT: call __mulsi3@plt ; RV32-NEXT: addi a0, a0, -1463 ; RV32-NEXT: andi a0, a0, 2047 -; RV32-NEXT: addi a1, zero, 292 +; RV32-NEXT: li a1, 292 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: neg a1, s3 ; RV32-NEXT: 
neg a0, a0 @@ -415,28 +415,28 @@ ; RV64-NEXT: srli s2, a0, 11 ; RV64-NEXT: srli s1, a0, 22 ; RV64-NEXT: andi a0, a0, 2047 -; RV64-NEXT: addi a1, zero, 683 +; RV64-NEXT: li a1, 683 ; RV64-NEXT: call __muldi3@plt ; RV64-NEXT: slli a1, a0, 10 ; RV64-NEXT: andi a0, a0, 2046 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: or a0, a0, a1 ; RV64-NEXT: andi a0, a0, 2047 -; RV64-NEXT: addi a1, zero, 341 +; RV64-NEXT: li a1, 341 ; RV64-NEXT: sltu s3, a1, a0 -; RV64-NEXT: addi a1, zero, 819 +; RV64-NEXT: li a1, 819 ; RV64-NEXT: mv a0, s1 ; RV64-NEXT: call __muldi3@plt ; RV64-NEXT: addi a0, a0, -1638 ; RV64-NEXT: andi a0, a0, 2047 -; RV64-NEXT: addi a1, zero, 1 +; RV64-NEXT: li a1, 1 ; RV64-NEXT: sltu s1, a1, a0 -; RV64-NEXT: addi a1, zero, 1463 +; RV64-NEXT: li a1, 1463 ; RV64-NEXT: mv a0, s2 ; RV64-NEXT: call __muldi3@plt ; RV64-NEXT: addi a0, a0, -1463 ; RV64-NEXT: andi a0, a0, 2047 -; RV64-NEXT: addi a1, zero, 292 +; RV64-NEXT: li a1, 292 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: neg a1, s3 ; RV64-NEXT: neg a0, a0 @@ -447,7 +447,7 @@ ; RV64-NEXT: slli a1, s1, 22 ; RV64-NEXT: sub a0, a0, a1 ; RV64-NEXT: sw a0, 0(s0) -; RV64-NEXT: addi a1, zero, -1 +; RV64-NEXT: li a1, -1 ; RV64-NEXT: srli a1, a1, 31 ; RV64-NEXT: and a0, a0, a1 ; RV64-NEXT: srli a0, a0, 32 @@ -469,26 +469,26 @@ ; RV32M-NEXT: or a1, a3, a1 ; RV32M-NEXT: srli a3, a2, 11 ; RV32M-NEXT: andi a2, a2, 2047 -; RV32M-NEXT: addi a4, zero, 683 +; RV32M-NEXT: li a4, 683 ; RV32M-NEXT: mul a2, a2, a4 ; RV32M-NEXT: slli a4, a2, 10 ; RV32M-NEXT: andi a2, a2, 2046 ; RV32M-NEXT: srli a2, a2, 1 ; RV32M-NEXT: or a2, a2, a4 ; RV32M-NEXT: andi a2, a2, 2047 -; RV32M-NEXT: addi a4, zero, 341 +; RV32M-NEXT: li a4, 341 ; RV32M-NEXT: sltu a2, a4, a2 -; RV32M-NEXT: addi a4, zero, 819 +; RV32M-NEXT: li a4, 819 ; RV32M-NEXT: mul a1, a1, a4 ; RV32M-NEXT: addi a1, a1, -1638 ; RV32M-NEXT: andi a1, a1, 2047 -; RV32M-NEXT: addi a4, zero, 1 +; RV32M-NEXT: li a4, 1 ; RV32M-NEXT: sltu a1, a4, a1 -; RV32M-NEXT: addi a4, zero, 1463 +; RV32M-NEXT: li a4, 1463 ; RV32M-NEXT: mul a3, a3, a4 ; RV32M-NEXT: addi a3, a3, -1463 ; RV32M-NEXT: andi a3, a3, 2047 -; RV32M-NEXT: addi a4, zero, 292 +; RV32M-NEXT: li a4, 292 ; RV32M-NEXT: sltu a3, a4, a3 ; RV32M-NEXT: neg a2, a2 ; RV32M-NEXT: neg a3, a3 @@ -514,26 +514,26 @@ ; RV64M-NEXT: srli a2, a1, 11 ; RV64M-NEXT: srli a3, a1, 22 ; RV64M-NEXT: andi a1, a1, 2047 -; RV64M-NEXT: addi a4, zero, 683 +; RV64M-NEXT: li a4, 683 ; RV64M-NEXT: mul a1, a1, a4 ; RV64M-NEXT: slli a4, a1, 10 ; RV64M-NEXT: andi a1, a1, 2046 ; RV64M-NEXT: srli a1, a1, 1 ; RV64M-NEXT: or a1, a1, a4 ; RV64M-NEXT: andi a1, a1, 2047 -; RV64M-NEXT: addi a4, zero, 341 +; RV64M-NEXT: li a4, 341 ; RV64M-NEXT: sltu a1, a4, a1 -; RV64M-NEXT: addi a4, zero, 819 +; RV64M-NEXT: li a4, 819 ; RV64M-NEXT: mul a3, a3, a4 ; RV64M-NEXT: addi a3, a3, -1638 ; RV64M-NEXT: andi a3, a3, 2047 -; RV64M-NEXT: addi a4, zero, 1 +; RV64M-NEXT: li a4, 1 ; RV64M-NEXT: sltu a3, a4, a3 -; RV64M-NEXT: addi a4, zero, 1463 +; RV64M-NEXT: li a4, 1463 ; RV64M-NEXT: mul a2, a2, a4 ; RV64M-NEXT: addi a2, a2, -1463 ; RV64M-NEXT: andi a2, a2, 2047 -; RV64M-NEXT: addi a4, zero, 292 +; RV64M-NEXT: li a4, 292 ; RV64M-NEXT: sltu a2, a4, a2 ; RV64M-NEXT: neg a1, a1 ; RV64M-NEXT: neg a2, a2 @@ -544,7 +544,7 @@ ; RV64M-NEXT: slli a2, a3, 22 ; RV64M-NEXT: sub a1, a1, a2 ; RV64M-NEXT: sw a1, 0(a0) -; RV64M-NEXT: addi a2, zero, -1 +; RV64M-NEXT: li a2, -1 ; RV64M-NEXT: srli a2, a2, 31 ; RV64M-NEXT: and a1, a1, a2 ; RV64M-NEXT: srli a1, a1, 32 @@ -576,16 +576,16 @@ ; RV32MV-NEXT: vsub.vv v8, v8, v10 ; RV32MV-NEXT: vmul.vv v8, 
v8, v9 ; RV32MV-NEXT: vadd.vv v9, v8, v8 -; RV32MV-NEXT: addi a1, zero, 9 +; RV32MV-NEXT: li a1, 9 ; RV32MV-NEXT: vmv.v.i v10, 10 ; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; RV32MV-NEXT: vmv.s.x v10, a1 ; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV32MV-NEXT: vsll.vv v9, v9, v10 -; RV32MV-NEXT: addi a1, zero, 2047 +; RV32MV-NEXT: li a1, 2047 ; RV32MV-NEXT: vand.vx v8, v8, a1 ; RV32MV-NEXT: vmv.v.i v10, 0 -; RV32MV-NEXT: addi a2, zero, 1 +; RV32MV-NEXT: li a2, 1 ; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; RV32MV-NEXT: vmv1r.v v11, v10 ; RV32MV-NEXT: vmv.s.x v11, a2 @@ -641,16 +641,16 @@ ; RV64MV-NEXT: vsub.vv v8, v8, v10 ; RV64MV-NEXT: vmul.vv v8, v8, v9 ; RV64MV-NEXT: vadd.vv v9, v8, v8 -; RV64MV-NEXT: addi a1, zero, 9 +; RV64MV-NEXT: li a1, 9 ; RV64MV-NEXT: vmv.v.i v10, 10 ; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; RV64MV-NEXT: vmv.s.x v10, a1 ; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV64MV-NEXT: vsll.vv v9, v9, v10 -; RV64MV-NEXT: addi a1, zero, 2047 +; RV64MV-NEXT: li a1, 2047 ; RV64MV-NEXT: vand.vx v8, v8, a1 ; RV64MV-NEXT: vmv.v.i v10, 0 -; RV64MV-NEXT: addi a2, zero, 1 +; RV64MV-NEXT: li a2, 1 ; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; RV64MV-NEXT: vmv1r.v v11, v10 ; RV64MV-NEXT: vmv.s.x v11, a2 @@ -676,7 +676,7 @@ ; RV64MV-NEXT: slli a2, a2, 22 ; RV64MV-NEXT: or a1, a1, a2 ; RV64MV-NEXT: sw a1, 0(a0) -; RV64MV-NEXT: addi a2, zero, -1 +; RV64MV-NEXT: li a2, -1 ; RV64MV-NEXT: srli a2, a2, 31 ; RV64MV-NEXT: and a1, a1, a2 ; RV64MV-NEXT: srli a1, a1, 32 diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll +++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll @@ -25,19 +25,19 @@ ; RV32I-NEXT: lhu s0, 4(a1) ; RV32I-NEXT: lhu a2, 0(a1) ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: addi a1, zero, 124 +; RV32I-NEXT: li a1, 124 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s5, a0 -; RV32I-NEXT: addi a1, zero, 98 +; RV32I-NEXT: li a1, 98 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 1003 +; RV32I-NEXT: li a1, 1003 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: sh a0, 6(s1) @@ -67,7 +67,7 @@ ; RV32IM-NEXT: srli a2, a2, 1 ; RV32IM-NEXT: add a2, a2, a5 ; RV32IM-NEXT: srli a2, a2, 6 -; RV32IM-NEXT: addi a5, zero, 95 +; RV32IM-NEXT: li a5, 95 ; RV32IM-NEXT: mul a2, a2, a5 ; RV32IM-NEXT: sub a2, a4, a2 ; RV32IM-NEXT: srli a4, a1, 2 @@ -75,21 +75,21 @@ ; RV32IM-NEXT: addi a5, a5, 529 ; RV32IM-NEXT: mulhu a4, a4, a5 ; RV32IM-NEXT: srli a4, a4, 2 -; RV32IM-NEXT: addi a5, zero, 124 +; RV32IM-NEXT: li a5, 124 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a1, a1, a4 ; RV32IM-NEXT: lui a4, 342392 ; RV32IM-NEXT: addi a4, a4, 669 ; RV32IM-NEXT: mulhu a4, a3, a4 ; RV32IM-NEXT: srli a4, a4, 5 -; RV32IM-NEXT: addi a5, zero, 98 +; RV32IM-NEXT: li a5, 98 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a3, a3, a4 ; RV32IM-NEXT: lui a4, 267633 ; RV32IM-NEXT: addi a4, a4, -1809 ; RV32IM-NEXT: mulhu a4, a6, a4 ; RV32IM-NEXT: srli a4, a4, 8 -; RV32IM-NEXT: addi a5, zero, 1003 +; RV32IM-NEXT: li a5, 1003 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a4, a6, a4 ; RV32IM-NEXT: sh a4, 6(a0) @@ -113,19 +113,19 @@ ; RV64I-NEXT: lhu s0, 8(a1) ; RV64I-NEXT: lhu a2, 0(a1) ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: addi a1, 
zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: addi a1, zero, 124 +; RV64I-NEXT: li a1, 124 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s5, a0 -; RV64I-NEXT: addi a1, zero, 98 +; RV64I-NEXT: li a1, 98 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 1003 +; RV64I-NEXT: li a1, 1003 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: sh a0, 6(s1) @@ -161,7 +161,7 @@ ; RV64IM-NEXT: srli a2, a2, 1 ; RV64IM-NEXT: add a2, a2, a5 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: addi a5, zero, 95 +; RV64IM-NEXT: li a5, 95 ; RV64IM-NEXT: mulw a2, a2, a5 ; RV64IM-NEXT: subw a1, a1, a2 ; RV64IM-NEXT: srli a2, a4, 2 @@ -175,7 +175,7 @@ ; RV64IM-NEXT: addi a5, a5, 133 ; RV64IM-NEXT: mulhu a2, a2, a5 ; RV64IM-NEXT: srli a2, a2, 3 -; RV64IM-NEXT: addi a5, zero, 124 +; RV64IM-NEXT: li a5, 124 ; RV64IM-NEXT: mulw a2, a2, a5 ; RV64IM-NEXT: subw a2, a4, a2 ; RV64IM-NEXT: srli a4, a3, 1 @@ -189,7 +189,7 @@ ; RV64IM-NEXT: addi a5, a5, 1505 ; RV64IM-NEXT: mulhu a4, a4, a5 ; RV64IM-NEXT: srli a4, a4, 4 -; RV64IM-NEXT: addi a5, zero, 98 +; RV64IM-NEXT: li a5, 98 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: lui a4, 8364 @@ -202,7 +202,7 @@ ; RV64IM-NEXT: addi a4, a4, 1213 ; RV64IM-NEXT: mulhu a4, a6, a4 ; RV64IM-NEXT: srli a4, a4, 7 -; RV64IM-NEXT: addi a5, zero, 1003 +; RV64IM-NEXT: li a5, 1003 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a4, a6, a4 ; RV64IM-NEXT: sh a4, 6(a0) @@ -230,19 +230,19 @@ ; RV32I-NEXT: lhu s0, 4(a1) ; RV32I-NEXT: lhu a2, 0(a1) ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s5, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: sh a0, 6(s1) @@ -272,7 +272,7 @@ ; RV32IM-NEXT: srli a3, a3, 1 ; RV32IM-NEXT: add a2, a3, a2 ; RV32IM-NEXT: srli a2, a2, 6 -; RV32IM-NEXT: addi a3, zero, 95 +; RV32IM-NEXT: li a3, 95 ; RV32IM-NEXT: mul a2, a2, a3 ; RV32IM-NEXT: sub t0, a4, a2 ; RV32IM-NEXT: mulhu a4, a1, a5 @@ -317,19 +317,19 @@ ; RV64I-NEXT: lhu s0, 8(a1) ; RV64I-NEXT: lhu a2, 0(a1) ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s5, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: sh a0, 6(s1) @@ -365,7 +365,7 @@ ; RV64IM-NEXT: srli a3, a3, 1 ; RV64IM-NEXT: add a2, a3, a2 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: addi a3, zero, 95 +; RV64IM-NEXT: li a3, 95 ; RV64IM-NEXT: mulw a2, a2, a3 ; RV64IM-NEXT: subw t0, a1, a2 ; RV64IM-NEXT: mulhu a2, a4, a5 @@ -420,35 +420,35 @@ ; RV32I-NEXT: lhu s4, 8(a1) ; RV32I-NEXT: lhu s1, 12(a1) ; RV32I-NEXT: mv s0, 
a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s5, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s6, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s7, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s8, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: mv s9, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __udivsi3@plt ; RV32I-NEXT: add a0, s8, a0 @@ -486,7 +486,7 @@ ; RV32IM-NEXT: srli a3, a3, 1 ; RV32IM-NEXT: add a2, a3, a2 ; RV32IM-NEXT: srli t3, a2, 6 -; RV32IM-NEXT: addi t0, zero, 95 +; RV32IM-NEXT: li t0, 95 ; RV32IM-NEXT: mul a3, t3, t0 ; RV32IM-NEXT: sub t1, a4, a3 ; RV32IM-NEXT: mulhu a4, a1, a5 @@ -539,35 +539,35 @@ ; RV64I-NEXT: lhu s4, 16(a1) ; RV64I-NEXT: lhu s1, 24(a1) ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s5, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s6, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s7, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s8, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: mv s9, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __udivdi3@plt ; RV64I-NEXT: addw a0, s8, a0 @@ -611,7 +611,7 @@ ; RV64IM-NEXT: srli a3, a3, 1 ; RV64IM-NEXT: add a2, a3, a2 ; RV64IM-NEXT: srli t3, a2, 6 -; RV64IM-NEXT: addi t0, zero, 95 +; RV64IM-NEXT: li t0, 95 ; RV64IM-NEXT: mulw a3, t3, t0 ; RV64IM-NEXT: subw t1, a1, a3 ; RV64IM-NEXT: mulhu a3, a4, a5 @@ -665,7 +665,7 @@ ; RV32I-NEXT: lhu s1, 0(a1) ; RV32I-NEXT: lhu a2, 12(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 95 +; RV32I-NEXT: li a1, 95 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: andi a1, s1, 63 @@ -696,7 +696,7 @@ ; RV32IM-NEXT: srli a2, a2, 1 ; RV32IM-NEXT: add a2, a2, a5 ; RV32IM-NEXT: srli a2, a2, 6 -; RV32IM-NEXT: addi a5, zero, 95 +; RV32IM-NEXT: li a5, 95 ; RV32IM-NEXT: mul a2, a2, a5 ; RV32IM-NEXT: sub a2, a4, a2 ; RV32IM-NEXT: andi a1, a1, 63 @@ -721,7 +721,7 @@ ; RV64I-NEXT: lhu s1, 0(a1) ; RV64I-NEXT: lhu a2, 24(a1) ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 95 +; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, 
a2 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: andi a1, s1, 63 @@ -758,7 +758,7 @@ ; RV64IM-NEXT: srli a2, a2, 1 ; RV64IM-NEXT: add a2, a2, a5 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: addi a5, zero, 95 +; RV64IM-NEXT: li a5, 95 ; RV64IM-NEXT: mulw a2, a2, a5 ; RV64IM-NEXT: subw a1, a1, a2 ; RV64IM-NEXT: andi a2, a4, 63 @@ -787,11 +787,11 @@ ; RV32I-NEXT: lhu s1, 8(a1) ; RV32I-NEXT: lhu a2, 4(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a1, zero, 654 +; RV32I-NEXT: li a1, 654 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s3, a0 -; RV32I-NEXT: addi a1, zero, 23 +; RV32I-NEXT: li a1, 23 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __umodsi3@plt ; RV32I-NEXT: mv s1, a0 @@ -821,14 +821,14 @@ ; RV32IM-NEXT: addi a5, a5, -1903 ; RV32IM-NEXT: mulhu a4, a4, a5 ; RV32IM-NEXT: srli a4, a4, 8 -; RV32IM-NEXT: addi a5, zero, 654 +; RV32IM-NEXT: li a5, 654 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a2, a2, a4 ; RV32IM-NEXT: lui a4, 729444 ; RV32IM-NEXT: addi a4, a4, 713 ; RV32IM-NEXT: mulhu a4, a1, a4 ; RV32IM-NEXT: srli a4, a4, 4 -; RV32IM-NEXT: addi a5, zero, 23 +; RV32IM-NEXT: li a5, 23 ; RV32IM-NEXT: mul a4, a4, a5 ; RV32IM-NEXT: sub a1, a1, a4 ; RV32IM-NEXT: lui a4, 395996 @@ -857,11 +857,11 @@ ; RV64I-NEXT: lhu s1, 16(a1) ; RV64I-NEXT: lhu a2, 8(a1) ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 654 +; RV64I-NEXT: li a1, 654 ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s3, a0 -; RV64I-NEXT: addi a1, zero, 23 +; RV64I-NEXT: li a1, 23 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s1, a0 @@ -899,7 +899,7 @@ ; RV64IM-NEXT: srli a5, a5, 1 ; RV64IM-NEXT: add a4, a5, a4 ; RV64IM-NEXT: srli a4, a4, 4 -; RV64IM-NEXT: addi a5, zero, 23 +; RV64IM-NEXT: li a5, 23 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a1, a1, a4 ; RV64IM-NEXT: srli a4, a3, 1 @@ -913,7 +913,7 @@ ; RV64IM-NEXT: addi a5, a5, 965 ; RV64IM-NEXT: mulhu a4, a4, a5 ; RV64IM-NEXT: srli a4, a4, 7 -; RV64IM-NEXT: addi a5, zero, 654 +; RV64IM-NEXT: li a5, 654 ; RV64IM-NEXT: mulw a4, a4, a5 ; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: lui a4, 1044567 @@ -973,23 +973,23 @@ ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: lw a1, 4(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: addi a2, zero, 1 +; RV32I-NEXT: li a2, 1 ; RV32I-NEXT: mv a0, a3 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __umoddi3@plt ; RV32I-NEXT: mv s7, a0 ; RV32I-NEXT: mv s8, a1 -; RV32I-NEXT: addi a2, zero, 654 +; RV32I-NEXT: li a2, 654 ; RV32I-NEXT: mv a0, s6 ; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __umoddi3@plt ; RV32I-NEXT: mv s6, a0 ; RV32I-NEXT: mv s9, a1 -; RV32I-NEXT: addi a2, zero, 23 +; RV32I-NEXT: li a2, 23 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s5 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __umoddi3@plt ; RV32I-NEXT: mv s4, a0 ; RV32I-NEXT: mv s1, a1 @@ -997,7 +997,7 @@ ; RV32I-NEXT: addi a2, a0, 1327 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: mv a1, s3 -; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __umoddi3@plt ; RV32I-NEXT: sw a1, 28(s0) ; RV32I-NEXT: sw a0, 24(s0) @@ -1044,23 +1044,23 @@ ; RV32IM-NEXT: lw a3, 0(a1) ; RV32IM-NEXT: lw a1, 4(a1) ; RV32IM-NEXT: mv s0, a0 -; RV32IM-NEXT: addi a2, zero, 1 +; RV32IM-NEXT: li a2, 1 ; RV32IM-NEXT: mv a0, a3 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __umoddi3@plt ; RV32IM-NEXT: mv s7, a0 ; RV32IM-NEXT: mv s8, a1 -; RV32IM-NEXT: addi a2, zero, 654 +; RV32IM-NEXT: li a2, 
654 ; RV32IM-NEXT: mv a0, s6 ; RV32IM-NEXT: mv a1, s1 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __umoddi3@plt ; RV32IM-NEXT: mv s6, a0 ; RV32IM-NEXT: mv s9, a1 -; RV32IM-NEXT: addi a2, zero, 23 +; RV32IM-NEXT: li a2, 23 ; RV32IM-NEXT: mv a0, s4 ; RV32IM-NEXT: mv a1, s5 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __umoddi3@plt ; RV32IM-NEXT: mv s4, a0 ; RV32IM-NEXT: mv s1, a1 @@ -1068,7 +1068,7 @@ ; RV32IM-NEXT: addi a2, a0, 1327 ; RV32IM-NEXT: mv a0, s2 ; RV32IM-NEXT: mv a1, s3 -; RV32IM-NEXT: mv a3, zero +; RV32IM-NEXT: li a3, 0 ; RV32IM-NEXT: call __umoddi3@plt ; RV32IM-NEXT: sw a1, 28(s0) ; RV32IM-NEXT: sw a0, 24(s0) @@ -1104,11 +1104,11 @@ ; RV64I-NEXT: ld s1, 16(a1) ; RV64I-NEXT: ld a2, 8(a1) ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: addi a1, zero, 654 +; RV64I-NEXT: li a1, 654 ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s3, a0 -; RV64I-NEXT: addi a1, zero, 23 +; RV64I-NEXT: li a1, 23 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __umoddi3@plt ; RV64I-NEXT: mv s1, a0 @@ -1146,7 +1146,7 @@ ; RV64IM-NEXT: srli a5, a5, 1 ; RV64IM-NEXT: add a4, a5, a4 ; RV64IM-NEXT: srli a4, a4, 4 -; RV64IM-NEXT: addi a5, zero, 23 +; RV64IM-NEXT: li a5, 23 ; RV64IM-NEXT: mul a4, a4, a5 ; RV64IM-NEXT: sub a1, a1, a4 ; RV64IM-NEXT: srli a4, a3, 1 @@ -1160,7 +1160,7 @@ ; RV64IM-NEXT: addi a5, a5, 965 ; RV64IM-NEXT: mulhu a4, a4, a5 ; RV64IM-NEXT: srli a4, a4, 7 -; RV64IM-NEXT: addi a5, zero, 654 +; RV64IM-NEXT: li a5, 654 ; RV64IM-NEXT: mul a4, a4, a5 ; RV64IM-NEXT: sub a3, a3, a4 ; RV64IM-NEXT: lui a4, 1044567 diff --git a/llvm/test/CodeGen/RISCV/usub_sat.ll b/llvm/test/CodeGen/RISCV/usub_sat.ll --- a/llvm/test/CodeGen/RISCV/usub_sat.ll +++ b/llvm/test/CodeGen/RISCV/usub_sat.ll @@ -15,7 +15,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: sub a1, a0, a1 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: bltu a2, a1, .LBB0_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -26,7 +26,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: subw a1, a0, a1 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a2, a1, .LBB0_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -62,8 +62,8 @@ ; RV32I-NEXT: .LBB1_2: ; RV32I-NEXT: sltu a4, a0, a2 ; RV32I-NEXT: .LBB1_3: -; RV32I-NEXT: mv a0, zero -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a0, 0 +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: bnez a4, .LBB1_5 ; RV32I-NEXT: # %bb.4: ; RV32I-NEXT: mv a0, a2 @@ -75,7 +75,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: sub a1, a0, a1 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a2, a1, .LBB1_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -95,8 +95,8 @@ ; RV32IZbb-NEXT: .LBB1_2: ; RV32IZbb-NEXT: sltu a4, a0, a2 ; RV32IZbb-NEXT: .LBB1_3: -; RV32IZbb-NEXT: mv a0, zero -; RV32IZbb-NEXT: mv a1, zero +; RV32IZbb-NEXT: li a0, 0 +; RV32IZbb-NEXT: li a1, 0 ; RV32IZbb-NEXT: bnez a4, .LBB1_5 ; RV32IZbb-NEXT: # %bb.4: ; RV32IZbb-NEXT: mv a0, a2 @@ -118,7 +118,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: sub a1, a0, a1 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: bltu a2, a1, .LBB2_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -129,7 +129,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: sub a1, a0, a1 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a2, a1, .LBB2_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -156,7 +156,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: sub a1, a0, 
a1 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: bltu a2, a1, .LBB3_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -167,7 +167,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: sub a1, a0, a1 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a2, a1, .LBB3_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -194,7 +194,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: sub a1, a0, a1 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: bltu a2, a1, .LBB4_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -205,7 +205,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a2, a0 ; RV64I-NEXT: sub a1, a0, a1 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a2, a1, .LBB4_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll --- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll @@ -16,7 +16,7 @@ ; RV32I-NEXT: mv a3, a0 ; RV32I-NEXT: mul a0, a1, a2 ; RV32I-NEXT: sub a1, a3, a0 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: bltu a3, a1, .LBB0_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -28,7 +28,7 @@ ; RV64I-NEXT: mulw a1, a1, a2 ; RV64I-NEXT: subw a1, a0, a1 ; RV64I-NEXT: sext.w a2, a0 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a2, a1, .LBB0_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -68,8 +68,8 @@ ; RV32I-NEXT: .LBB1_2: ; RV32I-NEXT: sltu a4, a0, a3 ; RV32I-NEXT: .LBB1_3: -; RV32I-NEXT: mv a0, zero -; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: li a0, 0 +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: bnez a4, .LBB1_5 ; RV32I-NEXT: # %bb.4: ; RV32I-NEXT: mv a0, a3 @@ -81,7 +81,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: mv a1, a0 ; RV64I-NEXT: sub a2, a0, a2 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a1, a2, .LBB1_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a2 @@ -101,8 +101,8 @@ ; RV32IZbb-NEXT: .LBB1_2: ; RV32IZbb-NEXT: sltu a4, a0, a3 ; RV32IZbb-NEXT: .LBB1_3: -; RV32IZbb-NEXT: mv a0, zero -; RV32IZbb-NEXT: mv a1, zero +; RV32IZbb-NEXT: li a0, 0 +; RV32IZbb-NEXT: li a1, 0 ; RV32IZbb-NEXT: bnez a4, .LBB1_5 ; RV32IZbb-NEXT: # %bb.4: ; RV32IZbb-NEXT: mv a0, a3 @@ -129,7 +129,7 @@ ; RV32I-NEXT: mul a0, a1, a2 ; RV32I-NEXT: and a0, a0, a3 ; RV32I-NEXT: sub a1, a4, a0 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: bltu a4, a1, .LBB2_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -144,7 +144,7 @@ ; RV64I-NEXT: mul a0, a1, a2 ; RV64I-NEXT: and a0, a0, a3 ; RV64I-NEXT: sub a1, a4, a0 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a4, a1, .LBB2_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -180,7 +180,7 @@ ; RV32I-NEXT: mul a0, a1, a2 ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: sub a1, a3, a0 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: bltu a3, a1, .LBB3_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -193,7 +193,7 @@ ; RV64I-NEXT: mul a0, a1, a2 ; RV64I-NEXT: andi a0, a0, 255 ; RV64I-NEXT: sub a1, a3, a0 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a3, a1, .LBB3_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 @@ -229,7 +229,7 @@ ; RV32I-NEXT: mul a0, a1, a2 ; RV32I-NEXT: andi a0, a0, 15 ; RV32I-NEXT: sub a1, a3, a0 -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: bltu a3, a1, .LBB4_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 @@ -242,7 +242,7 @@ ; RV64I-NEXT: mul a0, a1, a2 ; RV64I-NEXT: andi a0, a0, 
15 ; RV64I-NEXT: sub a1, a3, a0 -; RV64I-NEXT: mv a0, zero +; RV64I-NEXT: li a0, 0 ; RV64I-NEXT: bltu a3, a1, .LBB4_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -421,8 +421,8 @@ ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, -16 ; ILP32-ILP32F-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; ILP32-ILP32F-FPELIM-NEXT: lui a3, 261888 -; ILP32-ILP32F-FPELIM-NEXT: addi a4, zero, 2 -; ILP32-ILP32F-FPELIM-NEXT: mv a2, zero +; ILP32-ILP32F-FPELIM-NEXT: li a4, 2 +; ILP32-ILP32F-FPELIM-NEXT: li a2, 0 ; ILP32-ILP32F-FPELIM-NEXT: call va1@plt ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16 @@ -435,8 +435,8 @@ ; ILP32-ILP32F-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; ILP32-ILP32F-WITHFP-NEXT: addi s0, sp, 16 ; ILP32-ILP32F-WITHFP-NEXT: lui a3, 261888 -; ILP32-ILP32F-WITHFP-NEXT: addi a4, zero, 2 -; ILP32-ILP32F-WITHFP-NEXT: mv a2, zero +; ILP32-ILP32F-WITHFP-NEXT: li a4, 2 +; ILP32-ILP32F-WITHFP-NEXT: li a2, 0 ; ILP32-ILP32F-WITHFP-NEXT: call va1@plt ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -448,8 +448,8 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, -16 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a3, 261888 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a4, zero, 2 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a2, zero +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a4, 2 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a2, 0 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va1@plt ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16 @@ -459,9 +459,9 @@ ; LP64-LP64F-LP64D-FPELIM: # %bb.0: ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 1023 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1023 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a0, 52 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, zero, 2 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 2 ; LP64-LP64F-LP64D-FPELIM-NEXT: call va1@plt ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 @@ -473,9 +473,9 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 1023 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1023 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a0, 52 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a2, zero, 2 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 2 ; LP64-LP64F-LP64D-WITHFP-NEXT: call va1@plt ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -572,7 +572,7 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: srli a1, a1, 32 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, 8 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, zero, 1 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 32 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, -8 ; LP64-LP64F-LP64D-FPELIM-NEXT: and a0, a0, a1 @@ -601,7 +601,7 @@ ; 
LP64-LP64F-LP64D-WITHFP-NEXT: srli a1, a1, 32 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a1, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0) -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, zero, 1 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 32 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a1, -8 ; LP64-LP64F-LP64D-WITHFP-NEXT: and a0, a0, a1 @@ -747,7 +747,7 @@ ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, -16 ; ILP32-ILP32F-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; ILP32-ILP32F-FPELIM-NEXT: lui a3, 261888 -; ILP32-ILP32F-FPELIM-NEXT: mv a2, zero +; ILP32-ILP32F-FPELIM-NEXT: li a2, 0 ; ILP32-ILP32F-FPELIM-NEXT: call va2@plt ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16 @@ -760,7 +760,7 @@ ; ILP32-ILP32F-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; ILP32-ILP32F-WITHFP-NEXT: addi s0, sp, 16 ; ILP32-ILP32F-WITHFP-NEXT: lui a3, 261888 -; ILP32-ILP32F-WITHFP-NEXT: mv a2, zero +; ILP32-ILP32F-WITHFP-NEXT: li a2, 0 ; ILP32-ILP32F-WITHFP-NEXT: call va2@plt ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -772,7 +772,7 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, -16 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a3, 261888 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a2, zero +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a2, 0 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va2@plt ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16 @@ -782,7 +782,7 @@ ; LP64-LP64F-LP64D-FPELIM: # %bb.0: ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 1023 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1023 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a0, 52 ; LP64-LP64F-LP64D-FPELIM-NEXT: call va2@plt ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -795,7 +795,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 1023 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1023 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a0, 52 ; LP64-LP64F-LP64D-WITHFP-NEXT: call va2@plt ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload @@ -898,7 +898,7 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: srli a2, a2, 32 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a2, 8 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, zero, 1 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 1 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a2, 32 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a2, -8 ; LP64-LP64F-LP64D-FPELIM-NEXT: and a0, a0, a2 @@ -927,7 +927,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: srli a2, a2, 32 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a2, a2, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, -24(s0) -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a2, zero, 1 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 1 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a2, 32 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a2, a2, -8 ; LP64-LP64F-LP64D-WITHFP-NEXT: and a0, a0, a2 @@ -1079,11 +1079,11 @@ ; ILP32-ILP32F-FPELIM: # %bb.0: ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, -16 ; ILP32-ILP32F-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; 
ILP32-ILP32F-FPELIM-NEXT: addi a0, zero, 2 -; ILP32-ILP32F-FPELIM-NEXT: addi a1, zero, 1111 +; ILP32-ILP32F-FPELIM-NEXT: li a0, 2 +; ILP32-ILP32F-FPELIM-NEXT: li a1, 1111 ; ILP32-ILP32F-FPELIM-NEXT: lui a5, 262144 -; ILP32-ILP32F-FPELIM-NEXT: mv a2, zero -; ILP32-ILP32F-FPELIM-NEXT: mv a4, zero +; ILP32-ILP32F-FPELIM-NEXT: li a2, 0 +; ILP32-ILP32F-FPELIM-NEXT: li a4, 0 ; ILP32-ILP32F-FPELIM-NEXT: call va3@plt ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16 @@ -1095,11 +1095,11 @@ ; ILP32-ILP32F-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; ILP32-ILP32F-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; ILP32-ILP32F-WITHFP-NEXT: addi s0, sp, 16 -; ILP32-ILP32F-WITHFP-NEXT: addi a0, zero, 2 -; ILP32-ILP32F-WITHFP-NEXT: addi a1, zero, 1111 +; ILP32-ILP32F-WITHFP-NEXT: li a0, 2 +; ILP32-ILP32F-WITHFP-NEXT: li a1, 1111 ; ILP32-ILP32F-WITHFP-NEXT: lui a5, 262144 -; ILP32-ILP32F-WITHFP-NEXT: mv a2, zero -; ILP32-ILP32F-WITHFP-NEXT: mv a4, zero +; ILP32-ILP32F-WITHFP-NEXT: li a2, 0 +; ILP32-ILP32F-WITHFP-NEXT: li a4, 0 ; ILP32-ILP32F-WITHFP-NEXT: call va3@plt ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -1110,11 +1110,11 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM: # %bb.0: ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, -16 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, zero, 2 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, zero, 1111 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a0, 2 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a1, 1111 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a5, 262144 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a2, zero -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a4, zero +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a2, 0 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a4, 0 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va3@plt ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16 @@ -1124,10 +1124,10 @@ ; LP64-LP64F-LP64D-FPELIM: # %bb.0: ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 1 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a0, 62 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 2 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, zero, 1111 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 2 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1111 ; LP64-LP64F-LP64D-FPELIM-NEXT: call va3@plt ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 @@ -1139,10 +1139,10 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 1 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a0, 62 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 2 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, zero, 1111 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 2 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1111 ; LP64-LP64F-LP64D-WITHFP-NEXT: call va3@plt ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -1396,11 +1396,11 @@ 
; ILP32-ILP32F-FPELIM: # %bb.0: ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, -64 ; ILP32-ILP32F-FPELIM-NEXT: sw ra, 60(sp) # 4-byte Folded Spill -; ILP32-ILP32F-FPELIM-NEXT: addi a0, zero, 17 +; ILP32-ILP32F-FPELIM-NEXT: li a0, 17 ; ILP32-ILP32F-FPELIM-NEXT: sw a0, 24(sp) -; ILP32-ILP32F-FPELIM-NEXT: addi a0, zero, 16 +; ILP32-ILP32F-FPELIM-NEXT: li a0, 16 ; ILP32-ILP32F-FPELIM-NEXT: sw a0, 20(sp) -; ILP32-ILP32F-FPELIM-NEXT: addi a0, zero, 15 +; ILP32-ILP32F-FPELIM-NEXT: li a0, 15 ; ILP32-ILP32F-FPELIM-NEXT: sw a0, 16(sp) ; ILP32-ILP32F-FPELIM-NEXT: lui a0, 262236 ; ILP32-ILP32F-FPELIM-NEXT: addi a0, a0, 655 @@ -1408,7 +1408,7 @@ ; ILP32-ILP32F-FPELIM-NEXT: lui a0, 377487 ; ILP32-ILP32F-FPELIM-NEXT: addi a0, a0, 1475 ; ILP32-ILP32F-FPELIM-NEXT: sw a0, 8(sp) -; ILP32-ILP32F-FPELIM-NEXT: addi a0, zero, 14 +; ILP32-ILP32F-FPELIM-NEXT: li a0, 14 ; ILP32-ILP32F-FPELIM-NEXT: sw a0, 0(sp) ; ILP32-ILP32F-FPELIM-NEXT: lui a0, 262153 ; ILP32-ILP32F-FPELIM-NEXT: addi a0, a0, 491 @@ -1423,12 +1423,12 @@ ; ILP32-ILP32F-FPELIM-NEXT: addi a5, a0, 1311 ; ILP32-ILP32F-FPELIM-NEXT: lui a0, 688509 ; ILP32-ILP32F-FPELIM-NEXT: addi a6, a0, -2048 -; ILP32-ILP32F-FPELIM-NEXT: addi a0, zero, 1 -; ILP32-ILP32F-FPELIM-NEXT: addi a1, zero, 11 +; ILP32-ILP32F-FPELIM-NEXT: li a0, 1 +; ILP32-ILP32F-FPELIM-NEXT: li a1, 11 ; ILP32-ILP32F-FPELIM-NEXT: addi a2, sp, 32 -; ILP32-ILP32F-FPELIM-NEXT: addi a3, zero, 12 -; ILP32-ILP32F-FPELIM-NEXT: addi a4, zero, 13 -; ILP32-ILP32F-FPELIM-NEXT: addi a7, zero, 4 +; ILP32-ILP32F-FPELIM-NEXT: li a3, 12 +; ILP32-ILP32F-FPELIM-NEXT: li a4, 13 +; ILP32-ILP32F-FPELIM-NEXT: li a7, 4 ; ILP32-ILP32F-FPELIM-NEXT: sw a5, 32(sp) ; ILP32-ILP32F-FPELIM-NEXT: call va5_aligned_stack_callee@plt ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload @@ -1441,11 +1441,11 @@ ; ILP32-ILP32F-WITHFP-NEXT: sw ra, 60(sp) # 4-byte Folded Spill ; ILP32-ILP32F-WITHFP-NEXT: sw s0, 56(sp) # 4-byte Folded Spill ; ILP32-ILP32F-WITHFP-NEXT: addi s0, sp, 64 -; ILP32-ILP32F-WITHFP-NEXT: addi a0, zero, 17 +; ILP32-ILP32F-WITHFP-NEXT: li a0, 17 ; ILP32-ILP32F-WITHFP-NEXT: sw a0, 24(sp) -; ILP32-ILP32F-WITHFP-NEXT: addi a0, zero, 16 +; ILP32-ILP32F-WITHFP-NEXT: li a0, 16 ; ILP32-ILP32F-WITHFP-NEXT: sw a0, 20(sp) -; ILP32-ILP32F-WITHFP-NEXT: addi a0, zero, 15 +; ILP32-ILP32F-WITHFP-NEXT: li a0, 15 ; ILP32-ILP32F-WITHFP-NEXT: sw a0, 16(sp) ; ILP32-ILP32F-WITHFP-NEXT: lui a0, 262236 ; ILP32-ILP32F-WITHFP-NEXT: addi a0, a0, 655 @@ -1453,7 +1453,7 @@ ; ILP32-ILP32F-WITHFP-NEXT: lui a0, 377487 ; ILP32-ILP32F-WITHFP-NEXT: addi a0, a0, 1475 ; ILP32-ILP32F-WITHFP-NEXT: sw a0, 8(sp) -; ILP32-ILP32F-WITHFP-NEXT: addi a0, zero, 14 +; ILP32-ILP32F-WITHFP-NEXT: li a0, 14 ; ILP32-ILP32F-WITHFP-NEXT: sw a0, 0(sp) ; ILP32-ILP32F-WITHFP-NEXT: lui a0, 262153 ; ILP32-ILP32F-WITHFP-NEXT: addi a0, a0, 491 @@ -1468,12 +1468,12 @@ ; ILP32-ILP32F-WITHFP-NEXT: addi a5, a0, 1311 ; ILP32-ILP32F-WITHFP-NEXT: lui a0, 688509 ; ILP32-ILP32F-WITHFP-NEXT: addi a6, a0, -2048 -; ILP32-ILP32F-WITHFP-NEXT: addi a0, zero, 1 -; ILP32-ILP32F-WITHFP-NEXT: addi a1, zero, 11 +; ILP32-ILP32F-WITHFP-NEXT: li a0, 1 +; ILP32-ILP32F-WITHFP-NEXT: li a1, 11 ; ILP32-ILP32F-WITHFP-NEXT: addi a2, s0, -32 -; ILP32-ILP32F-WITHFP-NEXT: addi a3, zero, 12 -; ILP32-ILP32F-WITHFP-NEXT: addi a4, zero, 13 -; ILP32-ILP32F-WITHFP-NEXT: addi a7, zero, 4 +; ILP32-ILP32F-WITHFP-NEXT: li a3, 12 +; ILP32-ILP32F-WITHFP-NEXT: li a4, 13 +; ILP32-ILP32F-WITHFP-NEXT: li a7, 4 ; ILP32-ILP32F-WITHFP-NEXT: sw a5, -32(s0) ; ILP32-ILP32F-WITHFP-NEXT: call va5_aligned_stack_callee@plt ; 
ILP32-ILP32F-WITHFP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload @@ -1491,13 +1491,13 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a0, 377487 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, a0, 1475 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 8(sp) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, zero, 17 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a0, 17 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 24(sp) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, zero, 16 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a0, 16 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 20(sp) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, zero, 15 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a0, 15 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 16(sp) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, zero, 14 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a0, 14 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 0(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a0, 262153 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, a0, 491 @@ -1512,12 +1512,12 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a5, a0, 1311 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a0, 688509 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a6, a0, -2048 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, zero, 1 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, zero, 11 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a0, 1 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a1, 11 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a2, sp, 32 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a3, zero, 12 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a4, zero, 13 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a7, zero, 4 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a3, 12 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a4, 13 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a7, 4 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a5, 32(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va5_aligned_stack_callee@plt ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload @@ -1528,11 +1528,11 @@ ; LP64-LP64F-LP64D-FPELIM: # %bb.0: ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -48 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 17 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 17 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 16 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 16 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 16(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 15 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 15 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 2049 ; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a0, a0, -1147 @@ -1562,11 +1562,11 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 1147 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 14 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a0, -1967 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 1 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, zero, 11 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a4, zero, 12 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a5, zero, 13 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a7, zero, 14 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 11 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a4, 12 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a5, 13 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a7, 14 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd t0, 0(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: call va5_aligned_stack_callee@plt ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 40(sp) # 
8-byte Folded Reload @@ -1579,11 +1579,11 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 40(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 32(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 48 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 17 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 17 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 24(sp) -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 16 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 16 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 16(sp) -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 15 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 15 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 8(sp) ; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, 2049 ; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a0, a0, -1147 @@ -1613,11 +1613,11 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 1147 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 14 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, a0, -1967 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 1 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, zero, 11 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a4, zero, 12 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a5, zero, 13 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a7, zero, 14 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 11 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a4, 12 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a5, 13 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a7, 14 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd t0, 0(sp) ; LP64-LP64F-LP64D-WITHFP-NEXT: call va5_aligned_stack_callee@plt ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll --- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll +++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll @@ -23,13 +23,13 @@ ; RV32-NEXT: mv a5, a0 ; RV32-NEXT: bgtz a3, .LBB0_2 ; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a5, zero +; RV32-NEXT: li a5, 0 ; RV32-NEXT: .LBB0_2: ; RV32-NEXT: srai a4, a4, 24 ; RV32-NEXT: andi a5, a5, 255 ; RV32-NEXT: bgtz a6, .LBB0_4 ; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, zero +; RV32-NEXT: li a2, 0 ; RV32-NEXT: j .LBB0_5 ; RV32-NEXT: .LBB0_4: ; RV32-NEXT: srli a2, a2, 8 @@ -38,7 +38,7 @@ ; RV32-NEXT: or a2, a5, a2 ; RV32-NEXT: bgtz a4, .LBB0_7 ; RV32-NEXT: # %bb.6: -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: j .LBB0_8 ; RV32-NEXT: .LBB0_7: ; RV32-NEXT: srli a0, a0, 16 @@ -61,13 +61,13 @@ ; RV64-NEXT: mv a5, a0 ; RV64-NEXT: bgtz a3, .LBB0_2 ; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a5, zero +; RV64-NEXT: li a5, 0 ; RV64-NEXT: .LBB0_2: ; RV64-NEXT: srai a4, a4, 56 ; RV64-NEXT: andi a5, a5, 255 ; RV64-NEXT: bgtz a6, .LBB0_4 ; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a2, zero +; RV64-NEXT: li a2, 0 ; RV64-NEXT: j .LBB0_5 ; RV64-NEXT: .LBB0_4: ; RV64-NEXT: srli a2, a2, 8 @@ -76,7 +76,7 @@ ; RV64-NEXT: or a2, a5, a2 ; RV64-NEXT: bgtz a4, .LBB0_7 ; RV64-NEXT: # %bb.6: -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: j .LBB0_8 ; RV64-NEXT: .LBB0_7: ; RV64-NEXT: srli a0, a0, 16 diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -699,7 +699,7 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 %v1, i32* %res) { ; RV32-LABEL: usubo.i32.constant.lhs: ; RV32: # %bb.0: # %entry -; RV32-NEXT: addi a2, zero, -2 +; RV32-NEXT: li a2, -2 ; RV32-NEXT: sub a2, a2, a0 ; RV32-NEXT: addi a0, a2, 1 ; RV32-NEXT: seqz a0, a0 @@ -708,7 +708,7 @@ ; ; RV64-LABEL: usubo.i32.constant.lhs: ; RV64: # %bb.0: # %entry -; RV64-NEXT: 
addi a2, zero, -2 +; RV64-NEXT: li a2, -2 ; RV64-NEXT: subw a2, a2, a0 ; RV64-NEXT: addi a0, a2, 1 ; RV64-NEXT: seqz a0, a0 @@ -717,7 +717,7 @@ ; ; RV32ZBA-LABEL: usubo.i32.constant.lhs: ; RV32ZBA: # %bb.0: # %entry -; RV32ZBA-NEXT: addi a2, zero, -2 +; RV32ZBA-NEXT: li a2, -2 ; RV32ZBA-NEXT: sub a2, a2, a0 ; RV32ZBA-NEXT: addi a0, a2, 1 ; RV32ZBA-NEXT: seqz a0, a0 @@ -726,7 +726,7 @@ ; ; RV64ZBA-LABEL: usubo.i32.constant.lhs: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: addi a2, zero, -2 +; RV64ZBA-NEXT: li a2, -2 ; RV64ZBA-NEXT: subw a2, a2, a0 ; RV64ZBA-NEXT: addi a0, a2, 1 ; RV64ZBA-NEXT: seqz a0, a0 @@ -849,7 +849,7 @@ define zeroext i1 @smulo2.i32(i32 %v1, i32* %res) { ; RV32-LABEL: smulo2.i32: ; RV32: # %bb.0: # %entry -; RV32-NEXT: addi a2, zero, 13 +; RV32-NEXT: li a2, 13 ; RV32-NEXT: mulh a3, a0, a2 ; RV32-NEXT: mul a2, a0, a2 ; RV32-NEXT: srai a0, a2, 31 @@ -861,7 +861,7 @@ ; RV64-LABEL: smulo2.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a0, a0 -; RV64-NEXT: addi a2, zero, 13 +; RV64-NEXT: li a2, 13 ; RV64-NEXT: mul a3, a0, a2 ; RV64-NEXT: mulw a0, a0, a2 ; RV64-NEXT: xor a0, a0, a3 @@ -871,7 +871,7 @@ ; ; RV32ZBA-LABEL: smulo2.i32: ; RV32ZBA: # %bb.0: # %entry -; RV32ZBA-NEXT: addi a2, zero, 13 +; RV32ZBA-NEXT: li a2, 13 ; RV32ZBA-NEXT: mulh a3, a0, a2 ; RV32ZBA-NEXT: mul a2, a0, a2 ; RV32ZBA-NEXT: srai a0, a2, 31 @@ -1063,7 +1063,7 @@ define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) { ; RV32-LABEL: smulo2.i64: ; RV32: # %bb.0: # %entry -; RV32-NEXT: addi a7, zero, 13 +; RV32-NEXT: li a7, 13 ; RV32-NEXT: mulhu a4, a0, a7 ; RV32-NEXT: mul a5, a1, a7 ; RV32-NEXT: add t0, a5, a4 @@ -1089,7 +1089,7 @@ ; ; RV64-LABEL: smulo2.i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: addi a2, zero, 13 +; RV64-NEXT: li a2, 13 ; RV64-NEXT: mulh a3, a0, a2 ; RV64-NEXT: mul a2, a0, a2 ; RV64-NEXT: srai a0, a2, 63 @@ -1100,7 +1100,7 @@ ; ; RV32ZBA-LABEL: smulo2.i64: ; RV32ZBA: # %bb.0: # %entry -; RV32ZBA-NEXT: addi a7, zero, 13 +; RV32ZBA-NEXT: li a7, 13 ; RV32ZBA-NEXT: mulhu a4, a0, a7 ; RV32ZBA-NEXT: mul a5, a1, a7 ; RV32ZBA-NEXT: add t0, a5, a4 @@ -1126,7 +1126,7 @@ ; ; RV64ZBA-LABEL: smulo2.i64: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: addi a2, zero, 13 +; RV64ZBA-NEXT: li a2, 13 ; RV64ZBA-NEXT: mulh a3, a0, a2 ; RV64ZBA-NEXT: mul a2, a0, a2 ; RV64ZBA-NEXT: srai a0, a2, 63 @@ -1191,7 +1191,7 @@ define zeroext i1 @umulo2.i32(i32 %v1, i32* %res) { ; RV32-LABEL: umulo2.i32: ; RV32: # %bb.0: # %entry -; RV32-NEXT: addi a3, zero, 13 +; RV32-NEXT: li a3, 13 ; RV32-NEXT: mulhu a2, a0, a3 ; RV32-NEXT: snez a2, a2 ; RV32-NEXT: mul a0, a0, a3 @@ -1203,7 +1203,7 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: addi a2, zero, 13 +; RV64-NEXT: li a2, 13 ; RV64-NEXT: mul a2, a0, a2 ; RV64-NEXT: srli a0, a2, 32 ; RV64-NEXT: snez a0, a0 @@ -1212,7 +1212,7 @@ ; ; RV32ZBA-LABEL: umulo2.i32: ; RV32ZBA: # %bb.0: # %entry -; RV32ZBA-NEXT: addi a3, zero, 13 +; RV32ZBA-NEXT: li a3, 13 ; RV32ZBA-NEXT: mulhu a2, a0, a3 ; RV32ZBA-NEXT: snez a2, a2 ; RV32ZBA-NEXT: mul a0, a0, a3 @@ -1363,7 +1363,7 @@ define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) { ; RV32-LABEL: umulo2.i64: ; RV32: # %bb.0: # %entry -; RV32-NEXT: addi a3, zero, 13 +; RV32-NEXT: li a3, 13 ; RV32-NEXT: mul a4, a1, a3 ; RV32-NEXT: mulhu a5, a0, a3 ; RV32-NEXT: add a4, a5, a4 @@ -1379,7 +1379,7 @@ ; ; RV64-LABEL: umulo2.i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: addi a3, zero, 13 +; RV64-NEXT: li a3, 13 ; RV64-NEXT: mulhu a2, a0, a3 ; RV64-NEXT: snez a2, a2 ; RV64-NEXT: mul a0, a0, a3 @@ 
-1389,7 +1389,7 @@ ; ; RV32ZBA-LABEL: umulo2.i64: ; RV32ZBA: # %bb.0: # %entry -; RV32ZBA-NEXT: addi a3, zero, 13 +; RV32ZBA-NEXT: li a3, 13 ; RV32ZBA-NEXT: mul a4, a1, a3 ; RV32ZBA-NEXT: mulhu a5, a0, a3 ; RV32ZBA-NEXT: add a4, a5, a4 @@ -1405,7 +1405,7 @@ ; ; RV64ZBA-LABEL: umulo2.i64: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: addi a3, zero, 13 +; RV64ZBA-NEXT: li a3, 13 ; RV64ZBA-NEXT: mulhu a2, a0, a3 ; RV64ZBA-NEXT: snez a2, a2 ; RV64ZBA-NEXT: mul a0, a0, a3 @@ -1596,7 +1596,7 @@ ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: not a1, a1 ; RV32-NEXT: and a0, a1, a0 -; RV32-NEXT: addi a1, zero, -1 +; RV32-NEXT: li a1, -1 ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: ret ; @@ -1619,7 +1619,7 @@ ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: not a1, a1 ; RV32ZBA-NEXT: and a0, a1, a0 -; RV32ZBA-NEXT: addi a1, zero, -1 +; RV32ZBA-NEXT: li a1, -1 ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: ret ; @@ -1996,7 +1996,7 @@ ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: and a0, a1, a0 -; RV32-NEXT: addi a1, zero, -1 +; RV32-NEXT: li a1, -1 ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: ret ; @@ -2017,7 +2017,7 @@ ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: and a0, a1, a0 -; RV32ZBA-NEXT: addi a1, zero, -1 +; RV32ZBA-NEXT: li a1, -1 ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: ret ; @@ -2870,10 +2870,10 @@ ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: beq a1, a0, .LBB50_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB50_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.br.i32: @@ -2884,10 +2884,10 @@ ; RV64-NEXT: addw a0, a0, a1 ; RV64-NEXT: beq a0, a2, .LBB50_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB50_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.br.i32: @@ -2897,10 +2897,10 @@ ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: beq a1, a0, .LBB50_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB50_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.br.i32: @@ -2911,10 +2911,10 @@ ; RV64ZBA-NEXT: addw a0, a0, a1 ; RV64ZBA-NEXT: beq a0, a2, .LBB50_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB50_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -2942,10 +2942,10 @@ ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: bgez a0, .LBB51_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB51_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.br.i64: @@ -2955,10 +2955,10 @@ ; RV64-NEXT: slti a1, a1, 0 ; RV64-NEXT: beq a1, a0, .LBB51_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB51_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.br.i64: @@ -2973,10 +2973,10 @@ ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: bgez a0, .LBB51_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; 
RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB51_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.br.i64: @@ -2986,10 +2986,10 @@ ; RV64ZBA-NEXT: slti a1, a1, 0 ; RV64ZBA-NEXT: beq a1, a0, .LBB51_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB51_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -3010,10 +3010,10 @@ ; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: bgeu a1, a0, .LBB52_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB52_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i32: @@ -3022,10 +3022,10 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: bgeu a1, a0, .LBB52_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB52_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i32: @@ -3033,10 +3033,10 @@ ; RV32ZBA-NEXT: add a1, a0, a1 ; RV32ZBA-NEXT: bgeu a1, a0, .LBB52_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB52_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i32: @@ -3045,10 +3045,10 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: bgeu a1, a0, .LBB52_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB52_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -3076,10 +3076,10 @@ ; RV32-NEXT: .LBB53_2: # %entry ; RV32-NEXT: beqz a0, .LBB53_4 ; RV32-NEXT: # %bb.3: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB53_4: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i64: @@ -3087,10 +3087,10 @@ ; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: bgeu a1, a0, .LBB53_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB53_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i64: @@ -3105,10 +3105,10 @@ ; RV32ZBA-NEXT: .LBB53_2: # %entry ; RV32ZBA-NEXT: beqz a0, .LBB53_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB53_4: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i64: @@ -3116,10 +3116,10 @@ ; RV64ZBA-NEXT: add a1, a0, a1 ; RV64ZBA-NEXT: bgeu a1, a0, .LBB53_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB53_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -3142,10 +3142,10 @@ ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: beq a2, a0, .LBB54_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li 
a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB54_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.br.i32: @@ -3156,10 +3156,10 @@ ; RV64-NEXT: subw a0, a0, a1 ; RV64-NEXT: beq a0, a2, .LBB54_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB54_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.br.i32: @@ -3169,10 +3169,10 @@ ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: beq a2, a0, .LBB54_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB54_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.br.i32: @@ -3183,10 +3183,10 @@ ; RV64ZBA-NEXT: subw a0, a0, a1 ; RV64ZBA-NEXT: beq a0, a2, .LBB54_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB54_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) @@ -3212,10 +3212,10 @@ ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: bgez a0, .LBB55_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB55_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.br.i64: @@ -3225,10 +3225,10 @@ ; RV64-NEXT: slt a0, a1, a0 ; RV64-NEXT: beq a2, a0, .LBB55_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB55_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.br.i64: @@ -3241,10 +3241,10 @@ ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: bgez a0, .LBB55_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB55_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.br.i64: @@ -3254,10 +3254,10 @@ ; RV64ZBA-NEXT: slt a0, a1, a0 ; RV64ZBA-NEXT: beq a2, a0, .LBB55_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB55_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) @@ -3278,10 +3278,10 @@ ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: bgeu a0, a1, .LBB56_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB56_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.br.i32: @@ -3290,10 +3290,10 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: bgeu a0, a1, .LBB56_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB56_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.br.i32: @@ -3301,10 +3301,10 @@ ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: bgeu a0, a1, .LBB56_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB56_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 
+; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.br.i32: @@ -3313,10 +3313,10 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: bgeu a0, a1, .LBB56_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB56_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) @@ -3342,14 +3342,14 @@ ; RV32-NEXT: sltu a0, a1, a3 ; RV32-NEXT: bnez a0, .LBB57_4 ; RV32-NEXT: .LBB57_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; RV32-NEXT: .LBB57_3: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: beqz a0, .LBB57_2 ; RV32-NEXT: .LBB57_4: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.br.i64: @@ -3357,10 +3357,10 @@ ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: bgeu a0, a1, .LBB57_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB57_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.br.i64: @@ -3373,14 +3373,14 @@ ; RV32ZBA-NEXT: sltu a0, a1, a3 ; RV32ZBA-NEXT: bnez a0, .LBB57_4 ; RV32ZBA-NEXT: .LBB57_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB57_3: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: beqz a0, .LBB57_2 ; RV32ZBA-NEXT: .LBB57_4: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.br.i64: @@ -3388,10 +3388,10 @@ ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: bgeu a0, a1, .LBB57_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB57_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) @@ -3414,10 +3414,10 @@ ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: beq a2, a0, .LBB58_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB58_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.br.i32: @@ -3428,10 +3428,10 @@ ; RV64-NEXT: mulw a0, a0, a1 ; RV64-NEXT: beq a0, a2, .LBB58_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB58_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.br.i32: @@ -3441,10 +3441,10 @@ ; RV32ZBA-NEXT: srai a0, a0, 31 ; RV32ZBA-NEXT: beq a2, a0, .LBB58_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB58_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.br.i32: @@ -3455,10 +3455,10 @@ ; RV64ZBA-NEXT: mulw a0, a0, a1 ; RV64ZBA-NEXT: beq a0, a2, .LBB58_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB58_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) @@ -3529,10 +3529,10 @@ ; RV32-NEXT: or a0, 
a1, a0 ; RV32-NEXT: beqz a0, .LBB59_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: j .LBB59_3 ; RV32-NEXT: .LBB59_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: .LBB59_3: # %overflow ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload @@ -3547,10 +3547,10 @@ ; RV64-NEXT: srai a0, a0, 63 ; RV64-NEXT: beq a2, a0, .LBB59_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB59_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.br.i64: @@ -3608,10 +3608,10 @@ ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: beqz a0, .LBB59_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: j .LBB59_3 ; RV32ZBA-NEXT: .LBB59_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: .LBB59_3: # %overflow ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload @@ -3626,10 +3626,10 @@ ; RV64ZBA-NEXT: srai a0, a0, 63 ; RV64ZBA-NEXT: beq a2, a0, .LBB59_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB59_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) @@ -3647,7 +3647,7 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) { ; RV32-LABEL: smulo2.br.i64: ; RV32: # %bb.0: # %entry -; RV32-NEXT: addi a6, zero, -13 +; RV32-NEXT: li a6, -13 ; RV32-NEXT: mulhu a3, a0, a6 ; RV32-NEXT: mul a4, a1, a6 ; RV32-NEXT: add a3, a4, a3 @@ -3657,7 +3657,7 @@ ; RV32-NEXT: sub t0, a3, a0 ; RV32-NEXT: neg t1, a0 ; RV32-NEXT: sltu a2, t0, t1 -; RV32-NEXT: addi a7, zero, -1 +; RV32-NEXT: li a7, -1 ; RV32-NEXT: mulhu t2, a0, a7 ; RV32-NEXT: add a2, t2, a2 ; RV32-NEXT: add a2, t3, a2 @@ -3687,29 +3687,29 @@ ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: beqz a0, .LBB60_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB60_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.br.i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: addi a1, zero, -13 +; RV64-NEXT: li a1, -13 ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: srai a0, a0, 63 ; RV64-NEXT: beq a2, a0, .LBB60_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB60_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.br.i64: ; RV32ZBA: # %bb.0: # %entry -; RV32ZBA-NEXT: addi a6, zero, -13 +; RV32ZBA-NEXT: li a6, -13 ; RV32ZBA-NEXT: mulhu a3, a0, a6 ; RV32ZBA-NEXT: mul a4, a1, a6 ; RV32ZBA-NEXT: add a3, a4, a3 @@ -3719,7 +3719,7 @@ ; RV32ZBA-NEXT: sub t0, a3, a0 ; RV32ZBA-NEXT: neg t1, a0 ; RV32ZBA-NEXT: sltu a2, t0, t1 -; RV32ZBA-NEXT: addi a7, zero, -1 +; RV32ZBA-NEXT: li a7, -1 ; RV32ZBA-NEXT: mulhu t2, a0, a7 ; RV32ZBA-NEXT: add a2, t2, a2 ; RV32ZBA-NEXT: add a2, t3, a2 @@ -3749,24 +3749,24 @@ ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: beqz a0, .LBB60_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB60_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; 
RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.br.i64: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: addi a1, zero, -13 +; RV64ZBA-NEXT: li a1, -13 ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srai a0, a0, 63 ; RV64ZBA-NEXT: beq a2, a0, .LBB60_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB60_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 -13) @@ -3787,10 +3787,10 @@ ; RV32-NEXT: mulhu a0, a0, a1 ; RV32-NEXT: beqz a0, .LBB61_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB61_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.br.i32: @@ -3801,10 +3801,10 @@ ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: beqz a0, .LBB61_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB61_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.br.i32: @@ -3812,10 +3812,10 @@ ; RV32ZBA-NEXT: mulhu a0, a0, a1 ; RV32ZBA-NEXT: beqz a0, .LBB61_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB61_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.br.i32: @@ -3826,10 +3826,10 @@ ; RV64ZBA-NEXT: srli a0, a0, 32 ; RV64ZBA-NEXT: beqz a0, .LBB61_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB61_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) @@ -3865,10 +3865,10 @@ ; RV32-NEXT: or a0, a0, a6 ; RV32-NEXT: beqz a0, .LBB62_2 ; RV32-NEXT: # %bb.1: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB62_2: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.br.i64: @@ -3876,10 +3876,10 @@ ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: beqz a0, .LBB62_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB62_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.br.i64: @@ -3902,10 +3902,10 @@ ; RV32ZBA-NEXT: or a0, a0, a6 ; RV32ZBA-NEXT: beqz a0, .LBB62_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB62_2: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.br.i64: @@ -3913,10 +3913,10 @@ ; RV64ZBA-NEXT: mulhu a0, a0, a1 ; RV64ZBA-NEXT: beqz a0, .LBB62_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB62_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) @@ -3944,10 +3944,10 @@ ; RV32-NEXT: .LBB63_2: # %entry ; RV32-NEXT: beqz a0, .LBB63_4 ; RV32-NEXT: # %bb.3: # %overflow -; RV32-NEXT: mv a0, zero +; RV32-NEXT: li a0, 
0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB63_4: # %continue -; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.br.i64: @@ -3955,10 +3955,10 @@ ; RV64-NEXT: add a1, a0, a0 ; RV64-NEXT: bgeu a1, a0, .LBB63_2 ; RV64-NEXT: # %bb.1: # %overflow -; RV64-NEXT: mv a0, zero +; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB63_2: # %continue -; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.br.i64: @@ -3973,10 +3973,10 @@ ; RV32ZBA-NEXT: .LBB63_2: # %entry ; RV32ZBA-NEXT: beqz a0, .LBB63_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow -; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB63_4: # %continue -; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.br.i64: @@ -3984,10 +3984,10 @@ ; RV64ZBA-NEXT: add a1, a0, a0 ; RV64ZBA-NEXT: bgeu a1, a0, .LBB63_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow -; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB63_2: # %continue -; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2) diff --git a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll --- a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll +++ b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll @@ -18,10 +18,10 @@ ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB0_2 ; RV32I-NEXT: # %bb.1: # %if.end -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB0_2: # %if.then -; RV32I-NEXT: addi a0, zero, 1 +; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: ret entry: %0 = load i8, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bytes, i32 0, i32 0), align 1 @@ -54,10 +54,10 @@ ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB1_2 ; RV32I-NEXT: # %bb.1: # %if.end -; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB1_2: # %if.then -; RV32I-NEXT: addi a0, zero, 1 +; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: ret entry: %0 = load i16, i16* getelementptr inbounds ([5 x i16], [5 x i16]* @shorts, i32 0, i32 0), align 2 diff --git a/llvm/test/MC/RISCV/compress-rv32i.s b/llvm/test/MC/RISCV/compress-rv32i.s --- a/llvm/test/MC/RISCV/compress-rv32i.s +++ b/llvm/test/MC/RISCV/compress-rv32i.s @@ -57,10 +57,10 @@ addi ra, ra, -32 # CHECK-BYTES: 85 50 -# CHECK-ALIAS: addi ra, zero, -31 +# CHECK-ALIAS: li ra, -31 # CHECK-INST: c.li ra, -31 # CHECK: # encoding: [0x85,0x50] -addi ra, zero, -31 +li ra, -31 # CHECK-BYTES: 39 71 # CHECK-ALIAS: addi sp, sp, -64 diff --git a/llvm/test/MC/RISCV/numeric-reg-names.s b/llvm/test/MC/RISCV/numeric-reg-names.s --- a/llvm/test/MC/RISCV/numeric-reg-names.s +++ b/llvm/test/MC/RISCV/numeric-reg-names.s @@ -4,8 +4,8 @@ # RUN: | llvm-objdump -d -M numeric - \ # RUN: | FileCheck -check-prefix=CHECK-NUMERIC %s -# CHECK-NUMERIC: addi x10, x0, 1 -# CHECK-NUMERIC-NEXT: addi x10, x0, 1 +# CHECK-NUMERIC: li x10, 1 +# CHECK-NUMERIC-NEXT: li x10, 1 addi a0, x0, 1 addi a0, zero, 1 diff --git a/llvm/test/MC/RISCV/rv32i-aliases-valid.s b/llvm/test/MC/RISCV/rv32i-aliases-valid.s --- a/llvm/test/MC/RISCV/rv32i-aliases-valid.s +++ b/llvm/test/MC/RISCV/rv32i-aliases-valid.s @@ -18,20 +18,25 @@ .Lpcrel_hi0: auipc a0, %pcrel_hi(foo) # CHECK-INST: addi a0, zero, 0 -# CHECK-ALIAS: mv a0, zero +# CHECK-ALIAS: li a0, 0 li x10, 0 -# CHECK-EXPAND: addi a0, zero, 1 +# CHECK-INST: addi a0, zero, 1 +# 
CHECK-ALIAS: li a0, 1 li x10, 1 -# CHECK-EXPAND: addi a0, zero, -1 +# CHECK-INST: addi a0, zero, -1 +# CHECK-ALIAS: li a0, -1 li x10, -1 -# CHECK-EXPAND: addi a0, zero, 2047 +# CHECK-INST: addi a0, zero, 2047 +# CHECK-ALIAS: li a0, 2047 li x10, 2047 -# CHECK-EXPAND: addi a0, zero, -2047 +# CHECK-INST: addi a0, zero, -2047 +# CHECK-ALIAS: li a0, -2047 li x10, -2047 # CHECK-EXPAND: lui a1, 1 # CHECK-EXPAND: addi a1, a1, -2048 li x11, 2048 -# CHECK-EXPAND: addi a1, zero, -2048 +# CHECK-INST: addi a1, zero, -2048 +# CHECK-ALIAS: li a1, -2048 li x11, -2048 # CHECK-EXPAND: lui a1, 1 # CHECK-EXPAND: addi a1, a1, -2047 @@ -68,10 +73,12 @@ # CHECK-EXPAND: lui a2, 524288 li x12, 0x80000000 -# CHECK-EXPAND: addi a2, zero, -1 +# CHECK-INST: addi a2, zero, -1 +# CHECK-ALIAS: li a2, -1 li x12, 0xFFFFFFFF -# CHECK-EXPAND: addi a0, zero, 1110 +# CHECK-INST: addi a0, zero, 1110 +# CHECK-ALIAS: li a0, 1110 li a0, %lo(0x123456) # CHECK-OBJ-NOALIAS: addi a0, zero, 0 diff --git a/llvm/test/MC/RISCV/rv64i-aliases-valid.s b/llvm/test/MC/RISCV/rv64i-aliases-valid.s --- a/llvm/test/MC/RISCV/rv64i-aliases-valid.s +++ b/llvm/test/MC/RISCV/rv64i-aliases-valid.s @@ -21,20 +21,25 @@ .Lpcrel_hi0: auipc a0, %pcrel_hi(foo) # CHECK-INST: addi a0, zero, 0 -# CHECK-ALIAS: mv a0, zero +# CHECK-ALIAS: li a0, 0 li x10, 0 -# CHECK-EXPAND: addi a0, zero, 1 +# CHECK-INST: addi a0, zero, 1 +# CHECK-ALIAS: li a0, 1 li x10, 1 -# CHECK-EXPAND: addi a0, zero, -1 +# CHECK-INST: addi a0, zero, -1 +# CHECK-ALIAS: li a0, -1 li x10, -1 -# CHECK-EXPAND: addi a0, zero, 2047 +# CHECK-INST: addi a0, zero, 2047 +# CHECK-ALIAS: li a0, 2047 li x10, 2047 -# CHECK-EXPAND: addi a0, zero, -2047 +# CHECK-INST: addi a0, zero, -2047 +# CHECK-ALIAS: li a0, -2047 li x10, -2047 # CHECK-EXPAND: lui a1, 1 # CHECK-EXPAND: addiw a1, a1, -2048 li x11, 2048 -# CHECK-EXPAND: addi a1, zero, -2048 +# CHECK-INST: addi a1, zero, -2048 +# CHECK-ALIAS: li a1, -2048 li x11, -2048 # CHECK-EXPAND: lui a1, 1 # CHECK-EXPAND: addiw a1, a1, -2047 @@ -69,31 +74,46 @@ # CHECK-EXPAND: lui a2, 524288 li x12, -0x80000000 -# CHECK-EXPAND: addi a2, zero, 1 -# CHECK-EXPAND-NEXT: slli a2, a2, 31 +# CHECK-INST: addi a2, zero, 1 +# CHECK-INST-NEXT: slli a2, a2, 31 +# CHECK-ALIAS: li a2, 1 +# CHECK-ALIAS-NEXT: slli a2, a2, 31 li x12, 0x80000000 -# CHECK-EXPAND: addi a2, zero, -1 -# CHECK-EXPAND-NEXT: srli a2, a2, 32 +# CHECK-INST: addi a2, zero, -1 +# CHECK-INST-NEXT: srli a2, a2, 32 +# CHECK-ALIAS: li a2, -1 +# CHECK-ALIAS-NEXT: srli a2, a2, 32 li x12, 0xFFFFFFFF -# CHECK-EXPAND: addi t0, zero, 1 -# CHECK-EXPAND-NEXT: slli t0, t0, 32 +# CHECK-INST: addi t0, zero, 1 +# CHECK-INST-NEXT: slli t0, t0, 32 +# CHECK-ALIAS: li t0, 1 +# CHECK-ALIAS-NEXT: slli t0, t0, 32 li t0, 0x100000000 -# CHECK-EXPAND: addi t1, zero, -1 -# CHECK-EXPAND-NEXT: slli t1, t1, 63 +# CHECK-INST: addi t1, zero, -1 +# CHECK-INST-NEXT: slli t1, t1, 63 +# CHECK-ALIAS: li t1, -1 +# CHECK-ALIAS-NEXT: slli t1, t1, 63 li t1, 0x8000000000000000 -# CHECK-EXPAND: addi t1, zero, -1 -# CHECK-EXPAND-NEXT: slli t1, t1, 63 +# CHECK-INST: addi t1, zero, -1 +# CHECK-INST-NEXT: slli t1, t1, 63 +# CHECK-ALIAS: li t1, -1 +# CHECK-ALIAS-NEXT: slli t1, t1, 63 li t1, -0x8000000000000000 # CHECK-EXPAND: lui t2, 9321 # CHECK-EXPAND-NEXT: addiw t2, t2, -1329 # CHECK-EXPAND-NEXT: slli t2, t2, 35 li t2, 0x1234567800000000 -# CHECK-EXPAND: addi t3, zero, 7 -# CHECK-EXPAND-NEXT: slli t3, t3, 36 -# CHECK-EXPAND-NEXT: addi t3, t3, 11 -# CHECK-EXPAND-NEXT: slli t3, t3, 24 -# CHECK-EXPAND-NEXT: addi t3, t3, 15 +# CHECK-INST: addi t3, zero, 7 +# 
CHECK-INST-NEXT: slli t3, t3, 36 +# CHECK-INST-NEXT: addi t3, t3, 11 +# CHECK-INST-NEXT: slli t3, t3, 24 +# CHECK-INST-NEXT: addi t3, t3, 15 +# CHECK-ALIAS: li t3, 7 +# CHECK-ALIAS-NEXT: slli t3, t3, 36 +# CHECK-ALIAS-NEXT: addi t3, t3, 11 +# CHECK-ALIAS-NEXT: slli t3, t3, 24 +# CHECK-ALIAS-NEXT: addi t3, t3, 15 li t3, 0x700000000B00000F # CHECK-EXPAND: lui t4, 583 # CHECK-EXPAND-NEXT: addiw t4, t4, -1875 @@ -104,7 +124,8 @@ # CHECK-EXPAND-NEXT: slli t4, t4, 13 # CHECK-EXPAND-NEXT: addi t4, t4, -272 li t4, 0x123456789abcdef0 -# CHECK-EXPAND: addi t5, zero, -1 +# CHECK-INST: addi t5, zero, -1 +# CHECK-ALIAS: li t5, -1 li t5, 0xFFFFFFFFFFFFFFFF # CHECK-EXPAND: lui t6, 262145 # CHECK-EXPAND-NEXT: slli t6, t6, 1 @@ -126,20 +147,31 @@ # CHECK-EXPAND-NEXT: slli s1, s1, 20 # CHECK-EXPAND-NEXT: addi s1, s1, -3 li x9, 0x1000FFFFFFFD -# CHECK-EXPAND: addi a0, zero, -1 -# CHECK-EXPAND-NEXT: slli a0, a0, 36 -# CHECK-EXPAND-NEXT: addi a0, a0, 1 -# CHECK-EXPAND-NEXT: slli a0, a0, 25 -# CHECK-EXPAND-NEXT: addi a0, a0, -1 +# CHECK-INST: addi a0, zero, -1 +# CHECK-INST-NEXT: slli a0, a0, 36 +# CHECK-INST-NEXT: addi a0, a0, 1 +# CHECK-INST-NEXT: slli a0, a0, 25 +# CHECK-INST-NEXT: addi a0, a0, -1 +# CHECK-ALIAS: li a0, -1 +# CHECK-ALIAS-NEXT: slli a0, a0, 36 +# CHECK-ALIAS-NEXT: addi a0, a0, 1 +# CHECK-ALIAS-NEXT: slli a0, a0, 25 +# CHECK-ALIAS-NEXT: addi a0, a0, -1 li x10, 0xE000000001FFFFFF -# CHECK-EXPAND: addi a1, zero, -2047 -# CHECK-EXPAND-NEXT: slli a1, a1, 27 -# CHECK-EXPAND-NEXT: addi a1, a1, -1 -# CHECK-EXPAND-NEXT: slli a1, a1, 12 -# CHECK-EXPAND-NEXT: addi a1, a1, 2047 +# CHECK-INST: addi a1, zero, -2047 +# CHECK-INST-NEXT: slli a1, a1, 27 +# CHECK-INST-NEXT: addi a1, a1, -1 +# CHECK-INST-NEXT: slli a1, a1, 12 +# CHECK-INST-NEXT: addi a1, a1, 2047 +# CHECK-ALIAS: li a1, -2047 +# CHECK-ALIAS-NEXT: slli a1, a1, 27 +# CHECK-ALIAS-NEXT: addi a1, a1, -1 +# CHECK-ALIAS-NEXT: slli a1, a1, 12 +# CHECK-ALIAS-NEXT: addi a1, a1, 2047 li x11, 0xFFFC007FFFFFF7FF -# CHECK-EXPAND: addi a0, zero, 1110 +# CHECK-INST: addi a0, zero, 1110 +# CHECK-ALIAS: li a0, 1110 li a0, %lo(0x123456) # CHECK-OBJ-NOALIAS: addi a0, zero, 0 diff --git a/llvm/test/MC/RISCV/rv64zba-aliases-valid.s b/llvm/test/MC/RISCV/rv64zba-aliases-valid.s --- a/llvm/test/MC/RISCV/rv64zba-aliases-valid.s +++ b/llvm/test/MC/RISCV/rv64zba-aliases-valid.s @@ -21,7 +21,7 @@ # CHECK-S-OBJ-NOALIAS: addi t1, zero, -2 # CHECK-S-OBJ-NOALIAS-NEXT: add.uw t1, t1, zero -# CHECK-S-OBJ: addi t1, zero, -2 +# CHECK-S-OBJ: li t1, -2 # CHECK-S-OBJ-NEXT: zext.w t1, t1 li x6, 0xfffffffe diff --git a/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s b/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s --- a/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s +++ b/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s @@ -49,13 +49,13 @@ # CHECK-S-OBJ-NOALIAS: addi t0, zero, 1365 # CHECK-S-OBJ-NOALIAS-NEXT: bseti t0, t0, 31 -# CHECK-S-OBJ: addi t0, zero, 1365 +# CHECK-S-OBJ: li t0, 1365 # CHECK-S-OBJ-NEXT: bseti t0, t0, 31 li x5, 2147485013 # CHECK-S-OBJ-NOALIAS: addi t0, zero, -1365 # CHECK-S-OBJ-NOALIAS-NEXT: bclri t0, t0, 31 -# CHECK-S-OBJ: addi t0, zero, -1365 +# CHECK-S-OBJ: li t0, -1365 # CHECK-S-OBJ-NEXT: bclri t0, t0, 31 li x5, -2147485013 diff --git a/llvm/test/MC/RISCV/rvi-aliases-valid.s b/llvm/test/MC/RISCV/rvi-aliases-valid.s --- a/llvm/test/MC/RISCV/rvi-aliases-valid.s +++ b/llvm/test/MC/RISCV/rvi-aliases-valid.s @@ -38,7 +38,7 @@ nop # CHECK-S-OBJ-NOALIAS: addi t6, zero, 0 -# CHECK-S-OBJ: mv t6, zero +# CHECK-S-OBJ: li t6, 0 mv x31, zero # CHECK-S-OBJ-NOALIAS: addi a2, a3, 0 # 
CHECK-S-OBJ: mv a2, a3 diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected --- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected @@ -6,7 +6,7 @@ define hidden i32 @"_Z54bar$ompvariant$bar"() { ; CHECK-LABEL: _Z54bar$ompvariant$bar: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi a0, zero, 2 +; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: ret entry: ret i32 2