Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -9494,6 +9494,21 @@
       return DAG.getNode(ISD::OR, DL, VT, Neg, TrueV);
     }
+    // (select c, 0, y) -> -!c & y
+    if (isNullConstant(TrueV)) {
+      SDValue C = DAG.getSetCC(DL, VT, LHS, RHS,
+                               ISD::getSetCCInverse(CCVal, VT));
+      SDValue Neg = DAG.getNegative(C, DL, VT);
+      return DAG.getNode(ISD::AND, DL, VT, Neg, FalseV);
+    }
+    // (select c, y, 0) -> -c & y
+    if (isNullConstant(FalseV)) {
+      SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, CCVal);
+      SDValue Neg = DAG.getNegative(C, DL, VT);
+      return DAG.getNode(ISD::AND, DL, VT, Neg, TrueV);
+    }
+
+    return SDValue();
   }
   case RISCVISD::BR_CC: {
Index: llvm/test/CodeGen/RISCV/compress-opt-select.ll
===================================================================
--- llvm/test/CodeGen/RISCV/compress-opt-select.ll
+++ llvm/test/CodeGen/RISCV/compress-opt-select.ll
@@ -42,7 +42,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_small_pos(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, 20
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -56,7 +56,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_small_neg(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, -20
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -70,7 +70,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_small_edge_pos(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, 31
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -84,7 +84,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_small_edge_neg(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, -32
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -99,7 +99,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_medium_ledge_pos(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, 32
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -114,7 +114,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_medium_ledge_neg(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, -33
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -129,7 +129,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_medium_pos(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, 63
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -144,7 +144,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_medium_neg(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, -63
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -159,7 +159,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_medium_bedge_pos(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, 2047
-  %toRet = select i1 %cmp, i32 0, i32 42
+  %toRet = select i1 %cmp, i32 -99, i32 42
   ret i32 %toRet
 }
@@ -174,7 +174,7 @@
 ; RV32IFD: RESBRNORMAL [[ANOTHER:.*]], [[REG]], [[PLACE:.*]]
 define i32 @f_medium_bedge_neg(i32 %in0) minsize {
   %cmp = icmp CMPCOND i32 %in0, -2047
- %toRet = select i1 %cmp, i32 0, i32 42 + %toRet = select i1 %cmp, i32 -99, i32 42 ret i32 %toRet } @@ -185,7 +185,7 @@ ; nothing to check. define i32 @f_big_ledge_pos(i32 %in0) minsize { %cmp = icmp CMPCOND i32 %in0, 2048 - %toRet = select i1 %cmp, i32 0, i32 42 + %toRet = select i1 %cmp, i32 -99, i32 42 ret i32 %toRet } @@ -196,6 +196,6 @@ ; nothing to check. define i32 @f_big_ledge_neg(i32 %in0) minsize { %cmp = icmp CMPCOND i32 %in0, -2048 - %toRet = select i1 %cmp, i32 0, i32 42 + %toRet = select i1 %cmp, i32 -99, i32 42 ret i32 %toRet } Index: llvm/test/CodeGen/RISCV/double-convert.ll =================================================================== --- llvm/test/CodeGen/RISCV/double-convert.ll +++ llvm/test/CodeGen/RISCV/double-convert.ll @@ -108,7 +108,6 @@ ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, 269824 @@ -116,44 +115,40 @@ ; RV32I-NEXT: lui a2, 1047552 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __gtdf2@plt -; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: lui a3, 794112 -; RV32I-NEXT: li s2, 0 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gedf2@plt -; RV32I-NEXT: mv s4, a0 +; RV32I-NEXT: mv s3, a0 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __fixdfsi@plt ; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: lui s5, 524288 -; RV32I-NEXT: bltz s4, .LBB3_2 +; RV32I-NEXT: lui s4, 524288 +; RV32I-NEXT: bltz s3, .LBB3_2 ; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv s5, a0 +; RV32I-NEXT: mv s4, a0 ; RV32I-NEXT: .LBB3_2: # %start -; RV32I-NEXT: bge s2, s3, .LBB3_4 +; RV32I-NEXT: blez s2, .LBB3_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: addi s5, a1, -1 +; RV32I-NEXT: addi s4, a1, -1 ; RV32I-NEXT: .LBB3_4: # %start ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: mv a2, s1 ; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: call __unorddf2@plt -; RV32I-NEXT: bnez a0, .LBB3_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv s2, s5 -; RV32I-NEXT: .LBB3_6: # %start -; RV32I-NEXT: mv a0, s2 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s4 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 32 ; RV32I-NEXT: ret ; @@ -192,12 +187,9 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unorddf2@plt -; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: bnez a1, .LBB3_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a0, s2 -; RV64I-NEXT: .LBB3_6: # %start +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -355,29 +347,29 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gedf2@plt -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: mv 
a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunsdfdi@plt -; RV64I-NEXT: li s1, 0 -; RV64I-NEXT: bltz s2, .LBB6_2 -; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: .LBB6_2: # %start ; RV64I-NEXT: li a0, 1055 ; RV64I-NEXT: slli a0, a0, 31 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: slli a1, a0, 21 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtdf2@plt -; RV64I-NEXT: blez a0, .LBB6_4 -; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: bgtz a0, .LBB6_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: j .LBB6_3 +; RV64I-NEXT: .LBB6_2: ; RV64I-NEXT: li a0, -1 -; RV64I-NEXT: srli s1, a0, 32 -; RV64I-NEXT: .LBB6_4: # %start -; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .LBB6_3: # %start ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -604,90 +596,90 @@ ; ; RV32I-LABEL: fcvt_l_d_sat: ; RV32I: # %bb.0: # %start -; RV32I-NEXT: addi sp, sp, -32 -; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv s1, a1 -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: lui a3, 802304 -; RV32I-NEXT: li s0, 0 -; RV32I-NEXT: li a2, 0 -; RV32I-NEXT: call __gedf2@plt -; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: addi sp, sp, -48 +; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, 278016 -; RV32I-NEXT: addi s4, a0, -1 +; RV32I-NEXT: addi s3, a0, -1 ; RV32I-NEXT: li a2, -1 -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a3, s4 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a3, s3 ; RV32I-NEXT: call __gtdf2@plt -; RV32I-NEXT: sgtz a0, a0 -; RV32I-NEXT: neg s6, a0 -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: mv s7, a0 +; RV32I-NEXT: lui a3, 802304 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: call __gedf2@plt +; RV32I-NEXT: mv s5, a0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __fixdfdi@plt -; RV32I-NEXT: mv s5, a1 -; RV32I-NEXT: bltz s3, .LBB12_2 +; RV32I-NEXT: mv s6, a0 +; RV32I-NEXT: mv s4, a1 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a3, s0 +; RV32I-NEXT: call __unorddf2@plt +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: bgtz s7, .LBB12_2 ; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: or s6, s6, a0 +; RV32I-NEXT: slti a0, s5, 0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s6 +; RV32I-NEXT: and s2, s2, a0 ; RV32I-NEXT: .LBB12_2: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv 
a3, s1 -; RV32I-NEXT: call __unorddf2@plt -; RV32I-NEXT: mv s3, s0 -; RV32I-NEXT: bnez a0, .LBB12_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv s3, s6 -; RV32I-NEXT: .LBB12_4: # %start ; RV32I-NEXT: li a2, -1 -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a3, s4 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: mv a3, s3 ; RV32I-NEXT: call __gtdf2@plt -; RV32I-NEXT: mv s4, a0 +; RV32I-NEXT: mv s3, a0 ; RV32I-NEXT: lui a3, 802304 -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a2, s0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gedf2@plt ; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: lui s6, 524288 -; RV32I-NEXT: bltz a0, .LBB12_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv s6, s5 +; RV32I-NEXT: lui s5, 524288 +; RV32I-NEXT: bltz a0, .LBB12_4 +; RV32I-NEXT: # %bb.3: # %start +; RV32I-NEXT: mv s5, s4 +; RV32I-NEXT: .LBB12_4: # %start +; RV32I-NEXT: blez s3, .LBB12_6 +; RV32I-NEXT: # %bb.5: +; RV32I-NEXT: addi s5, a1, -1 ; RV32I-NEXT: .LBB12_6: # %start -; RV32I-NEXT: bge s0, s4, .LBB12_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: addi s6, a1, -1 -; RV32I-NEXT: .LBB12_8: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 -; RV32I-NEXT: call __unorddf2@plt -; RV32I-NEXT: bnez a0, .LBB12_10 -; RV32I-NEXT: # %bb.9: # %start -; RV32I-NEXT: mv s0, s6 -; RV32I-NEXT: .LBB12_10: # %start -; RV32I-NEXT: mv a0, s3 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 -; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 32 +; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a3, s0 +; RV32I-NEXT: call __unorddf2@plt +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a1, a0, s5 +; RV32I-NEXT: mv a0, s2 +; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 48 ; RV32I-NEXT: ret ; ; RV64I-LABEL: fcvt_l_d_sat: @@ -726,12 +718,9 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unorddf2@plt -; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: bnez a1, .LBB12_7 -; RV64I-NEXT: # %bb.6: # %start -; RV64I-NEXT: mv a0, s1 -; RV64I-NEXT: .LBB12_7: # %start +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -1369,45 +1358,42 @@ ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv s1, a1 -; RV32I-NEXT: mv s2, a0 +; RV32I-NEXT: mv s0, a1 +; 
RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, 265728 ; RV32I-NEXT: addi a3, a0, -64 -; RV32I-NEXT: li s0, 0 -; RV32I-NEXT: mv a0, s2 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gtdf2@plt -; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: lui a3, 790016 -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gedf2@plt -; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __fixdfsi@plt -; RV32I-NEXT: lui s5, 1048568 -; RV32I-NEXT: bltz s4, .LBB26_2 +; RV32I-NEXT: lui s4, 1048568 +; RV32I-NEXT: bltz s3, .LBB26_2 ; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv s5, a0 +; RV32I-NEXT: mv s4, a0 ; RV32I-NEXT: .LBB26_2: # %start -; RV32I-NEXT: bge s0, s3, .LBB26_4 +; RV32I-NEXT: blez s2, .LBB26_4 ; RV32I-NEXT: # %bb.3: ; RV32I-NEXT: lui a0, 8 -; RV32I-NEXT: addi s5, a0, -1 +; RV32I-NEXT: addi s4, a0, -1 ; RV32I-NEXT: .LBB26_4: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: call __unorddf2@plt -; RV32I-NEXT: bnez a0, .LBB26_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv s0, s5 -; RV32I-NEXT: .LBB26_6: # %start -; RV32I-NEXT: slli a0, s0, 16 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s4 +; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload @@ -1415,7 +1401,6 @@ ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 32 ; RV32I-NEXT: ret ; @@ -1452,12 +1437,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unorddf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB26_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB26_6: # %start -; RV64I-NEXT: slli a0, a1, 48 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 +; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -1539,29 +1522,26 @@ ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gtdf2@plt -; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv s3, a0 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: mv a1, s1 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __gedf2@plt -; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv s0, a0 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: mv a1, s1 ; RV32I-NEXT: call __fixunsdfsi@plt -; RV32I-NEXT: li a1, 0 -; RV32I-NEXT: bltz s3, .LBB28_2 +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: mv a2, a1 +; RV32I-NEXT: bgtz s3, .LBB28_2 ; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: slti a2, s0, 0 +; RV32I-NEXT: addi a2, a2, -1 +; RV32I-NEXT: and a2, a2, a0 ; RV32I-NEXT: .LBB28_2: # %start -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi a0, a0, -1 -; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: bgtz s0, .LBB28_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: .LBB28_4: # %start -; RV32I-NEXT: and a0, a2, a0 +; RV32I-NEXT: and 
a0, a2, a1 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -1577,29 +1557,27 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gedf2@plt -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunsdfdi@plt -; RV64I-NEXT: li s2, 0 -; RV64I-NEXT: bltz s1, .LBB28_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: .LBB28_2: # %start +; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a0, 8312 ; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: slli a1, a0, 37 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtdf2@plt ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bgtz a0, .LBB28_4 -; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: mv a2, s2 -; RV64I-NEXT: .LBB28_4: # %start +; RV64I-NEXT: bgtz a0, .LBB28_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a2, a0, s1 +; RV64I-NEXT: .LBB28_2: # %start ; RV64I-NEXT: and a0, a2, a1 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -1684,50 +1662,46 @@ ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv s1, a1 -; RV32I-NEXT: mv s2, a0 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a3, 263676 -; RV32I-NEXT: li s0, 0 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gtdf2@plt -; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: lui a3, 787968 -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gedf2@plt -; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __fixdfsi@plt ; RV32I-NEXT: li a1, -128 -; RV32I-NEXT: bltz s4, .LBB30_2 +; RV32I-NEXT: bltz s3, .LBB30_2 ; RV32I-NEXT: # %bb.1: # %start ; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: .LBB30_2: # %start -; RV32I-NEXT: li s4, 127 -; RV32I-NEXT: blt s0, s3, .LBB30_4 +; RV32I-NEXT: li s3, 127 +; RV32I-NEXT: bgtz s2, .LBB30_4 ; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv s4, a1 +; RV32I-NEXT: mv s3, a1 ; RV32I-NEXT: .LBB30_4: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: call __unorddf2@plt -; RV32I-NEXT: bnez a0, .LBB30_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv s0, s4 -; RV32I-NEXT: .LBB30_6: # %start -; RV32I-NEXT: slli a0, s0, 24 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s3 +; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload 
; RV32I-NEXT: addi sp, sp, 32 ; RV32I-NEXT: ret ; @@ -1763,12 +1737,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unorddf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB30_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s1 -; RV64I-NEXT: .LBB30_6: # %start -; RV64I-NEXT: slli a0, a1, 56 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -1852,27 +1824,24 @@ ; RV32I-NEXT: lui a3, 263934 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gtdf2@plt -; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv s3, a0 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: mv a1, s1 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __gedf2@plt -; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv s0, a0 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: mv a1, s1 ; RV32I-NEXT: call __fixunsdfsi@plt -; RV32I-NEXT: li a1, 0 -; RV32I-NEXT: bltz s3, .LBB32_2 +; RV32I-NEXT: li a1, 255 +; RV32I-NEXT: bgtz s3, .LBB32_2 ; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: slti a1, s0, 0 +; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: and a1, a1, a0 ; RV32I-NEXT: .LBB32_2: # %start -; RV32I-NEXT: li a0, 255 -; RV32I-NEXT: bgtz s0, .LBB32_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv a0, a1 -; RV32I-NEXT: .LBB32_4: # %start -; RV32I-NEXT: andi a0, a0, 255 +; RV32I-NEXT: andi a0, a1, 255 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -1888,26 +1857,24 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gedf2@plt -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunsdfdi@plt -; RV64I-NEXT: li s2, 0 -; RV64I-NEXT: bltz s1, .LBB32_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: .LBB32_2: # %start +; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a0, 131967 ; RV64I-NEXT: slli a1, a0, 33 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtdf2@plt ; RV64I-NEXT: li a1, 255 -; RV64I-NEXT: bgtz a0, .LBB32_4 -; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB32_4: # %start +; RV64I-NEXT: bgtz a0, .LBB32_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a1, a0, s1 +; RV64I-NEXT: .LBB32_2: # %start ; RV64I-NEXT: andi a0, a1, 255 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -1988,29 +1955,30 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gedf2@plt -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunsdfdi@plt -; RV64I-NEXT: li s2, 0 -; RV64I-NEXT: bltz s1, .LBB33_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: .LBB33_2: # %start +; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: li a0, 1055 ; RV64I-NEXT: slli a0, a0, 31 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: 
slli a1, a0, 21 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtdf2@plt -; RV64I-NEXT: blez a0, .LBB33_4 -; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: bgtz a0, .LBB33_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: j .LBB33_3 +; RV64I-NEXT: .LBB33_2: ; RV64I-NEXT: li a0, -1 -; RV64I-NEXT: srli s2, a0, 32 -; RV64I-NEXT: .LBB33_4: # %start -; RV64I-NEXT: slli a0, s2, 32 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .LBB33_3: # %start +; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -2042,7 +2010,6 @@ ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, 269824 @@ -2050,44 +2017,40 @@ ; RV32I-NEXT: lui a2, 1047552 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __gtdf2@plt -; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: lui a3, 794112 -; RV32I-NEXT: li s2, 0 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gedf2@plt -; RV32I-NEXT: mv s4, a0 +; RV32I-NEXT: mv s3, a0 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __fixdfsi@plt ; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: lui s5, 524288 -; RV32I-NEXT: bltz s4, .LBB34_2 +; RV32I-NEXT: lui s4, 524288 +; RV32I-NEXT: bltz s3, .LBB34_2 ; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv s5, a0 +; RV32I-NEXT: mv s4, a0 ; RV32I-NEXT: .LBB34_2: # %start -; RV32I-NEXT: bge s2, s3, .LBB34_4 +; RV32I-NEXT: blez s2, .LBB34_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: addi s5, a1, -1 +; RV32I-NEXT: addi s4, a1, -1 ; RV32I-NEXT: .LBB34_4: # %start ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: mv a2, s1 ; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: call __unorddf2@plt -; RV32I-NEXT: bnez a0, .LBB34_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv s2, s5 -; RV32I-NEXT: .LBB34_6: # %start -; RV32I-NEXT: mv a0, s2 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s4 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 32 ; RV32I-NEXT: ret ; @@ -2126,12 +2089,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unorddf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB34_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB34_6: # %start -; RV64I-NEXT: sext.w a0, a1 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 +; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload Index: llvm/test/CodeGen/RISCV/float-convert.ll =================================================================== --- llvm/test/CodeGen/RISCV/float-convert.ll +++ llvm/test/CodeGen/RISCV/float-convert.ll @@ -76,12 +76,9 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; 
RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: bnez a1, .LBB1_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: .LBB1_6: # %start +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s2 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -121,12 +118,9 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: bnez a1, .LBB1_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a0, s2 -; RV64I-NEXT: .LBB1_6: # %start +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -275,27 +269,27 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s1, 0 -; RV64I-NEXT: bltz s2, .LBB4_2 -; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: .LBB4_2: # %start ; RV64I-NEXT: lui a0, 325632 ; RV64I-NEXT: addiw a1, a0, -1 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt -; RV64I-NEXT: blez a0, .LBB4_4 -; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: bgtz a0, .LBB4_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: j .LBB4_3 +; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: li a0, -1 -; RV64I-NEXT: srli s1, a0, 32 -; RV64I-NEXT: .LBB4_4: # %start -; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .LBB4_3: # %start ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -592,60 +586,54 @@ ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lui a0, 389120 -; RV32I-NEXT: addi s2, a0, -1 -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: mv a1, s2 -; RV32I-NEXT: call __gtsf2@plt -; RV32I-NEXT: li s0, 0 -; RV32I-NEXT: sgtz a0, a0 -; RV32I-NEXT: neg s5, a0 -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: call __fixsfdi@plt -; RV32I-NEXT: mv s3, a0 -; RV32I-NEXT: mv s4, a1 +; RV32I-NEXT: mv s0, a0 ; RV32I-NEXT: lui a1, 913408 -; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __gesf2@plt -; RV32I-NEXT: bltz a0, .LBB12_2 +; RV32I-NEXT: mv s4, a0 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: call __fixsfdi@plt +; RV32I-NEXT: mv s5, a0 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: call __unordsf2@plt +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi s1, a0, -1 +; RV32I-NEXT: lui a0, 389120 +; RV32I-NEXT: addi s3, a0, -1 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: call __gtsf2@plt +; RV32I-NEXT: bgtz a0, .LBB12_2 ; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: or s5, s5, s3 +; RV32I-NEXT: slti a0, s4, 0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s5 +; RV32I-NEXT: and 
s1, s1, a0 ; RV32I-NEXT: .LBB12_2: # %start -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: mv s3, s0 -; RV32I-NEXT: bnez a0, .LBB12_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv s3, s5 -; RV32I-NEXT: .LBB12_4: # %start ; RV32I-NEXT: lui a1, 913408 -; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __gesf2@plt -; RV32I-NEXT: lui s6, 524288 ; RV32I-NEXT: lui s5, 524288 -; RV32I-NEXT: bltz a0, .LBB12_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv s5, s4 -; RV32I-NEXT: .LBB12_6: # %start -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: lui s4, 524288 +; RV32I-NEXT: bltz a0, .LBB12_4 +; RV32I-NEXT: # %bb.3: # %start +; RV32I-NEXT: mv s4, s2 +; RV32I-NEXT: .LBB12_4: # %start +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __gtsf2@plt -; RV32I-NEXT: bge s0, a0, .LBB12_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: addi s5, s6, -1 -; RV32I-NEXT: .LBB12_8: # %start -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: bnez a0, .LBB12_10 -; RV32I-NEXT: # %bb.9: # %start -; RV32I-NEXT: mv s0, s5 -; RV32I-NEXT: .LBB12_10: # %start -; RV32I-NEXT: mv a0, s3 +; RV32I-NEXT: blez a0, .LBB12_6 +; RV32I-NEXT: # %bb.5: +; RV32I-NEXT: addi s4, s5, -1 +; RV32I-NEXT: .LBB12_6: # %start +; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: call __unordsf2@plt +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a1, a0, s4 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -653,7 +641,6 @@ ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 32 ; RV32I-NEXT: ret ; @@ -690,12 +677,9 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: bnez a1, .LBB12_7 -; RV64I-NEXT: # %bb.6: # %start -; RV64I-NEXT: mv a0, s1 -; RV64I-NEXT: .LBB12_7: # %start +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -1256,12 +1240,10 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: li a1, 0 -; RV32I-NEXT: bnez a0, .LBB24_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a1, s2 -; RV32I-NEXT: .LBB24_6: # %start -; RV32I-NEXT: slli a0, a1, 16 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s2 +; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -1300,12 +1282,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB24_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB24_6: # %start -; RV64I-NEXT: slli a0, a1, 48 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 +; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded 
Reload @@ -1379,28 +1359,26 @@ ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __gesf2@plt -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __fixunssfsi@plt -; RV32I-NEXT: li s2, 0 -; RV32I-NEXT: bltz s1, .LBB26_2 -; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: .LBB26_2: # %start +; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, 292864 ; RV32I-NEXT: addi a1, a0, -256 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bgtz a0, .LBB26_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: .LBB26_4: # %start +; RV32I-NEXT: bgtz a0, .LBB26_2 +; RV32I-NEXT: # %bb.1: # %start +; RV32I-NEXT: slti a0, s0, 0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a2, a0, s1 +; RV32I-NEXT: .LBB26_2: # %start ; RV32I-NEXT: and a0, a2, a1 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -1416,28 +1394,26 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s2, 0 -; RV64I-NEXT: bltz s1, .LBB26_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: .LBB26_2: # %start +; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a0, 292864 ; RV64I-NEXT: addiw a1, a0, -256 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bgtz a0, .LBB26_4 -; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: mv a2, s2 -; RV64I-NEXT: .LBB26_4: # %start +; RV64I-NEXT: bgtz a0, .LBB26_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a2, a0, s1 +; RV64I-NEXT: .LBB26_2: # %start ; RV64I-NEXT: and a0, a2, a1 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -1543,12 +1519,10 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: li a1, 0 -; RV32I-NEXT: bnez a0, .LBB28_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: .LBB28_6: # %start -; RV32I-NEXT: slli a0, a1, 24 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s1 +; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -1586,12 +1560,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB28_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s1 -; RV64I-NEXT: .LBB28_6: # %start -; RV64I-NEXT: slli a0, a1, 56 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld 
s0, 16(sp) # 8-byte Folded Reload @@ -1665,25 +1637,23 @@ ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill -; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __gesf2@plt -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __fixunssfsi@plt -; RV32I-NEXT: li s2, 0 -; RV32I-NEXT: bltz s1, .LBB30_2 -; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: .LBB30_2: # %start +; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a1, 276464 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: li a1, 255 -; RV32I-NEXT: bgtz a0, .LBB30_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv a1, s2 -; RV32I-NEXT: .LBB30_4: # %start +; RV32I-NEXT: bgtz a0, .LBB30_2 +; RV32I-NEXT: # %bb.1: # %start +; RV32I-NEXT: slti a0, s0, 0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a1, a0, s1 +; RV32I-NEXT: .LBB30_2: # %start ; RV32I-NEXT: andi a0, a1, 255 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -1699,25 +1669,23 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s2, 0 -; RV64I-NEXT: bltz s1, .LBB30_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: .LBB30_2: # %start +; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a1, 276464 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: li a1, 255 -; RV64I-NEXT: bgtz a0, .LBB30_4 -; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB30_4: # %start +; RV64I-NEXT: bgtz a0, .LBB30_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a1, a0, s1 +; RV64I-NEXT: .LBB30_2: # %start ; RV64I-NEXT: andi a0, a1, 255 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -1791,27 +1759,28 @@ ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s2, 0 -; RV64I-NEXT: bltz s1, .LBB31_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: .LBB31_2: # %start +; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a0, 325632 ; RV64I-NEXT: addiw a1, a0, -1 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt -; RV64I-NEXT: blez a0, .LBB31_4 -; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: bgtz a0, .LBB31_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: j .LBB31_3 +; RV64I-NEXT: .LBB31_2: ; RV64I-NEXT: li a0, -1 -; RV64I-NEXT: srli s2, a0, 32 -; RV64I-NEXT: .LBB31_4: # %start -; RV64I-NEXT: slli a0, s2, 32 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .LBB31_3: # %start +; 
RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -1865,12 +1834,9 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: bnez a1, .LBB32_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: .LBB32_6: # %start +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s2 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -1910,12 +1876,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB32_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB32_6: # %start -; RV64I-NEXT: sext.w a0, a1 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 +; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload Index: llvm/test/CodeGen/RISCV/fpclamptosat.ll =================================================================== --- llvm/test/CodeGen/RISCV/fpclamptosat.ll +++ llvm/test/CodeGen/RISCV/fpclamptosat.ll @@ -1284,20 +1284,18 @@ ; RV32IF-NEXT: seqz a2, a1 ; RV32IF-NEXT: .LBB19_3: # %entry ; RV32IF-NEXT: xori a1, a1, 1 -; RV32IF-NEXT: or a1, a1, a0 -; RV32IF-NEXT: li a0, 0 -; RV32IF-NEXT: beqz a1, .LBB19_5 +; RV32IF-NEXT: or a0, a1, a0 +; RV32IF-NEXT: seqz a0, a0 +; RV32IF-NEXT: addi a0, a0, -1 +; RV32IF-NEXT: and a0, a0, a2 +; RV32IF-NEXT: bnez a0, .LBB19_5 ; RV32IF-NEXT: # %bb.4: # %entry -; RV32IF-NEXT: mv a0, a2 -; RV32IF-NEXT: .LBB19_5: # %entry -; RV32IF-NEXT: bnez a0, .LBB19_7 -; RV32IF-NEXT: # %bb.6: # %entry ; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: j .LBB19_8 -; RV32IF-NEXT: .LBB19_7: +; RV32IF-NEXT: j .LBB19_6 +; RV32IF-NEXT: .LBB19_5: ; RV32IF-NEXT: lw a1, 12(sp) ; RV32IF-NEXT: lw a0, 8(sp) -; RV32IF-NEXT: .LBB19_8: # %entry +; RV32IF-NEXT: .LBB19_6: # %entry ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret @@ -1335,20 +1333,18 @@ ; RV32IFD-NEXT: seqz a2, a1 ; RV32IFD-NEXT: .LBB19_3: # %entry ; RV32IFD-NEXT: xori a1, a1, 1 -; RV32IFD-NEXT: or a1, a1, a0 -; RV32IFD-NEXT: li a0, 0 -; RV32IFD-NEXT: beqz a1, .LBB19_5 +; RV32IFD-NEXT: or a0, a1, a0 +; RV32IFD-NEXT: seqz a0, a0 +; RV32IFD-NEXT: addi a0, a0, -1 +; RV32IFD-NEXT: and a0, a0, a2 +; RV32IFD-NEXT: bnez a0, .LBB19_5 ; RV32IFD-NEXT: # %bb.4: # %entry -; RV32IFD-NEXT: mv a0, a2 -; RV32IFD-NEXT: .LBB19_5: # %entry -; RV32IFD-NEXT: bnez a0, .LBB19_7 -; RV32IFD-NEXT: # %bb.6: # %entry ; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: j .LBB19_8 -; RV32IFD-NEXT: .LBB19_7: +; RV32IFD-NEXT: j .LBB19_6 +; RV32IFD-NEXT: .LBB19_5: ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: .LBB19_8: # %entry +; RV32IFD-NEXT: .LBB19_6: # %entry ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret @@ -1381,45 +1377,43 @@ ; RV32IF-NEXT: seqz a0, a3 ; RV32IF-NEXT: .LBB20_3: # %entry ; RV32IF-NEXT: xori a1, a3, 1 -; RV32IF-NEXT: or a4, a1, a2 -; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: beqz a4, .LBB20_5 +; RV32IF-NEXT: or a1, a1, a2 +; RV32IF-NEXT: seqz a1, a1 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: and a0, a1, a0 +; 
RV32IF-NEXT: bnez a0, .LBB20_7 ; RV32IF-NEXT: # %bb.4: # %entry -; RV32IF-NEXT: mv a1, a0 -; RV32IF-NEXT: .LBB20_5: # %entry -; RV32IF-NEXT: bnez a1, .LBB20_9 -; RV32IF-NEXT: # %bb.6: # %entry -; RV32IF-NEXT: li a0, 0 +; RV32IF-NEXT: li a1, 0 ; RV32IF-NEXT: li a2, 0 ; RV32IF-NEXT: li a3, 1 -; RV32IF-NEXT: bnez a2, .LBB20_10 -; RV32IF-NEXT: .LBB20_7: +; RV32IF-NEXT: bnez a2, .LBB20_8 +; RV32IF-NEXT: .LBB20_5: ; RV32IF-NEXT: snez a4, a3 -; RV32IF-NEXT: bnez a1, .LBB20_11 -; RV32IF-NEXT: .LBB20_8: +; RV32IF-NEXT: bnez a1, .LBB20_9 +; RV32IF-NEXT: .LBB20_6: ; RV32IF-NEXT: snez a5, a0 ; RV32IF-NEXT: or a2, a3, a2 -; RV32IF-NEXT: bnez a2, .LBB20_12 -; RV32IF-NEXT: j .LBB20_13 -; RV32IF-NEXT: .LBB20_9: +; RV32IF-NEXT: bnez a2, .LBB20_10 +; RV32IF-NEXT: j .LBB20_11 +; RV32IF-NEXT: .LBB20_7: ; RV32IF-NEXT: lw a1, 12(sp) ; RV32IF-NEXT: lw a0, 8(sp) -; RV32IF-NEXT: beqz a2, .LBB20_7 -; RV32IF-NEXT: .LBB20_10: # %entry +; RV32IF-NEXT: beqz a2, .LBB20_5 +; RV32IF-NEXT: .LBB20_8: # %entry ; RV32IF-NEXT: sgtz a4, a2 -; RV32IF-NEXT: beqz a1, .LBB20_8 -; RV32IF-NEXT: .LBB20_11: # %entry +; RV32IF-NEXT: beqz a1, .LBB20_6 +; RV32IF-NEXT: .LBB20_9: # %entry ; RV32IF-NEXT: snez a5, a1 ; RV32IF-NEXT: or a2, a3, a2 -; RV32IF-NEXT: beqz a2, .LBB20_13 -; RV32IF-NEXT: .LBB20_12: # %entry +; RV32IF-NEXT: beqz a2, .LBB20_11 +; RV32IF-NEXT: .LBB20_10: # %entry ; RV32IF-NEXT: mv a5, a4 -; RV32IF-NEXT: .LBB20_13: # %entry -; RV32IF-NEXT: bnez a5, .LBB20_15 -; RV32IF-NEXT: # %bb.14: # %entry +; RV32IF-NEXT: .LBB20_11: # %entry +; RV32IF-NEXT: bnez a5, .LBB20_13 +; RV32IF-NEXT: # %bb.12: # %entry ; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: .LBB20_15: # %entry +; RV32IF-NEXT: .LBB20_13: # %entry ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret @@ -1469,45 +1463,43 @@ ; RV32IFD-NEXT: seqz a0, a3 ; RV32IFD-NEXT: .LBB20_3: # %entry ; RV32IFD-NEXT: xori a1, a3, 1 -; RV32IFD-NEXT: or a4, a1, a2 -; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: beqz a4, .LBB20_5 +; RV32IFD-NEXT: or a1, a1, a2 +; RV32IFD-NEXT: seqz a1, a1 +; RV32IFD-NEXT: addi a1, a1, -1 +; RV32IFD-NEXT: and a0, a1, a0 +; RV32IFD-NEXT: bnez a0, .LBB20_7 ; RV32IFD-NEXT: # %bb.4: # %entry -; RV32IFD-NEXT: mv a1, a0 -; RV32IFD-NEXT: .LBB20_5: # %entry -; RV32IFD-NEXT: bnez a1, .LBB20_9 -; RV32IFD-NEXT: # %bb.6: # %entry -; RV32IFD-NEXT: li a0, 0 +; RV32IFD-NEXT: li a1, 0 ; RV32IFD-NEXT: li a2, 0 ; RV32IFD-NEXT: li a3, 1 -; RV32IFD-NEXT: bnez a2, .LBB20_10 -; RV32IFD-NEXT: .LBB20_7: +; RV32IFD-NEXT: bnez a2, .LBB20_8 +; RV32IFD-NEXT: .LBB20_5: ; RV32IFD-NEXT: snez a4, a3 -; RV32IFD-NEXT: bnez a1, .LBB20_11 -; RV32IFD-NEXT: .LBB20_8: +; RV32IFD-NEXT: bnez a1, .LBB20_9 +; RV32IFD-NEXT: .LBB20_6: ; RV32IFD-NEXT: snez a5, a0 ; RV32IFD-NEXT: or a2, a3, a2 -; RV32IFD-NEXT: bnez a2, .LBB20_12 -; RV32IFD-NEXT: j .LBB20_13 -; RV32IFD-NEXT: .LBB20_9: +; RV32IFD-NEXT: bnez a2, .LBB20_10 +; RV32IFD-NEXT: j .LBB20_11 +; RV32IFD-NEXT: .LBB20_7: ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: beqz a2, .LBB20_7 -; RV32IFD-NEXT: .LBB20_10: # %entry +; RV32IFD-NEXT: beqz a2, .LBB20_5 +; RV32IFD-NEXT: .LBB20_8: # %entry ; RV32IFD-NEXT: sgtz a4, a2 -; RV32IFD-NEXT: beqz a1, .LBB20_8 -; RV32IFD-NEXT: .LBB20_11: # %entry +; RV32IFD-NEXT: beqz a1, .LBB20_6 +; RV32IFD-NEXT: .LBB20_9: # %entry ; RV32IFD-NEXT: snez a5, a1 ; RV32IFD-NEXT: or a2, a3, a2 -; RV32IFD-NEXT: beqz a2, .LBB20_13 -; RV32IFD-NEXT: .LBB20_12: # %entry +; RV32IFD-NEXT: beqz a2, .LBB20_11 +; RV32IFD-NEXT: .LBB20_10: # %entry ; 
RV32IFD-NEXT: mv a5, a4 -; RV32IFD-NEXT: .LBB20_13: # %entry -; RV32IFD-NEXT: bnez a5, .LBB20_15 -; RV32IFD-NEXT: # %bb.14: # %entry +; RV32IFD-NEXT: .LBB20_11: # %entry +; RV32IFD-NEXT: bnez a5, .LBB20_13 +; RV32IFD-NEXT: # %bb.12: # %entry ; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: .LBB20_15: # %entry +; RV32IFD-NEXT: .LBB20_13: # %entry ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret @@ -1618,20 +1610,18 @@ ; RV32-NEXT: seqz a2, a1 ; RV32-NEXT: .LBB22_3: # %entry ; RV32-NEXT: xori a1, a1, 1 -; RV32-NEXT: or a1, a1, a0 -; RV32-NEXT: li a0, 0 -; RV32-NEXT: beqz a1, .LBB22_5 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a2 +; RV32-NEXT: bnez a0, .LBB22_5 ; RV32-NEXT: # %bb.4: # %entry -; RV32-NEXT: mv a0, a2 -; RV32-NEXT: .LBB22_5: # %entry -; RV32-NEXT: bnez a0, .LBB22_7 -; RV32-NEXT: # %bb.6: # %entry ; RV32-NEXT: li a1, 0 -; RV32-NEXT: j .LBB22_8 -; RV32-NEXT: .LBB22_7: +; RV32-NEXT: j .LBB22_6 +; RV32-NEXT: .LBB22_5: ; RV32-NEXT: lw a1, 12(sp) ; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: .LBB22_8: # %entry +; RV32-NEXT: .LBB22_6: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -1677,45 +1667,43 @@ ; RV32-NEXT: seqz a0, a3 ; RV32-NEXT: .LBB23_3: # %entry ; RV32-NEXT: xori a1, a3, 1 -; RV32-NEXT: or a4, a1, a2 -; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a4, .LBB23_5 +; RV32-NEXT: or a1, a1, a2 +; RV32-NEXT: seqz a1, a1 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: and a0, a1, a0 +; RV32-NEXT: bnez a0, .LBB23_7 ; RV32-NEXT: # %bb.4: # %entry -; RV32-NEXT: mv a1, a0 -; RV32-NEXT: .LBB23_5: # %entry -; RV32-NEXT: bnez a1, .LBB23_9 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: li a0, 0 +; RV32-NEXT: li a1, 0 ; RV32-NEXT: li a2, 0 ; RV32-NEXT: li a3, 1 -; RV32-NEXT: bnez a2, .LBB23_10 -; RV32-NEXT: .LBB23_7: +; RV32-NEXT: bnez a2, .LBB23_8 +; RV32-NEXT: .LBB23_5: ; RV32-NEXT: snez a4, a3 -; RV32-NEXT: bnez a1, .LBB23_11 -; RV32-NEXT: .LBB23_8: +; RV32-NEXT: bnez a1, .LBB23_9 +; RV32-NEXT: .LBB23_6: ; RV32-NEXT: snez a5, a0 ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: bnez a2, .LBB23_12 -; RV32-NEXT: j .LBB23_13 -; RV32-NEXT: .LBB23_9: +; RV32-NEXT: bnez a2, .LBB23_10 +; RV32-NEXT: j .LBB23_11 +; RV32-NEXT: .LBB23_7: ; RV32-NEXT: lw a1, 12(sp) ; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: beqz a2, .LBB23_7 -; RV32-NEXT: .LBB23_10: # %entry +; RV32-NEXT: beqz a2, .LBB23_5 +; RV32-NEXT: .LBB23_8: # %entry ; RV32-NEXT: sgtz a4, a2 -; RV32-NEXT: beqz a1, .LBB23_8 -; RV32-NEXT: .LBB23_11: # %entry +; RV32-NEXT: beqz a1, .LBB23_6 +; RV32-NEXT: .LBB23_9: # %entry ; RV32-NEXT: snez a5, a1 ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: beqz a2, .LBB23_13 -; RV32-NEXT: .LBB23_12: # %entry +; RV32-NEXT: beqz a2, .LBB23_11 +; RV32-NEXT: .LBB23_10: # %entry ; RV32-NEXT: mv a5, a4 -; RV32-NEXT: .LBB23_13: # %entry -; RV32-NEXT: bnez a5, .LBB23_15 -; RV32-NEXT: # %bb.14: # %entry +; RV32-NEXT: .LBB23_11: # %entry +; RV32-NEXT: bnez a5, .LBB23_13 +; RV32-NEXT: # %bb.12: # %entry ; RV32-NEXT: li a0, 0 ; RV32-NEXT: li a1, 0 -; RV32-NEXT: .LBB23_15: # %entry +; RV32-NEXT: .LBB23_13: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -1888,20 +1876,18 @@ ; RV32-NEXT: seqz a2, a1 ; RV32-NEXT: .LBB25_3: # %entry ; RV32-NEXT: xori a1, a1, 1 -; RV32-NEXT: or a1, a1, a0 -; RV32-NEXT: li a0, 0 -; RV32-NEXT: beqz a1, .LBB25_5 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: seqz a0, a0 +; 
RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a2 +; RV32-NEXT: bnez a0, .LBB25_5 ; RV32-NEXT: # %bb.4: # %entry -; RV32-NEXT: mv a0, a2 -; RV32-NEXT: .LBB25_5: # %entry -; RV32-NEXT: bnez a0, .LBB25_7 -; RV32-NEXT: # %bb.6: # %entry ; RV32-NEXT: li a1, 0 -; RV32-NEXT: j .LBB25_8 -; RV32-NEXT: .LBB25_7: +; RV32-NEXT: j .LBB25_6 +; RV32-NEXT: .LBB25_5: ; RV32-NEXT: lw a1, 12(sp) ; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: .LBB25_8: # %entry +; RV32-NEXT: .LBB25_6: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -1951,45 +1937,43 @@ ; RV32-NEXT: seqz a0, a3 ; RV32-NEXT: .LBB26_3: # %entry ; RV32-NEXT: xori a1, a3, 1 -; RV32-NEXT: or a4, a1, a2 -; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a4, .LBB26_5 +; RV32-NEXT: or a1, a1, a2 +; RV32-NEXT: seqz a1, a1 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: and a0, a1, a0 +; RV32-NEXT: bnez a0, .LBB26_7 ; RV32-NEXT: # %bb.4: # %entry -; RV32-NEXT: mv a1, a0 -; RV32-NEXT: .LBB26_5: # %entry -; RV32-NEXT: bnez a1, .LBB26_9 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: li a0, 0 +; RV32-NEXT: li a1, 0 ; RV32-NEXT: li a2, 0 ; RV32-NEXT: li a3, 1 -; RV32-NEXT: bnez a2, .LBB26_10 -; RV32-NEXT: .LBB26_7: +; RV32-NEXT: bnez a2, .LBB26_8 +; RV32-NEXT: .LBB26_5: ; RV32-NEXT: snez a4, a3 -; RV32-NEXT: bnez a1, .LBB26_11 -; RV32-NEXT: .LBB26_8: +; RV32-NEXT: bnez a1, .LBB26_9 +; RV32-NEXT: .LBB26_6: ; RV32-NEXT: snez a5, a0 ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: bnez a2, .LBB26_12 -; RV32-NEXT: j .LBB26_13 -; RV32-NEXT: .LBB26_9: +; RV32-NEXT: bnez a2, .LBB26_10 +; RV32-NEXT: j .LBB26_11 +; RV32-NEXT: .LBB26_7: ; RV32-NEXT: lw a1, 12(sp) ; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: beqz a2, .LBB26_7 -; RV32-NEXT: .LBB26_10: # %entry +; RV32-NEXT: beqz a2, .LBB26_5 +; RV32-NEXT: .LBB26_8: # %entry ; RV32-NEXT: sgtz a4, a2 -; RV32-NEXT: beqz a1, .LBB26_8 -; RV32-NEXT: .LBB26_11: # %entry +; RV32-NEXT: beqz a1, .LBB26_6 +; RV32-NEXT: .LBB26_9: # %entry ; RV32-NEXT: snez a5, a1 ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: beqz a2, .LBB26_13 -; RV32-NEXT: .LBB26_12: # %entry +; RV32-NEXT: beqz a2, .LBB26_11 +; RV32-NEXT: .LBB26_10: # %entry ; RV32-NEXT: mv a5, a4 -; RV32-NEXT: .LBB26_13: # %entry -; RV32-NEXT: bnez a5, .LBB26_15 -; RV32-NEXT: # %bb.14: # %entry +; RV32-NEXT: .LBB26_11: # %entry +; RV32-NEXT: bnez a5, .LBB26_13 +; RV32-NEXT: # %bb.12: # %entry ; RV32-NEXT: li a0, 0 ; RV32-NEXT: li a1, 0 -; RV32-NEXT: .LBB26_15: # %entry +; RV32-NEXT: .LBB26_13: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -3117,100 +3101,92 @@ ; RV32IF-NEXT: lui a4, 524288 ; RV32IF-NEXT: addi a1, a4, -1 ; RV32IF-NEXT: mv a3, a7 -; RV32IF-NEXT: bne a5, a1, .LBB45_17 +; RV32IF-NEXT: bne a5, a1, .LBB45_19 ; RV32IF-NEXT: # %bb.1: # %entry ; RV32IF-NEXT: or a6, a0, a2 -; RV32IF-NEXT: bnez a6, .LBB45_18 +; RV32IF-NEXT: bnez a6, .LBB45_20 ; RV32IF-NEXT: .LBB45_2: # %entry ; RV32IF-NEXT: mv a7, a5 -; RV32IF-NEXT: bgez a2, .LBB45_19 +; RV32IF-NEXT: bgez a2, .LBB45_21 ; RV32IF-NEXT: .LBB45_3: # %entry -; RV32IF-NEXT: bgeu a5, a1, .LBB45_20 +; RV32IF-NEXT: bgeu a5, a1, .LBB45_22 ; RV32IF-NEXT: .LBB45_4: # %entry -; RV32IF-NEXT: bnez a6, .LBB45_21 +; RV32IF-NEXT: beqz a6, .LBB45_6 ; RV32IF-NEXT: .LBB45_5: # %entry -; RV32IF-NEXT: li a6, 0 -; RV32IF-NEXT: bnez a2, .LBB45_22 +; RV32IF-NEXT: mv a5, a7 ; RV32IF-NEXT: .LBB45_6: # %entry -; RV32IF-NEXT: bgez a2, .LBB45_23 -; RV32IF-NEXT: .LBB45_7: # %entry -; RV32IF-NEXT: mv a0, a5 -; RV32IF-NEXT: bltz a2, .LBB45_24 +; RV32IF-NEXT: srai a1, 
a2, 31 +; RV32IF-NEXT: seqz a6, a2 +; RV32IF-NEXT: bltz a2, .LBB45_8 +; RV32IF-NEXT: # %bb.7: # %entry +; RV32IF-NEXT: li a2, 0 ; RV32IF-NEXT: .LBB45_8: # %entry +; RV32IF-NEXT: and a1, a1, a0 +; RV32IF-NEXT: addi a6, a6, -1 +; RV32IF-NEXT: mv a0, a5 +; RV32IF-NEXT: bgez a2, .LBB45_10 +; RV32IF-NEXT: # %bb.9: # %entry +; RV32IF-NEXT: lui a0, 524288 +; RV32IF-NEXT: .LBB45_10: # %entry +; RV32IF-NEXT: and a6, a6, a1 ; RV32IF-NEXT: mv a1, a5 -; RV32IF-NEXT: bltu a4, a5, .LBB45_10 -; RV32IF-NEXT: .LBB45_9: # %entry +; RV32IF-NEXT: bltu a4, a5, .LBB45_12 +; RV32IF-NEXT: # %bb.11: # %entry ; RV32IF-NEXT: lui a1, 524288 -; RV32IF-NEXT: .LBB45_10: # %entry +; RV32IF-NEXT: .LBB45_12: # %entry ; RV32IF-NEXT: and a6, a6, a2 ; RV32IF-NEXT: li a7, -1 -; RV32IF-NEXT: bne a6, a7, .LBB45_25 -; RV32IF-NEXT: # %bb.11: # %entry +; RV32IF-NEXT: bne a6, a7, .LBB45_23 +; RV32IF-NEXT: # %bb.13: # %entry ; RV32IF-NEXT: mv t0, a3 -; RV32IF-NEXT: bgeu a4, a5, .LBB45_26 -; RV32IF-NEXT: .LBB45_12: # %entry -; RV32IF-NEXT: mv a0, a3 -; RV32IF-NEXT: bne a5, a4, .LBB45_27 -; RV32IF-NEXT: .LBB45_13: # %entry -; RV32IF-NEXT: bltz a2, .LBB45_28 +; RV32IF-NEXT: bgeu a4, a5, .LBB45_24 ; RV32IF-NEXT: .LBB45_14: # %entry -; RV32IF-NEXT: beq a6, a7, .LBB45_16 -; RV32IF-NEXT: .LBB45_15: # %entry ; RV32IF-NEXT: mv a0, a3 +; RV32IF-NEXT: bne a5, a4, .LBB45_25 +; RV32IF-NEXT: .LBB45_15: # %entry +; RV32IF-NEXT: bltz a2, .LBB45_26 ; RV32IF-NEXT: .LBB45_16: # %entry +; RV32IF-NEXT: beq a6, a7, .LBB45_18 +; RV32IF-NEXT: .LBB45_17: # %entry +; RV32IF-NEXT: mv a0, a3 +; RV32IF-NEXT: .LBB45_18: # %entry ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret -; RV32IF-NEXT: .LBB45_17: # %entry +; RV32IF-NEXT: .LBB45_19: # %entry ; RV32IF-NEXT: sltu a3, a5, a1 ; RV32IF-NEXT: addi a3, a3, -1 ; RV32IF-NEXT: or a3, a3, a7 ; RV32IF-NEXT: or a6, a0, a2 ; RV32IF-NEXT: beqz a6, .LBB45_2 -; RV32IF-NEXT: .LBB45_18: # %entry +; RV32IF-NEXT: .LBB45_20: # %entry ; RV32IF-NEXT: slti a3, a2, 0 ; RV32IF-NEXT: addi a3, a3, -1 ; RV32IF-NEXT: or a3, a3, a7 ; RV32IF-NEXT: mv a7, a5 ; RV32IF-NEXT: bltz a2, .LBB45_3 -; RV32IF-NEXT: .LBB45_19: # %entry +; RV32IF-NEXT: .LBB45_21: # %entry ; RV32IF-NEXT: mv a7, a1 ; RV32IF-NEXT: bltu a5, a1, .LBB45_4 -; RV32IF-NEXT: .LBB45_20: # %entry -; RV32IF-NEXT: mv a5, a1 -; RV32IF-NEXT: beqz a6, .LBB45_5 -; RV32IF-NEXT: .LBB45_21: # %entry -; RV32IF-NEXT: mv a5, a7 -; RV32IF-NEXT: li a6, 0 -; RV32IF-NEXT: beqz a2, .LBB45_6 ; RV32IF-NEXT: .LBB45_22: # %entry -; RV32IF-NEXT: srai a1, a2, 31 -; RV32IF-NEXT: and a6, a1, a0 -; RV32IF-NEXT: bltz a2, .LBB45_7 +; RV32IF-NEXT: mv a5, a1 +; RV32IF-NEXT: bnez a6, .LBB45_5 +; RV32IF-NEXT: j .LBB45_6 ; RV32IF-NEXT: .LBB45_23: # %entry -; RV32IF-NEXT: li a2, 0 -; RV32IF-NEXT: mv a0, a5 -; RV32IF-NEXT: bgez a2, .LBB45_8 -; RV32IF-NEXT: .LBB45_24: # %entry -; RV32IF-NEXT: lui a0, 524288 -; RV32IF-NEXT: mv a1, a5 -; RV32IF-NEXT: bgeu a4, a5, .LBB45_9 -; RV32IF-NEXT: j .LBB45_10 -; RV32IF-NEXT: .LBB45_25: # %entry ; RV32IF-NEXT: mv a1, a0 ; RV32IF-NEXT: mv t0, a3 -; RV32IF-NEXT: bltu a4, a5, .LBB45_12 -; RV32IF-NEXT: .LBB45_26: # %entry +; RV32IF-NEXT: bltu a4, a5, .LBB45_14 +; RV32IF-NEXT: .LBB45_24: # %entry ; RV32IF-NEXT: li t0, 0 ; RV32IF-NEXT: mv a0, a3 -; RV32IF-NEXT: beq a5, a4, .LBB45_13 -; RV32IF-NEXT: .LBB45_27: # %entry +; RV32IF-NEXT: beq a5, a4, .LBB45_15 +; RV32IF-NEXT: .LBB45_25: # %entry ; RV32IF-NEXT: mv a0, t0 -; RV32IF-NEXT: bgez a2, .LBB45_14 -; RV32IF-NEXT: .LBB45_28: # %entry +; RV32IF-NEXT: bgez a2, .LBB45_16 +; 
RV32IF-NEXT: .LBB45_26: # %entry ; RV32IF-NEXT: li a3, 0 -; RV32IF-NEXT: bne a6, a7, .LBB45_15 -; RV32IF-NEXT: j .LBB45_16 +; RV32IF-NEXT: bne a6, a7, .LBB45_17 +; RV32IF-NEXT: j .LBB45_18 ; ; RV64IF-LABEL: stest_f64i64_mm: ; RV64IF: # %bb.0: # %entry @@ -3278,100 +3254,92 @@ ; RV32IFD-NEXT: lui a4, 524288 ; RV32IFD-NEXT: addi a1, a4, -1 ; RV32IFD-NEXT: mv a3, a7 -; RV32IFD-NEXT: bne a5, a1, .LBB45_17 +; RV32IFD-NEXT: bne a5, a1, .LBB45_19 ; RV32IFD-NEXT: # %bb.1: # %entry ; RV32IFD-NEXT: or a6, a0, a2 -; RV32IFD-NEXT: bnez a6, .LBB45_18 +; RV32IFD-NEXT: bnez a6, .LBB45_20 ; RV32IFD-NEXT: .LBB45_2: # %entry ; RV32IFD-NEXT: mv a7, a5 -; RV32IFD-NEXT: bgez a2, .LBB45_19 +; RV32IFD-NEXT: bgez a2, .LBB45_21 ; RV32IFD-NEXT: .LBB45_3: # %entry -; RV32IFD-NEXT: bgeu a5, a1, .LBB45_20 +; RV32IFD-NEXT: bgeu a5, a1, .LBB45_22 ; RV32IFD-NEXT: .LBB45_4: # %entry -; RV32IFD-NEXT: bnez a6, .LBB45_21 +; RV32IFD-NEXT: beqz a6, .LBB45_6 ; RV32IFD-NEXT: .LBB45_5: # %entry -; RV32IFD-NEXT: li a6, 0 -; RV32IFD-NEXT: bnez a2, .LBB45_22 +; RV32IFD-NEXT: mv a5, a7 ; RV32IFD-NEXT: .LBB45_6: # %entry -; RV32IFD-NEXT: bgez a2, .LBB45_23 -; RV32IFD-NEXT: .LBB45_7: # %entry -; RV32IFD-NEXT: mv a0, a5 -; RV32IFD-NEXT: bltz a2, .LBB45_24 +; RV32IFD-NEXT: srai a1, a2, 31 +; RV32IFD-NEXT: seqz a6, a2 +; RV32IFD-NEXT: bltz a2, .LBB45_8 +; RV32IFD-NEXT: # %bb.7: # %entry +; RV32IFD-NEXT: li a2, 0 ; RV32IFD-NEXT: .LBB45_8: # %entry +; RV32IFD-NEXT: and a1, a1, a0 +; RV32IFD-NEXT: addi a6, a6, -1 +; RV32IFD-NEXT: mv a0, a5 +; RV32IFD-NEXT: bgez a2, .LBB45_10 +; RV32IFD-NEXT: # %bb.9: # %entry +; RV32IFD-NEXT: lui a0, 524288 +; RV32IFD-NEXT: .LBB45_10: # %entry +; RV32IFD-NEXT: and a6, a6, a1 ; RV32IFD-NEXT: mv a1, a5 -; RV32IFD-NEXT: bltu a4, a5, .LBB45_10 -; RV32IFD-NEXT: .LBB45_9: # %entry +; RV32IFD-NEXT: bltu a4, a5, .LBB45_12 +; RV32IFD-NEXT: # %bb.11: # %entry ; RV32IFD-NEXT: lui a1, 524288 -; RV32IFD-NEXT: .LBB45_10: # %entry +; RV32IFD-NEXT: .LBB45_12: # %entry ; RV32IFD-NEXT: and a6, a6, a2 ; RV32IFD-NEXT: li a7, -1 -; RV32IFD-NEXT: bne a6, a7, .LBB45_25 -; RV32IFD-NEXT: # %bb.11: # %entry +; RV32IFD-NEXT: bne a6, a7, .LBB45_23 +; RV32IFD-NEXT: # %bb.13: # %entry ; RV32IFD-NEXT: mv t0, a3 -; RV32IFD-NEXT: bgeu a4, a5, .LBB45_26 -; RV32IFD-NEXT: .LBB45_12: # %entry -; RV32IFD-NEXT: mv a0, a3 -; RV32IFD-NEXT: bne a5, a4, .LBB45_27 -; RV32IFD-NEXT: .LBB45_13: # %entry -; RV32IFD-NEXT: bltz a2, .LBB45_28 +; RV32IFD-NEXT: bgeu a4, a5, .LBB45_24 ; RV32IFD-NEXT: .LBB45_14: # %entry -; RV32IFD-NEXT: beq a6, a7, .LBB45_16 -; RV32IFD-NEXT: .LBB45_15: # %entry ; RV32IFD-NEXT: mv a0, a3 +; RV32IFD-NEXT: bne a5, a4, .LBB45_25 +; RV32IFD-NEXT: .LBB45_15: # %entry +; RV32IFD-NEXT: bltz a2, .LBB45_26 ; RV32IFD-NEXT: .LBB45_16: # %entry +; RV32IFD-NEXT: beq a6, a7, .LBB45_18 +; RV32IFD-NEXT: .LBB45_17: # %entry +; RV32IFD-NEXT: mv a0, a3 +; RV32IFD-NEXT: .LBB45_18: # %entry ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret -; RV32IFD-NEXT: .LBB45_17: # %entry +; RV32IFD-NEXT: .LBB45_19: # %entry ; RV32IFD-NEXT: sltu a3, a5, a1 ; RV32IFD-NEXT: addi a3, a3, -1 ; RV32IFD-NEXT: or a3, a3, a7 ; RV32IFD-NEXT: or a6, a0, a2 ; RV32IFD-NEXT: beqz a6, .LBB45_2 -; RV32IFD-NEXT: .LBB45_18: # %entry +; RV32IFD-NEXT: .LBB45_20: # %entry ; RV32IFD-NEXT: slti a3, a2, 0 ; RV32IFD-NEXT: addi a3, a3, -1 ; RV32IFD-NEXT: or a3, a3, a7 ; RV32IFD-NEXT: mv a7, a5 ; RV32IFD-NEXT: bltz a2, .LBB45_3 -; RV32IFD-NEXT: .LBB45_19: # %entry +; RV32IFD-NEXT: .LBB45_21: # %entry ; RV32IFD-NEXT: mv a7, a1 ; 
RV32IFD-NEXT: bltu a5, a1, .LBB45_4 -; RV32IFD-NEXT: .LBB45_20: # %entry -; RV32IFD-NEXT: mv a5, a1 -; RV32IFD-NEXT: beqz a6, .LBB45_5 -; RV32IFD-NEXT: .LBB45_21: # %entry -; RV32IFD-NEXT: mv a5, a7 -; RV32IFD-NEXT: li a6, 0 -; RV32IFD-NEXT: beqz a2, .LBB45_6 ; RV32IFD-NEXT: .LBB45_22: # %entry -; RV32IFD-NEXT: srai a1, a2, 31 -; RV32IFD-NEXT: and a6, a1, a0 -; RV32IFD-NEXT: bltz a2, .LBB45_7 +; RV32IFD-NEXT: mv a5, a1 +; RV32IFD-NEXT: bnez a6, .LBB45_5 +; RV32IFD-NEXT: j .LBB45_6 ; RV32IFD-NEXT: .LBB45_23: # %entry -; RV32IFD-NEXT: li a2, 0 -; RV32IFD-NEXT: mv a0, a5 -; RV32IFD-NEXT: bgez a2, .LBB45_8 -; RV32IFD-NEXT: .LBB45_24: # %entry -; RV32IFD-NEXT: lui a0, 524288 -; RV32IFD-NEXT: mv a1, a5 -; RV32IFD-NEXT: bgeu a4, a5, .LBB45_9 -; RV32IFD-NEXT: j .LBB45_10 -; RV32IFD-NEXT: .LBB45_25: # %entry ; RV32IFD-NEXT: mv a1, a0 ; RV32IFD-NEXT: mv t0, a3 -; RV32IFD-NEXT: bltu a4, a5, .LBB45_12 -; RV32IFD-NEXT: .LBB45_26: # %entry +; RV32IFD-NEXT: bltu a4, a5, .LBB45_14 +; RV32IFD-NEXT: .LBB45_24: # %entry ; RV32IFD-NEXT: li t0, 0 ; RV32IFD-NEXT: mv a0, a3 -; RV32IFD-NEXT: beq a5, a4, .LBB45_13 -; RV32IFD-NEXT: .LBB45_27: # %entry +; RV32IFD-NEXT: beq a5, a4, .LBB45_15 +; RV32IFD-NEXT: .LBB45_25: # %entry ; RV32IFD-NEXT: mv a0, t0 -; RV32IFD-NEXT: bgez a2, .LBB45_14 -; RV32IFD-NEXT: .LBB45_28: # %entry +; RV32IFD-NEXT: bgez a2, .LBB45_16 +; RV32IFD-NEXT: .LBB45_26: # %entry ; RV32IFD-NEXT: li a3, 0 -; RV32IFD-NEXT: bne a6, a7, .LBB45_15 -; RV32IFD-NEXT: j .LBB45_16 +; RV32IFD-NEXT: bne a6, a7, .LBB45_17 +; RV32IFD-NEXT: j .LBB45_18 ; ; RV64IFD-LABEL: stest_f64i64_mm: ; RV64IFD: # %bb.0: # %entry @@ -3401,39 +3369,30 @@ ; RV32IF-NEXT: addi a0, sp, 8 ; RV32IF-NEXT: call __fixunsdfti@plt ; RV32IF-NEXT: lw a0, 20(sp) -; RV32IF-NEXT: lw a3, 16(sp) -; RV32IF-NEXT: li a1, 0 +; RV32IF-NEXT: lw a1, 16(sp) ; RV32IF-NEXT: beqz a0, .LBB46_3 ; RV32IF-NEXT: # %bb.1: # %entry -; RV32IF-NEXT: mv a2, a1 +; RV32IF-NEXT: li a2, 0 ; RV32IF-NEXT: beqz a2, .LBB46_4 ; RV32IF-NEXT: .LBB46_2: -; RV32IF-NEXT: lw a4, 8(sp) +; RV32IF-NEXT: lw a3, 8(sp) ; RV32IF-NEXT: j .LBB46_5 ; RV32IF-NEXT: .LBB46_3: -; RV32IF-NEXT: seqz a2, a3 +; RV32IF-NEXT: seqz a2, a1 ; RV32IF-NEXT: bnez a2, .LBB46_2 ; RV32IF-NEXT: .LBB46_4: # %entry -; RV32IF-NEXT: mv a4, a1 +; RV32IF-NEXT: li a3, 0 ; RV32IF-NEXT: .LBB46_5: # %entry -; RV32IF-NEXT: xori a3, a3, 1 -; RV32IF-NEXT: or a3, a3, a0 -; RV32IF-NEXT: mv a0, a1 -; RV32IF-NEXT: beqz a3, .LBB46_7 -; RV32IF-NEXT: # %bb.6: # %entry -; RV32IF-NEXT: mv a0, a4 -; RV32IF-NEXT: .LBB46_7: # %entry -; RV32IF-NEXT: bnez a2, .LBB46_9 -; RV32IF-NEXT: # %bb.8: # %entry -; RV32IF-NEXT: mv a2, a1 -; RV32IF-NEXT: bnez a3, .LBB46_10 -; RV32IF-NEXT: j .LBB46_11 -; RV32IF-NEXT: .LBB46_9: +; RV32IF-NEXT: xori a1, a1, 1 +; RV32IF-NEXT: or a0, a1, a0 +; RV32IF-NEXT: seqz a0, a0 +; RV32IF-NEXT: addi a1, a0, -1 +; RV32IF-NEXT: and a0, a1, a3 +; RV32IF-NEXT: beqz a2, .LBB46_7 +; RV32IF-NEXT: # %bb.6: ; RV32IF-NEXT: lw a2, 12(sp) -; RV32IF-NEXT: beqz a3, .LBB46_11 -; RV32IF-NEXT: .LBB46_10: # %entry -; RV32IF-NEXT: mv a1, a2 -; RV32IF-NEXT: .LBB46_11: # %entry +; RV32IF-NEXT: .LBB46_7: # %entry +; RV32IF-NEXT: and a1, a1, a2 ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret @@ -3445,17 +3404,14 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixunsdfti@plt -; RV64-NEXT: mv a2, a0 -; RV64-NEXT: li a0, 0 ; RV64-NEXT: beqz a1, .LBB46_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: mv a2, a0 +; RV64-NEXT: li a0, 0 ; 
RV64-NEXT: .LBB46_2: # %entry -; RV64-NEXT: li a3, 1 -; RV64-NEXT: beq a1, a3, .LBB46_4 -; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: .LBB46_4: # %entry +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -3469,39 +3425,30 @@ ; RV32IFD-NEXT: addi a0, sp, 8 ; RV32IFD-NEXT: call __fixunsdfti@plt ; RV32IFD-NEXT: lw a0, 20(sp) -; RV32IFD-NEXT: lw a3, 16(sp) -; RV32IFD-NEXT: li a1, 0 +; RV32IFD-NEXT: lw a1, 16(sp) ; RV32IFD-NEXT: beqz a0, .LBB46_3 ; RV32IFD-NEXT: # %bb.1: # %entry -; RV32IFD-NEXT: mv a2, a1 +; RV32IFD-NEXT: li a2, 0 ; RV32IFD-NEXT: beqz a2, .LBB46_4 ; RV32IFD-NEXT: .LBB46_2: -; RV32IFD-NEXT: lw a4, 8(sp) +; RV32IFD-NEXT: lw a3, 8(sp) ; RV32IFD-NEXT: j .LBB46_5 ; RV32IFD-NEXT: .LBB46_3: -; RV32IFD-NEXT: seqz a2, a3 +; RV32IFD-NEXT: seqz a2, a1 ; RV32IFD-NEXT: bnez a2, .LBB46_2 ; RV32IFD-NEXT: .LBB46_4: # %entry -; RV32IFD-NEXT: mv a4, a1 +; RV32IFD-NEXT: li a3, 0 ; RV32IFD-NEXT: .LBB46_5: # %entry -; RV32IFD-NEXT: xori a3, a3, 1 -; RV32IFD-NEXT: or a3, a3, a0 -; RV32IFD-NEXT: mv a0, a1 -; RV32IFD-NEXT: beqz a3, .LBB46_7 -; RV32IFD-NEXT: # %bb.6: # %entry -; RV32IFD-NEXT: mv a0, a4 -; RV32IFD-NEXT: .LBB46_7: # %entry -; RV32IFD-NEXT: bnez a2, .LBB46_9 -; RV32IFD-NEXT: # %bb.8: # %entry -; RV32IFD-NEXT: mv a2, a1 -; RV32IFD-NEXT: bnez a3, .LBB46_10 -; RV32IFD-NEXT: j .LBB46_11 -; RV32IFD-NEXT: .LBB46_9: +; RV32IFD-NEXT: xori a1, a1, 1 +; RV32IFD-NEXT: or a0, a1, a0 +; RV32IFD-NEXT: seqz a0, a0 +; RV32IFD-NEXT: addi a1, a0, -1 +; RV32IFD-NEXT: and a0, a1, a3 +; RV32IFD-NEXT: beqz a2, .LBB46_7 +; RV32IFD-NEXT: # %bb.6: ; RV32IFD-NEXT: lw a2, 12(sp) -; RV32IFD-NEXT: beqz a3, .LBB46_11 -; RV32IFD-NEXT: .LBB46_10: # %entry -; RV32IFD-NEXT: mv a1, a2 -; RV32IFD-NEXT: .LBB46_11: # %entry +; RV32IFD-NEXT: .LBB46_7: # %entry +; RV32IFD-NEXT: and a1, a1, a2 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret @@ -3530,102 +3477,97 @@ ; RV32IF-NEXT: slti a0, a2, 0 ; RV32IF-NEXT: beqz a0, .LBB47_4 ; RV32IF-NEXT: .LBB47_2: -; RV32IF-NEXT: lw a5, 12(sp) -; RV32IF-NEXT: j .LBB47_5 +; RV32IF-NEXT: lw a1, 12(sp) +; RV32IF-NEXT: bnez a0, .LBB47_5 +; RV32IF-NEXT: j .LBB47_6 ; RV32IF-NEXT: .LBB47_3: ; RV32IF-NEXT: seqz a0, a3 ; RV32IF-NEXT: bnez a0, .LBB47_2 ; RV32IF-NEXT: .LBB47_4: # %entry -; RV32IF-NEXT: li a5, 0 -; RV32IF-NEXT: .LBB47_5: # %entry -; RV32IF-NEXT: xori a1, a3, 1 -; RV32IF-NEXT: or a4, a1, a2 ; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: beqz a4, .LBB47_7 -; RV32IF-NEXT: # %bb.6: # %entry -; RV32IF-NEXT: mv a1, a5 -; RV32IF-NEXT: .LBB47_7: # %entry -; RV32IF-NEXT: bnez a0, .LBB47_9 -; RV32IF-NEXT: # %bb.8: # %entry -; RV32IF-NEXT: li a5, 0 -; RV32IF-NEXT: li a0, 0 -; RV32IF-NEXT: bnez a4, .LBB47_10 -; RV32IF-NEXT: j .LBB47_11 -; RV32IF-NEXT: .LBB47_9: -; RV32IF-NEXT: lw a5, 8(sp) -; RV32IF-NEXT: li a0, 0 -; RV32IF-NEXT: beqz a4, .LBB47_11 -; RV32IF-NEXT: .LBB47_10: # %entry -; RV32IF-NEXT: mv a0, a5 -; RV32IF-NEXT: .LBB47_11: # %entry +; RV32IF-NEXT: beqz a0, .LBB47_6 +; RV32IF-NEXT: .LBB47_5: +; RV32IF-NEXT: lw a0, 8(sp) +; RV32IF-NEXT: .LBB47_6: # %entry ; RV32IF-NEXT: li a5, 1 ; RV32IF-NEXT: mv a4, a3 -; RV32IF-NEXT: bgez a2, .LBB47_17 -; RV32IF-NEXT: # %bb.12: # %entry -; RV32IF-NEXT: bgeu a3, a5, .LBB47_18 -; RV32IF-NEXT: .LBB47_13: # %entry -; RV32IF-NEXT: bnez a2, .LBB47_19 -; RV32IF-NEXT: .LBB47_14: # %entry -; RV32IF-NEXT: bgez a2, .LBB47_20 -; 
RV32IF-NEXT: .LBB47_15: # %entry -; RV32IF-NEXT: beqz a2, .LBB47_21 -; RV32IF-NEXT: .LBB47_16: # %entry +; RV32IF-NEXT: bgez a2, .LBB47_12 +; RV32IF-NEXT: # %bb.7: # %entry +; RV32IF-NEXT: xori a6, a3, 1 +; RV32IF-NEXT: bgeu a3, a5, .LBB47_13 +; RV32IF-NEXT: .LBB47_8: # %entry +; RV32IF-NEXT: or a5, a6, a2 +; RV32IF-NEXT: bnez a2, .LBB47_14 +; RV32IF-NEXT: .LBB47_9: # %entry +; RV32IF-NEXT: seqz a4, a5 +; RV32IF-NEXT: bgez a2, .LBB47_15 +; RV32IF-NEXT: .LBB47_10: # %entry +; RV32IF-NEXT: addi a5, a4, -1 +; RV32IF-NEXT: beqz a2, .LBB47_16 +; RV32IF-NEXT: .LBB47_11: # %entry ; RV32IF-NEXT: sgtz a4, a2 -; RV32IF-NEXT: mv a5, a0 -; RV32IF-NEXT: beqz a4, .LBB47_22 -; RV32IF-NEXT: j .LBB47_23 -; RV32IF-NEXT: .LBB47_17: # %entry +; RV32IF-NEXT: j .LBB47_17 +; RV32IF-NEXT: .LBB47_12: # %entry ; RV32IF-NEXT: li a4, 1 -; RV32IF-NEXT: bltu a3, a5, .LBB47_13 -; RV32IF-NEXT: .LBB47_18: # %entry +; RV32IF-NEXT: xori a6, a3, 1 +; RV32IF-NEXT: bltu a3, a5, .LBB47_8 +; RV32IF-NEXT: .LBB47_13: # %entry ; RV32IF-NEXT: li a3, 1 -; RV32IF-NEXT: beqz a2, .LBB47_14 -; RV32IF-NEXT: .LBB47_19: # %entry +; RV32IF-NEXT: or a5, a6, a2 +; RV32IF-NEXT: beqz a2, .LBB47_9 +; RV32IF-NEXT: .LBB47_14: # %entry ; RV32IF-NEXT: mv a3, a4 -; RV32IF-NEXT: bltz a2, .LBB47_15 -; RV32IF-NEXT: .LBB47_20: # %entry +; RV32IF-NEXT: seqz a4, a5 +; RV32IF-NEXT: bltz a2, .LBB47_10 +; RV32IF-NEXT: .LBB47_15: # %entry ; RV32IF-NEXT: li a2, 0 -; RV32IF-NEXT: bnez a2, .LBB47_16 -; RV32IF-NEXT: .LBB47_21: +; RV32IF-NEXT: addi a5, a4, -1 +; RV32IF-NEXT: bnez a2, .LBB47_11 +; RV32IF-NEXT: .LBB47_16: ; RV32IF-NEXT: snez a4, a3 +; RV32IF-NEXT: .LBB47_17: # %entry +; RV32IF-NEXT: and a0, a5, a0 +; RV32IF-NEXT: and a1, a5, a1 ; RV32IF-NEXT: mv a5, a0 -; RV32IF-NEXT: bnez a4, .LBB47_23 -; RV32IF-NEXT: .LBB47_22: # %entry -; RV32IF-NEXT: li a5, 0 -; RV32IF-NEXT: .LBB47_23: # %entry +; RV32IF-NEXT: beqz a4, .LBB47_25 +; RV32IF-NEXT: # %bb.18: # %entry ; RV32IF-NEXT: mv a6, a0 -; RV32IF-NEXT: beqz a1, .LBB47_30 -; RV32IF-NEXT: # %bb.24: # %entry -; RV32IF-NEXT: bnez a1, .LBB47_31 -; RV32IF-NEXT: .LBB47_25: # %entry +; RV32IF-NEXT: beqz a1, .LBB47_26 +; RV32IF-NEXT: .LBB47_19: # %entry +; RV32IF-NEXT: bnez a1, .LBB47_27 +; RV32IF-NEXT: .LBB47_20: # %entry ; RV32IF-NEXT: or a2, a3, a2 -; RV32IF-NEXT: bnez a2, .LBB47_32 -; RV32IF-NEXT: .LBB47_26: # %entry +; RV32IF-NEXT: bnez a2, .LBB47_28 +; RV32IF-NEXT: .LBB47_21: # %entry ; RV32IF-NEXT: mv a3, a1 -; RV32IF-NEXT: beqz a4, .LBB47_33 -; RV32IF-NEXT: .LBB47_27: # %entry -; RV32IF-NEXT: beqz a2, .LBB47_29 -; RV32IF-NEXT: .LBB47_28: # %entry +; RV32IF-NEXT: beqz a4, .LBB47_29 +; RV32IF-NEXT: .LBB47_22: # %entry +; RV32IF-NEXT: beqz a2, .LBB47_24 +; RV32IF-NEXT: .LBB47_23: # %entry ; RV32IF-NEXT: mv a1, a3 -; RV32IF-NEXT: .LBB47_29: # %entry +; RV32IF-NEXT: .LBB47_24: # %entry ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret -; RV32IF-NEXT: .LBB47_30: # %entry +; RV32IF-NEXT: .LBB47_25: # %entry +; RV32IF-NEXT: li a5, 0 +; RV32IF-NEXT: mv a6, a0 +; RV32IF-NEXT: bnez a1, .LBB47_19 +; RV32IF-NEXT: .LBB47_26: # %entry ; RV32IF-NEXT: li a6, 0 -; RV32IF-NEXT: beqz a1, .LBB47_25 -; RV32IF-NEXT: .LBB47_31: # %entry +; RV32IF-NEXT: beqz a1, .LBB47_20 +; RV32IF-NEXT: .LBB47_27: # %entry ; RV32IF-NEXT: mv a0, a6 ; RV32IF-NEXT: or a2, a3, a2 -; RV32IF-NEXT: beqz a2, .LBB47_26 -; RV32IF-NEXT: .LBB47_32: # %entry +; RV32IF-NEXT: beqz a2, .LBB47_21 +; RV32IF-NEXT: .LBB47_28: # %entry ; RV32IF-NEXT: mv a0, a5 ; RV32IF-NEXT: mv a3, a1 -; RV32IF-NEXT: bnez a4, .LBB47_27 -; 
RV32IF-NEXT: .LBB47_33: # %entry +; RV32IF-NEXT: bnez a4, .LBB47_22 +; RV32IF-NEXT: .LBB47_29: # %entry ; RV32IF-NEXT: li a3, 0 -; RV32IF-NEXT: bnez a2, .LBB47_28 -; RV32IF-NEXT: j .LBB47_29 +; RV32IF-NEXT: bnez a2, .LBB47_23 +; RV32IF-NEXT: j .LBB47_24 ; ; RV64-LABEL: ustest_f64i64_mm: ; RV64: # %bb.0: # %entry @@ -3634,37 +3576,28 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixdfti@plt -; RV64-NEXT: mv a2, a0 -; RV64-NEXT: li a4, 1 -; RV64-NEXT: mv a3, a1 -; RV64-NEXT: bgtz a1, .LBB47_6 +; RV64-NEXT: mv a2, a1 +; RV64-NEXT: blez a1, .LBB47_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: li a0, 0 -; RV64-NEXT: bne a1, a4, .LBB47_7 +; RV64-NEXT: li a2, 1 ; RV64-NEXT: .LBB47_2: # %entry +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: mv a1, a0 -; RV64-NEXT: blez a3, .LBB47_8 -; RV64-NEXT: .LBB47_3: # %entry -; RV64-NEXT: beqz a3, .LBB47_5 +; RV64-NEXT: bgtz a2, .LBB47_4 +; RV64-NEXT: # %bb.3: # %entry +; RV64-NEXT: li a1, 0 ; RV64-NEXT: .LBB47_4: # %entry +; RV64-NEXT: beqz a2, .LBB47_6 +; RV64-NEXT: # %bb.5: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB47_5: # %entry +; RV64-NEXT: .LBB47_6: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret -; RV64-NEXT: .LBB47_6: # %entry -; RV64-NEXT: li a2, 0 -; RV64-NEXT: li a3, 1 -; RV64-NEXT: li a0, 0 -; RV64-NEXT: beq a1, a4, .LBB47_2 -; RV64-NEXT: .LBB47_7: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: bgtz a3, .LBB47_3 -; RV64-NEXT: .LBB47_8: # %entry -; RV64-NEXT: li a1, 0 -; RV64-NEXT: bnez a3, .LBB47_4 -; RV64-NEXT: j .LBB47_5 ; ; RV32IFD-LABEL: ustest_f64i64_mm: ; RV32IFD: # %bb.0: # %entry @@ -3681,102 +3614,97 @@ ; RV32IFD-NEXT: slti a0, a2, 0 ; RV32IFD-NEXT: beqz a0, .LBB47_4 ; RV32IFD-NEXT: .LBB47_2: -; RV32IFD-NEXT: lw a5, 12(sp) -; RV32IFD-NEXT: j .LBB47_5 +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: bnez a0, .LBB47_5 +; RV32IFD-NEXT: j .LBB47_6 ; RV32IFD-NEXT: .LBB47_3: ; RV32IFD-NEXT: seqz a0, a3 ; RV32IFD-NEXT: bnez a0, .LBB47_2 ; RV32IFD-NEXT: .LBB47_4: # %entry -; RV32IFD-NEXT: li a5, 0 -; RV32IFD-NEXT: .LBB47_5: # %entry -; RV32IFD-NEXT: xori a1, a3, 1 -; RV32IFD-NEXT: or a4, a1, a2 ; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: beqz a4, .LBB47_7 -; RV32IFD-NEXT: # %bb.6: # %entry -; RV32IFD-NEXT: mv a1, a5 -; RV32IFD-NEXT: .LBB47_7: # %entry -; RV32IFD-NEXT: bnez a0, .LBB47_9 -; RV32IFD-NEXT: # %bb.8: # %entry -; RV32IFD-NEXT: li a5, 0 -; RV32IFD-NEXT: li a0, 0 -; RV32IFD-NEXT: bnez a4, .LBB47_10 -; RV32IFD-NEXT: j .LBB47_11 -; RV32IFD-NEXT: .LBB47_9: -; RV32IFD-NEXT: lw a5, 8(sp) -; RV32IFD-NEXT: li a0, 0 -; RV32IFD-NEXT: beqz a4, .LBB47_11 -; RV32IFD-NEXT: .LBB47_10: # %entry -; RV32IFD-NEXT: mv a0, a5 -; RV32IFD-NEXT: .LBB47_11: # %entry +; RV32IFD-NEXT: beqz a0, .LBB47_6 +; RV32IFD-NEXT: .LBB47_5: +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: .LBB47_6: # %entry ; RV32IFD-NEXT: li a5, 1 ; RV32IFD-NEXT: mv a4, a3 -; RV32IFD-NEXT: bgez a2, .LBB47_17 -; RV32IFD-NEXT: # %bb.12: # %entry -; RV32IFD-NEXT: bgeu a3, a5, .LBB47_18 -; RV32IFD-NEXT: .LBB47_13: # %entry -; RV32IFD-NEXT: bnez a2, .LBB47_19 -; RV32IFD-NEXT: .LBB47_14: # %entry -; RV32IFD-NEXT: bgez a2, .LBB47_20 -; RV32IFD-NEXT: .LBB47_15: # %entry -; RV32IFD-NEXT: beqz a2, .LBB47_21 -; RV32IFD-NEXT: .LBB47_16: # %entry +; RV32IFD-NEXT: bgez a2, .LBB47_12 +; RV32IFD-NEXT: # %bb.7: # %entry +; RV32IFD-NEXT: xori a6, a3, 1 +; RV32IFD-NEXT: bgeu a3, a5, .LBB47_13 
+; RV32IFD-NEXT: .LBB47_8: # %entry +; RV32IFD-NEXT: or a5, a6, a2 +; RV32IFD-NEXT: bnez a2, .LBB47_14 +; RV32IFD-NEXT: .LBB47_9: # %entry +; RV32IFD-NEXT: seqz a4, a5 +; RV32IFD-NEXT: bgez a2, .LBB47_15 +; RV32IFD-NEXT: .LBB47_10: # %entry +; RV32IFD-NEXT: addi a5, a4, -1 +; RV32IFD-NEXT: beqz a2, .LBB47_16 +; RV32IFD-NEXT: .LBB47_11: # %entry ; RV32IFD-NEXT: sgtz a4, a2 -; RV32IFD-NEXT: mv a5, a0 -; RV32IFD-NEXT: beqz a4, .LBB47_22 -; RV32IFD-NEXT: j .LBB47_23 -; RV32IFD-NEXT: .LBB47_17: # %entry +; RV32IFD-NEXT: j .LBB47_17 +; RV32IFD-NEXT: .LBB47_12: # %entry ; RV32IFD-NEXT: li a4, 1 -; RV32IFD-NEXT: bltu a3, a5, .LBB47_13 -; RV32IFD-NEXT: .LBB47_18: # %entry +; RV32IFD-NEXT: xori a6, a3, 1 +; RV32IFD-NEXT: bltu a3, a5, .LBB47_8 +; RV32IFD-NEXT: .LBB47_13: # %entry ; RV32IFD-NEXT: li a3, 1 -; RV32IFD-NEXT: beqz a2, .LBB47_14 -; RV32IFD-NEXT: .LBB47_19: # %entry +; RV32IFD-NEXT: or a5, a6, a2 +; RV32IFD-NEXT: beqz a2, .LBB47_9 +; RV32IFD-NEXT: .LBB47_14: # %entry ; RV32IFD-NEXT: mv a3, a4 -; RV32IFD-NEXT: bltz a2, .LBB47_15 -; RV32IFD-NEXT: .LBB47_20: # %entry +; RV32IFD-NEXT: seqz a4, a5 +; RV32IFD-NEXT: bltz a2, .LBB47_10 +; RV32IFD-NEXT: .LBB47_15: # %entry ; RV32IFD-NEXT: li a2, 0 -; RV32IFD-NEXT: bnez a2, .LBB47_16 -; RV32IFD-NEXT: .LBB47_21: +; RV32IFD-NEXT: addi a5, a4, -1 +; RV32IFD-NEXT: bnez a2, .LBB47_11 +; RV32IFD-NEXT: .LBB47_16: ; RV32IFD-NEXT: snez a4, a3 +; RV32IFD-NEXT: .LBB47_17: # %entry +; RV32IFD-NEXT: and a0, a5, a0 +; RV32IFD-NEXT: and a1, a5, a1 ; RV32IFD-NEXT: mv a5, a0 -; RV32IFD-NEXT: bnez a4, .LBB47_23 -; RV32IFD-NEXT: .LBB47_22: # %entry -; RV32IFD-NEXT: li a5, 0 -; RV32IFD-NEXT: .LBB47_23: # %entry +; RV32IFD-NEXT: beqz a4, .LBB47_25 +; RV32IFD-NEXT: # %bb.18: # %entry ; RV32IFD-NEXT: mv a6, a0 -; RV32IFD-NEXT: beqz a1, .LBB47_30 -; RV32IFD-NEXT: # %bb.24: # %entry -; RV32IFD-NEXT: bnez a1, .LBB47_31 -; RV32IFD-NEXT: .LBB47_25: # %entry +; RV32IFD-NEXT: beqz a1, .LBB47_26 +; RV32IFD-NEXT: .LBB47_19: # %entry +; RV32IFD-NEXT: bnez a1, .LBB47_27 +; RV32IFD-NEXT: .LBB47_20: # %entry ; RV32IFD-NEXT: or a2, a3, a2 -; RV32IFD-NEXT: bnez a2, .LBB47_32 -; RV32IFD-NEXT: .LBB47_26: # %entry +; RV32IFD-NEXT: bnez a2, .LBB47_28 +; RV32IFD-NEXT: .LBB47_21: # %entry ; RV32IFD-NEXT: mv a3, a1 -; RV32IFD-NEXT: beqz a4, .LBB47_33 -; RV32IFD-NEXT: .LBB47_27: # %entry -; RV32IFD-NEXT: beqz a2, .LBB47_29 -; RV32IFD-NEXT: .LBB47_28: # %entry +; RV32IFD-NEXT: beqz a4, .LBB47_29 +; RV32IFD-NEXT: .LBB47_22: # %entry +; RV32IFD-NEXT: beqz a2, .LBB47_24 +; RV32IFD-NEXT: .LBB47_23: # %entry ; RV32IFD-NEXT: mv a1, a3 -; RV32IFD-NEXT: .LBB47_29: # %entry +; RV32IFD-NEXT: .LBB47_24: # %entry ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret -; RV32IFD-NEXT: .LBB47_30: # %entry +; RV32IFD-NEXT: .LBB47_25: # %entry +; RV32IFD-NEXT: li a5, 0 +; RV32IFD-NEXT: mv a6, a0 +; RV32IFD-NEXT: bnez a1, .LBB47_19 +; RV32IFD-NEXT: .LBB47_26: # %entry ; RV32IFD-NEXT: li a6, 0 -; RV32IFD-NEXT: beqz a1, .LBB47_25 -; RV32IFD-NEXT: .LBB47_31: # %entry +; RV32IFD-NEXT: beqz a1, .LBB47_20 +; RV32IFD-NEXT: .LBB47_27: # %entry ; RV32IFD-NEXT: mv a0, a6 ; RV32IFD-NEXT: or a2, a3, a2 -; RV32IFD-NEXT: beqz a2, .LBB47_26 -; RV32IFD-NEXT: .LBB47_32: # %entry +; RV32IFD-NEXT: beqz a2, .LBB47_21 +; RV32IFD-NEXT: .LBB47_28: # %entry ; RV32IFD-NEXT: mv a0, a5 ; RV32IFD-NEXT: mv a3, a1 -; RV32IFD-NEXT: bnez a4, .LBB47_27 -; RV32IFD-NEXT: .LBB47_33: # %entry +; RV32IFD-NEXT: bnez a4, .LBB47_22 +; RV32IFD-NEXT: .LBB47_29: # %entry ; RV32IFD-NEXT: li a3, 0 -; 
RV32IFD-NEXT: bnez a2, .LBB47_28 -; RV32IFD-NEXT: j .LBB47_29 +; RV32IFD-NEXT: bnez a2, .LBB47_23 +; RV32IFD-NEXT: j .LBB47_24 entry: %conv = fptosi double %x to i128 %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) @@ -3801,100 +3729,92 @@ ; RV32-NEXT: lui a4, 524288 ; RV32-NEXT: addi a1, a4, -1 ; RV32-NEXT: mv a3, a7 -; RV32-NEXT: bne a5, a1, .LBB48_17 +; RV32-NEXT: bne a5, a1, .LBB48_19 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: or a6, a0, a2 -; RV32-NEXT: bnez a6, .LBB48_18 +; RV32-NEXT: bnez a6, .LBB48_20 ; RV32-NEXT: .LBB48_2: # %entry ; RV32-NEXT: mv a7, a5 -; RV32-NEXT: bgez a2, .LBB48_19 +; RV32-NEXT: bgez a2, .LBB48_21 ; RV32-NEXT: .LBB48_3: # %entry -; RV32-NEXT: bgeu a5, a1, .LBB48_20 +; RV32-NEXT: bgeu a5, a1, .LBB48_22 ; RV32-NEXT: .LBB48_4: # %entry -; RV32-NEXT: bnez a6, .LBB48_21 +; RV32-NEXT: beqz a6, .LBB48_6 ; RV32-NEXT: .LBB48_5: # %entry -; RV32-NEXT: li a6, 0 -; RV32-NEXT: bnez a2, .LBB48_22 +; RV32-NEXT: mv a5, a7 ; RV32-NEXT: .LBB48_6: # %entry -; RV32-NEXT: bgez a2, .LBB48_23 -; RV32-NEXT: .LBB48_7: # %entry -; RV32-NEXT: mv a0, a5 -; RV32-NEXT: bltz a2, .LBB48_24 +; RV32-NEXT: srai a1, a2, 31 +; RV32-NEXT: seqz a6, a2 +; RV32-NEXT: bltz a2, .LBB48_8 +; RV32-NEXT: # %bb.7: # %entry +; RV32-NEXT: li a2, 0 ; RV32-NEXT: .LBB48_8: # %entry +; RV32-NEXT: and a1, a1, a0 +; RV32-NEXT: addi a6, a6, -1 +; RV32-NEXT: mv a0, a5 +; RV32-NEXT: bgez a2, .LBB48_10 +; RV32-NEXT: # %bb.9: # %entry +; RV32-NEXT: lui a0, 524288 +; RV32-NEXT: .LBB48_10: # %entry +; RV32-NEXT: and a6, a6, a1 ; RV32-NEXT: mv a1, a5 -; RV32-NEXT: bltu a4, a5, .LBB48_10 -; RV32-NEXT: .LBB48_9: # %entry +; RV32-NEXT: bltu a4, a5, .LBB48_12 +; RV32-NEXT: # %bb.11: # %entry ; RV32-NEXT: lui a1, 524288 -; RV32-NEXT: .LBB48_10: # %entry +; RV32-NEXT: .LBB48_12: # %entry ; RV32-NEXT: and a6, a6, a2 ; RV32-NEXT: li a7, -1 -; RV32-NEXT: bne a6, a7, .LBB48_25 -; RV32-NEXT: # %bb.11: # %entry +; RV32-NEXT: bne a6, a7, .LBB48_23 +; RV32-NEXT: # %bb.13: # %entry ; RV32-NEXT: mv t0, a3 -; RV32-NEXT: bgeu a4, a5, .LBB48_26 -; RV32-NEXT: .LBB48_12: # %entry -; RV32-NEXT: mv a0, a3 -; RV32-NEXT: bne a5, a4, .LBB48_27 -; RV32-NEXT: .LBB48_13: # %entry -; RV32-NEXT: bltz a2, .LBB48_28 +; RV32-NEXT: bgeu a4, a5, .LBB48_24 ; RV32-NEXT: .LBB48_14: # %entry -; RV32-NEXT: beq a6, a7, .LBB48_16 -; RV32-NEXT: .LBB48_15: # %entry ; RV32-NEXT: mv a0, a3 +; RV32-NEXT: bne a5, a4, .LBB48_25 +; RV32-NEXT: .LBB48_15: # %entry +; RV32-NEXT: bltz a2, .LBB48_26 ; RV32-NEXT: .LBB48_16: # %entry +; RV32-NEXT: beq a6, a7, .LBB48_18 +; RV32-NEXT: .LBB48_17: # %entry +; RV32-NEXT: mv a0, a3 +; RV32-NEXT: .LBB48_18: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret -; RV32-NEXT: .LBB48_17: # %entry +; RV32-NEXT: .LBB48_19: # %entry ; RV32-NEXT: sltu a3, a5, a1 ; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: or a3, a3, a7 ; RV32-NEXT: or a6, a0, a2 ; RV32-NEXT: beqz a6, .LBB48_2 -; RV32-NEXT: .LBB48_18: # %entry +; RV32-NEXT: .LBB48_20: # %entry ; RV32-NEXT: slti a3, a2, 0 ; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: or a3, a3, a7 ; RV32-NEXT: mv a7, a5 ; RV32-NEXT: bltz a2, .LBB48_3 -; RV32-NEXT: .LBB48_19: # %entry +; RV32-NEXT: .LBB48_21: # %entry ; RV32-NEXT: mv a7, a1 ; RV32-NEXT: bltu a5, a1, .LBB48_4 -; RV32-NEXT: .LBB48_20: # %entry -; RV32-NEXT: mv a5, a1 -; RV32-NEXT: beqz a6, .LBB48_5 -; RV32-NEXT: .LBB48_21: # %entry -; RV32-NEXT: mv a5, a7 -; RV32-NEXT: li a6, 0 -; RV32-NEXT: beqz a2, .LBB48_6 ; RV32-NEXT: .LBB48_22: # %entry -; RV32-NEXT: srai a1, 
a2, 31 -; RV32-NEXT: and a6, a1, a0 -; RV32-NEXT: bltz a2, .LBB48_7 +; RV32-NEXT: mv a5, a1 +; RV32-NEXT: bnez a6, .LBB48_5 +; RV32-NEXT: j .LBB48_6 ; RV32-NEXT: .LBB48_23: # %entry -; RV32-NEXT: li a2, 0 -; RV32-NEXT: mv a0, a5 -; RV32-NEXT: bgez a2, .LBB48_8 -; RV32-NEXT: .LBB48_24: # %entry -; RV32-NEXT: lui a0, 524288 -; RV32-NEXT: mv a1, a5 -; RV32-NEXT: bgeu a4, a5, .LBB48_9 -; RV32-NEXT: j .LBB48_10 -; RV32-NEXT: .LBB48_25: # %entry ; RV32-NEXT: mv a1, a0 ; RV32-NEXT: mv t0, a3 -; RV32-NEXT: bltu a4, a5, .LBB48_12 -; RV32-NEXT: .LBB48_26: # %entry +; RV32-NEXT: bltu a4, a5, .LBB48_14 +; RV32-NEXT: .LBB48_24: # %entry ; RV32-NEXT: li t0, 0 ; RV32-NEXT: mv a0, a3 -; RV32-NEXT: beq a5, a4, .LBB48_13 -; RV32-NEXT: .LBB48_27: # %entry +; RV32-NEXT: beq a5, a4, .LBB48_15 +; RV32-NEXT: .LBB48_25: # %entry ; RV32-NEXT: mv a0, t0 -; RV32-NEXT: bgez a2, .LBB48_14 -; RV32-NEXT: .LBB48_28: # %entry +; RV32-NEXT: bgez a2, .LBB48_16 +; RV32-NEXT: .LBB48_26: # %entry ; RV32-NEXT: li a3, 0 -; RV32-NEXT: bne a6, a7, .LBB48_15 -; RV32-NEXT: j .LBB48_16 +; RV32-NEXT: bne a6, a7, .LBB48_17 +; RV32-NEXT: j .LBB48_18 ; ; RV64-LABEL: stest_f32i64_mm: ; RV64: # %bb.0: # %entry @@ -3922,39 +3842,30 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixunssfti@plt ; RV32-NEXT: lw a0, 20(sp) -; RV32-NEXT: lw a3, 16(sp) -; RV32-NEXT: li a1, 0 +; RV32-NEXT: lw a1, 16(sp) ; RV32-NEXT: beqz a0, .LBB49_3 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: mv a2, a1 +; RV32-NEXT: li a2, 0 ; RV32-NEXT: beqz a2, .LBB49_4 ; RV32-NEXT: .LBB49_2: -; RV32-NEXT: lw a4, 8(sp) +; RV32-NEXT: lw a3, 8(sp) ; RV32-NEXT: j .LBB49_5 ; RV32-NEXT: .LBB49_3: -; RV32-NEXT: seqz a2, a3 +; RV32-NEXT: seqz a2, a1 ; RV32-NEXT: bnez a2, .LBB49_2 ; RV32-NEXT: .LBB49_4: # %entry -; RV32-NEXT: mv a4, a1 +; RV32-NEXT: li a3, 0 ; RV32-NEXT: .LBB49_5: # %entry -; RV32-NEXT: xori a3, a3, 1 -; RV32-NEXT: or a3, a3, a0 -; RV32-NEXT: mv a0, a1 -; RV32-NEXT: beqz a3, .LBB49_7 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: mv a0, a4 -; RV32-NEXT: .LBB49_7: # %entry -; RV32-NEXT: bnez a2, .LBB49_9 -; RV32-NEXT: # %bb.8: # %entry -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bnez a3, .LBB49_10 -; RV32-NEXT: j .LBB49_11 -; RV32-NEXT: .LBB49_9: +; RV32-NEXT: xori a1, a1, 1 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: addi a1, a0, -1 +; RV32-NEXT: and a0, a1, a3 +; RV32-NEXT: beqz a2, .LBB49_7 +; RV32-NEXT: # %bb.6: ; RV32-NEXT: lw a2, 12(sp) -; RV32-NEXT: beqz a3, .LBB49_11 -; RV32-NEXT: .LBB49_10: # %entry -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB49_11: # %entry +; RV32-NEXT: .LBB49_7: # %entry +; RV32-NEXT: and a1, a1, a2 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -3966,17 +3877,14 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixunssfti@plt -; RV64-NEXT: mv a2, a0 -; RV64-NEXT: li a0, 0 ; RV64-NEXT: beqz a1, .LBB49_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: mv a2, a0 +; RV64-NEXT: li a0, 0 ; RV64-NEXT: .LBB49_2: # %entry -; RV64-NEXT: li a3, 1 -; RV64-NEXT: beq a1, a3, .LBB49_4 -; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: .LBB49_4: # %entry +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -4003,102 +3911,97 @@ ; RV32-NEXT: slti a0, a2, 0 ; RV32-NEXT: beqz a0, .LBB50_4 ; RV32-NEXT: .LBB50_2: -; RV32-NEXT: lw a5, 12(sp) -; RV32-NEXT: j .LBB50_5 
+; RV32-NEXT: lw a1, 12(sp) +; RV32-NEXT: bnez a0, .LBB50_5 +; RV32-NEXT: j .LBB50_6 ; RV32-NEXT: .LBB50_3: ; RV32-NEXT: seqz a0, a3 ; RV32-NEXT: bnez a0, .LBB50_2 ; RV32-NEXT: .LBB50_4: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: .LBB50_5: # %entry -; RV32-NEXT: xori a1, a3, 1 -; RV32-NEXT: or a4, a1, a2 ; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a4, .LBB50_7 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: mv a1, a5 -; RV32-NEXT: .LBB50_7: # %entry -; RV32-NEXT: bnez a0, .LBB50_9 -; RV32-NEXT: # %bb.8: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: li a0, 0 -; RV32-NEXT: bnez a4, .LBB50_10 -; RV32-NEXT: j .LBB50_11 -; RV32-NEXT: .LBB50_9: -; RV32-NEXT: lw a5, 8(sp) -; RV32-NEXT: li a0, 0 -; RV32-NEXT: beqz a4, .LBB50_11 -; RV32-NEXT: .LBB50_10: # %entry -; RV32-NEXT: mv a0, a5 -; RV32-NEXT: .LBB50_11: # %entry +; RV32-NEXT: beqz a0, .LBB50_6 +; RV32-NEXT: .LBB50_5: +; RV32-NEXT: lw a0, 8(sp) +; RV32-NEXT: .LBB50_6: # %entry ; RV32-NEXT: li a5, 1 ; RV32-NEXT: mv a4, a3 -; RV32-NEXT: bgez a2, .LBB50_17 -; RV32-NEXT: # %bb.12: # %entry -; RV32-NEXT: bgeu a3, a5, .LBB50_18 -; RV32-NEXT: .LBB50_13: # %entry -; RV32-NEXT: bnez a2, .LBB50_19 -; RV32-NEXT: .LBB50_14: # %entry -; RV32-NEXT: bgez a2, .LBB50_20 -; RV32-NEXT: .LBB50_15: # %entry -; RV32-NEXT: beqz a2, .LBB50_21 -; RV32-NEXT: .LBB50_16: # %entry +; RV32-NEXT: bgez a2, .LBB50_12 +; RV32-NEXT: # %bb.7: # %entry +; RV32-NEXT: xori a6, a3, 1 +; RV32-NEXT: bgeu a3, a5, .LBB50_13 +; RV32-NEXT: .LBB50_8: # %entry +; RV32-NEXT: or a5, a6, a2 +; RV32-NEXT: bnez a2, .LBB50_14 +; RV32-NEXT: .LBB50_9: # %entry +; RV32-NEXT: seqz a4, a5 +; RV32-NEXT: bgez a2, .LBB50_15 +; RV32-NEXT: .LBB50_10: # %entry +; RV32-NEXT: addi a5, a4, -1 +; RV32-NEXT: beqz a2, .LBB50_16 +; RV32-NEXT: .LBB50_11: # %entry ; RV32-NEXT: sgtz a4, a2 -; RV32-NEXT: mv a5, a0 -; RV32-NEXT: beqz a4, .LBB50_22 -; RV32-NEXT: j .LBB50_23 -; RV32-NEXT: .LBB50_17: # %entry +; RV32-NEXT: j .LBB50_17 +; RV32-NEXT: .LBB50_12: # %entry ; RV32-NEXT: li a4, 1 -; RV32-NEXT: bltu a3, a5, .LBB50_13 -; RV32-NEXT: .LBB50_18: # %entry +; RV32-NEXT: xori a6, a3, 1 +; RV32-NEXT: bltu a3, a5, .LBB50_8 +; RV32-NEXT: .LBB50_13: # %entry ; RV32-NEXT: li a3, 1 -; RV32-NEXT: beqz a2, .LBB50_14 -; RV32-NEXT: .LBB50_19: # %entry +; RV32-NEXT: or a5, a6, a2 +; RV32-NEXT: beqz a2, .LBB50_9 +; RV32-NEXT: .LBB50_14: # %entry ; RV32-NEXT: mv a3, a4 -; RV32-NEXT: bltz a2, .LBB50_15 -; RV32-NEXT: .LBB50_20: # %entry +; RV32-NEXT: seqz a4, a5 +; RV32-NEXT: bltz a2, .LBB50_10 +; RV32-NEXT: .LBB50_15: # %entry ; RV32-NEXT: li a2, 0 -; RV32-NEXT: bnez a2, .LBB50_16 -; RV32-NEXT: .LBB50_21: +; RV32-NEXT: addi a5, a4, -1 +; RV32-NEXT: bnez a2, .LBB50_11 +; RV32-NEXT: .LBB50_16: ; RV32-NEXT: snez a4, a3 +; RV32-NEXT: .LBB50_17: # %entry +; RV32-NEXT: and a0, a5, a0 +; RV32-NEXT: and a1, a5, a1 ; RV32-NEXT: mv a5, a0 -; RV32-NEXT: bnez a4, .LBB50_23 -; RV32-NEXT: .LBB50_22: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: .LBB50_23: # %entry +; RV32-NEXT: beqz a4, .LBB50_25 +; RV32-NEXT: # %bb.18: # %entry ; RV32-NEXT: mv a6, a0 -; RV32-NEXT: beqz a1, .LBB50_30 -; RV32-NEXT: # %bb.24: # %entry -; RV32-NEXT: bnez a1, .LBB50_31 -; RV32-NEXT: .LBB50_25: # %entry +; RV32-NEXT: beqz a1, .LBB50_26 +; RV32-NEXT: .LBB50_19: # %entry +; RV32-NEXT: bnez a1, .LBB50_27 +; RV32-NEXT: .LBB50_20: # %entry ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: bnez a2, .LBB50_32 -; RV32-NEXT: .LBB50_26: # %entry +; RV32-NEXT: bnez a2, .LBB50_28 +; RV32-NEXT: .LBB50_21: # %entry ; RV32-NEXT: mv a3, a1 -; RV32-NEXT: beqz a4, .LBB50_33 -; RV32-NEXT: 
.LBB50_27: # %entry -; RV32-NEXT: beqz a2, .LBB50_29 -; RV32-NEXT: .LBB50_28: # %entry +; RV32-NEXT: beqz a4, .LBB50_29 +; RV32-NEXT: .LBB50_22: # %entry +; RV32-NEXT: beqz a2, .LBB50_24 +; RV32-NEXT: .LBB50_23: # %entry ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB50_29: # %entry +; RV32-NEXT: .LBB50_24: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret -; RV32-NEXT: .LBB50_30: # %entry +; RV32-NEXT: .LBB50_25: # %entry +; RV32-NEXT: li a5, 0 +; RV32-NEXT: mv a6, a0 +; RV32-NEXT: bnez a1, .LBB50_19 +; RV32-NEXT: .LBB50_26: # %entry ; RV32-NEXT: li a6, 0 -; RV32-NEXT: beqz a1, .LBB50_25 -; RV32-NEXT: .LBB50_31: # %entry +; RV32-NEXT: beqz a1, .LBB50_20 +; RV32-NEXT: .LBB50_27: # %entry ; RV32-NEXT: mv a0, a6 ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: beqz a2, .LBB50_26 -; RV32-NEXT: .LBB50_32: # %entry +; RV32-NEXT: beqz a2, .LBB50_21 +; RV32-NEXT: .LBB50_28: # %entry ; RV32-NEXT: mv a0, a5 ; RV32-NEXT: mv a3, a1 -; RV32-NEXT: bnez a4, .LBB50_27 -; RV32-NEXT: .LBB50_33: # %entry +; RV32-NEXT: bnez a4, .LBB50_22 +; RV32-NEXT: .LBB50_29: # %entry ; RV32-NEXT: li a3, 0 -; RV32-NEXT: bnez a2, .LBB50_28 -; RV32-NEXT: j .LBB50_29 +; RV32-NEXT: bnez a2, .LBB50_23 +; RV32-NEXT: j .LBB50_24 ; ; RV64-LABEL: ustest_f32i64_mm: ; RV64: # %bb.0: # %entry @@ -4107,37 +4010,28 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixsfti@plt -; RV64-NEXT: mv a2, a0 -; RV64-NEXT: li a4, 1 -; RV64-NEXT: mv a3, a1 -; RV64-NEXT: bgtz a1, .LBB50_6 +; RV64-NEXT: mv a2, a1 +; RV64-NEXT: blez a1, .LBB50_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: li a0, 0 -; RV64-NEXT: bne a1, a4, .LBB50_7 +; RV64-NEXT: li a2, 1 ; RV64-NEXT: .LBB50_2: # %entry +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: mv a1, a0 -; RV64-NEXT: blez a3, .LBB50_8 -; RV64-NEXT: .LBB50_3: # %entry -; RV64-NEXT: beqz a3, .LBB50_5 +; RV64-NEXT: bgtz a2, .LBB50_4 +; RV64-NEXT: # %bb.3: # %entry +; RV64-NEXT: li a1, 0 ; RV64-NEXT: .LBB50_4: # %entry +; RV64-NEXT: beqz a2, .LBB50_6 +; RV64-NEXT: # %bb.5: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB50_5: # %entry +; RV64-NEXT: .LBB50_6: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret -; RV64-NEXT: .LBB50_6: # %entry -; RV64-NEXT: li a2, 0 -; RV64-NEXT: li a3, 1 -; RV64-NEXT: li a0, 0 -; RV64-NEXT: beq a1, a4, .LBB50_2 -; RV64-NEXT: .LBB50_7: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: bgtz a3, .LBB50_3 -; RV64-NEXT: .LBB50_8: # %entry -; RV64-NEXT: li a1, 0 -; RV64-NEXT: bnez a3, .LBB50_4 -; RV64-NEXT: j .LBB50_5 entry: %conv = fptosi float %x to i128 %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) @@ -4164,100 +4058,92 @@ ; RV32-NEXT: lui a4, 524288 ; RV32-NEXT: addi a1, a4, -1 ; RV32-NEXT: mv a3, a7 -; RV32-NEXT: bne a5, a1, .LBB51_17 +; RV32-NEXT: bne a5, a1, .LBB51_19 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: or a6, a0, a2 -; RV32-NEXT: bnez a6, .LBB51_18 +; RV32-NEXT: bnez a6, .LBB51_20 ; RV32-NEXT: .LBB51_2: # %entry ; RV32-NEXT: mv a7, a5 -; RV32-NEXT: bgez a2, .LBB51_19 +; RV32-NEXT: bgez a2, .LBB51_21 ; RV32-NEXT: .LBB51_3: # %entry -; RV32-NEXT: bgeu a5, a1, .LBB51_20 +; RV32-NEXT: bgeu a5, a1, .LBB51_22 ; RV32-NEXT: .LBB51_4: # %entry -; RV32-NEXT: bnez a6, .LBB51_21 +; RV32-NEXT: beqz a6, .LBB51_6 ; RV32-NEXT: .LBB51_5: # %entry -; RV32-NEXT: li a6, 0 -; RV32-NEXT: bnez a2, .LBB51_22 +; 
RV32-NEXT: mv a5, a7 ; RV32-NEXT: .LBB51_6: # %entry -; RV32-NEXT: bgez a2, .LBB51_23 -; RV32-NEXT: .LBB51_7: # %entry -; RV32-NEXT: mv a0, a5 -; RV32-NEXT: bltz a2, .LBB51_24 +; RV32-NEXT: srai a1, a2, 31 +; RV32-NEXT: seqz a6, a2 +; RV32-NEXT: bltz a2, .LBB51_8 +; RV32-NEXT: # %bb.7: # %entry +; RV32-NEXT: li a2, 0 ; RV32-NEXT: .LBB51_8: # %entry +; RV32-NEXT: and a1, a1, a0 +; RV32-NEXT: addi a6, a6, -1 +; RV32-NEXT: mv a0, a5 +; RV32-NEXT: bgez a2, .LBB51_10 +; RV32-NEXT: # %bb.9: # %entry +; RV32-NEXT: lui a0, 524288 +; RV32-NEXT: .LBB51_10: # %entry +; RV32-NEXT: and a6, a6, a1 ; RV32-NEXT: mv a1, a5 -; RV32-NEXT: bltu a4, a5, .LBB51_10 -; RV32-NEXT: .LBB51_9: # %entry +; RV32-NEXT: bltu a4, a5, .LBB51_12 +; RV32-NEXT: # %bb.11: # %entry ; RV32-NEXT: lui a1, 524288 -; RV32-NEXT: .LBB51_10: # %entry +; RV32-NEXT: .LBB51_12: # %entry ; RV32-NEXT: and a6, a6, a2 ; RV32-NEXT: li a7, -1 -; RV32-NEXT: bne a6, a7, .LBB51_25 -; RV32-NEXT: # %bb.11: # %entry +; RV32-NEXT: bne a6, a7, .LBB51_23 +; RV32-NEXT: # %bb.13: # %entry ; RV32-NEXT: mv t0, a3 -; RV32-NEXT: bgeu a4, a5, .LBB51_26 -; RV32-NEXT: .LBB51_12: # %entry -; RV32-NEXT: mv a0, a3 -; RV32-NEXT: bne a5, a4, .LBB51_27 -; RV32-NEXT: .LBB51_13: # %entry -; RV32-NEXT: bltz a2, .LBB51_28 +; RV32-NEXT: bgeu a4, a5, .LBB51_24 ; RV32-NEXT: .LBB51_14: # %entry -; RV32-NEXT: beq a6, a7, .LBB51_16 -; RV32-NEXT: .LBB51_15: # %entry ; RV32-NEXT: mv a0, a3 +; RV32-NEXT: bne a5, a4, .LBB51_25 +; RV32-NEXT: .LBB51_15: # %entry +; RV32-NEXT: bltz a2, .LBB51_26 ; RV32-NEXT: .LBB51_16: # %entry +; RV32-NEXT: beq a6, a7, .LBB51_18 +; RV32-NEXT: .LBB51_17: # %entry +; RV32-NEXT: mv a0, a3 +; RV32-NEXT: .LBB51_18: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret -; RV32-NEXT: .LBB51_17: # %entry +; RV32-NEXT: .LBB51_19: # %entry ; RV32-NEXT: sltu a3, a5, a1 ; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: or a3, a3, a7 ; RV32-NEXT: or a6, a0, a2 ; RV32-NEXT: beqz a6, .LBB51_2 -; RV32-NEXT: .LBB51_18: # %entry +; RV32-NEXT: .LBB51_20: # %entry ; RV32-NEXT: slti a3, a2, 0 ; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: or a3, a3, a7 ; RV32-NEXT: mv a7, a5 ; RV32-NEXT: bltz a2, .LBB51_3 -; RV32-NEXT: .LBB51_19: # %entry +; RV32-NEXT: .LBB51_21: # %entry ; RV32-NEXT: mv a7, a1 ; RV32-NEXT: bltu a5, a1, .LBB51_4 -; RV32-NEXT: .LBB51_20: # %entry -; RV32-NEXT: mv a5, a1 -; RV32-NEXT: beqz a6, .LBB51_5 -; RV32-NEXT: .LBB51_21: # %entry -; RV32-NEXT: mv a5, a7 -; RV32-NEXT: li a6, 0 -; RV32-NEXT: beqz a2, .LBB51_6 ; RV32-NEXT: .LBB51_22: # %entry -; RV32-NEXT: srai a1, a2, 31 -; RV32-NEXT: and a6, a1, a0 -; RV32-NEXT: bltz a2, .LBB51_7 +; RV32-NEXT: mv a5, a1 +; RV32-NEXT: bnez a6, .LBB51_5 +; RV32-NEXT: j .LBB51_6 ; RV32-NEXT: .LBB51_23: # %entry -; RV32-NEXT: li a2, 0 -; RV32-NEXT: mv a0, a5 -; RV32-NEXT: bgez a2, .LBB51_8 -; RV32-NEXT: .LBB51_24: # %entry -; RV32-NEXT: lui a0, 524288 -; RV32-NEXT: mv a1, a5 -; RV32-NEXT: bgeu a4, a5, .LBB51_9 -; RV32-NEXT: j .LBB51_10 -; RV32-NEXT: .LBB51_25: # %entry ; RV32-NEXT: mv a1, a0 ; RV32-NEXT: mv t0, a3 -; RV32-NEXT: bltu a4, a5, .LBB51_12 -; RV32-NEXT: .LBB51_26: # %entry +; RV32-NEXT: bltu a4, a5, .LBB51_14 +; RV32-NEXT: .LBB51_24: # %entry ; RV32-NEXT: li t0, 0 ; RV32-NEXT: mv a0, a3 -; RV32-NEXT: beq a5, a4, .LBB51_13 -; RV32-NEXT: .LBB51_27: # %entry +; RV32-NEXT: beq a5, a4, .LBB51_15 +; RV32-NEXT: .LBB51_25: # %entry ; RV32-NEXT: mv a0, t0 -; RV32-NEXT: bgez a2, .LBB51_14 -; RV32-NEXT: .LBB51_28: # %entry +; RV32-NEXT: bgez a2, .LBB51_16 +; RV32-NEXT: .LBB51_26: # 
%entry ; RV32-NEXT: li a3, 0 -; RV32-NEXT: bne a6, a7, .LBB51_15 -; RV32-NEXT: j .LBB51_16 +; RV32-NEXT: bne a6, a7, .LBB51_17 +; RV32-NEXT: j .LBB51_18 ; ; RV64-LABEL: stest_f16i64_mm: ; RV64: # %bb.0: # %entry @@ -4331,39 +4217,30 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixunssfti@plt ; RV32-NEXT: lw a0, 20(sp) -; RV32-NEXT: lw a3, 16(sp) -; RV32-NEXT: li a1, 0 +; RV32-NEXT: lw a1, 16(sp) ; RV32-NEXT: beqz a0, .LBB52_3 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: mv a2, a1 +; RV32-NEXT: li a2, 0 ; RV32-NEXT: beqz a2, .LBB52_4 ; RV32-NEXT: .LBB52_2: -; RV32-NEXT: lw a4, 8(sp) +; RV32-NEXT: lw a3, 8(sp) ; RV32-NEXT: j .LBB52_5 ; RV32-NEXT: .LBB52_3: -; RV32-NEXT: seqz a2, a3 +; RV32-NEXT: seqz a2, a1 ; RV32-NEXT: bnez a2, .LBB52_2 ; RV32-NEXT: .LBB52_4: # %entry -; RV32-NEXT: mv a4, a1 +; RV32-NEXT: li a3, 0 ; RV32-NEXT: .LBB52_5: # %entry -; RV32-NEXT: xori a3, a3, 1 -; RV32-NEXT: or a3, a3, a0 -; RV32-NEXT: mv a0, a1 -; RV32-NEXT: beqz a3, .LBB52_7 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: mv a0, a4 -; RV32-NEXT: .LBB52_7: # %entry -; RV32-NEXT: bnez a2, .LBB52_9 -; RV32-NEXT: # %bb.8: # %entry -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bnez a3, .LBB52_10 -; RV32-NEXT: j .LBB52_11 -; RV32-NEXT: .LBB52_9: +; RV32-NEXT: xori a1, a1, 1 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: addi a1, a0, -1 +; RV32-NEXT: and a0, a1, a3 +; RV32-NEXT: beqz a2, .LBB52_7 +; RV32-NEXT: # %bb.6: ; RV32-NEXT: lw a2, 12(sp) -; RV32-NEXT: beqz a3, .LBB52_11 -; RV32-NEXT: .LBB52_10: # %entry -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB52_11: # %entry +; RV32-NEXT: .LBB52_7: # %entry +; RV32-NEXT: and a1, a1, a2 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -4377,17 +4254,14 @@ ; RV64-NEXT: fmv.x.w a0, fa0 ; RV64-NEXT: call __extendhfsf2@plt ; RV64-NEXT: call __fixunssfti@plt -; RV64-NEXT: mv a2, a0 -; RV64-NEXT: li a0, 0 ; RV64-NEXT: beqz a1, .LBB52_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: mv a2, a0 +; RV64-NEXT: li a0, 0 ; RV64-NEXT: .LBB52_2: # %entry -; RV64-NEXT: li a3, 1 -; RV64-NEXT: beq a1, a3, .LBB52_4 -; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: .LBB52_4: # %entry +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -4416,102 +4290,97 @@ ; RV32-NEXT: slti a0, a2, 0 ; RV32-NEXT: beqz a0, .LBB53_4 ; RV32-NEXT: .LBB53_2: -; RV32-NEXT: lw a5, 12(sp) -; RV32-NEXT: j .LBB53_5 +; RV32-NEXT: lw a1, 12(sp) +; RV32-NEXT: bnez a0, .LBB53_5 +; RV32-NEXT: j .LBB53_6 ; RV32-NEXT: .LBB53_3: ; RV32-NEXT: seqz a0, a3 ; RV32-NEXT: bnez a0, .LBB53_2 ; RV32-NEXT: .LBB53_4: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: .LBB53_5: # %entry -; RV32-NEXT: xori a1, a3, 1 -; RV32-NEXT: or a4, a1, a2 ; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a4, .LBB53_7 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: mv a1, a5 -; RV32-NEXT: .LBB53_7: # %entry -; RV32-NEXT: bnez a0, .LBB53_9 -; RV32-NEXT: # %bb.8: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: li a0, 0 -; RV32-NEXT: bnez a4, .LBB53_10 -; RV32-NEXT: j .LBB53_11 -; RV32-NEXT: .LBB53_9: -; RV32-NEXT: lw a5, 8(sp) -; RV32-NEXT: li a0, 0 -; RV32-NEXT: beqz a4, .LBB53_11 -; RV32-NEXT: .LBB53_10: # %entry -; RV32-NEXT: mv a0, a5 -; RV32-NEXT: .LBB53_11: # %entry +; RV32-NEXT: beqz a0, .LBB53_6 +; RV32-NEXT: .LBB53_5: +; RV32-NEXT: lw a0, 8(sp) +; RV32-NEXT: .LBB53_6: # %entry ; RV32-NEXT: li a5, 1 ; RV32-NEXT: mv a4, a3 
-; RV32-NEXT: bgez a2, .LBB53_17 -; RV32-NEXT: # %bb.12: # %entry -; RV32-NEXT: bgeu a3, a5, .LBB53_18 -; RV32-NEXT: .LBB53_13: # %entry -; RV32-NEXT: bnez a2, .LBB53_19 -; RV32-NEXT: .LBB53_14: # %entry -; RV32-NEXT: bgez a2, .LBB53_20 -; RV32-NEXT: .LBB53_15: # %entry -; RV32-NEXT: beqz a2, .LBB53_21 -; RV32-NEXT: .LBB53_16: # %entry +; RV32-NEXT: bgez a2, .LBB53_12 +; RV32-NEXT: # %bb.7: # %entry +; RV32-NEXT: xori a6, a3, 1 +; RV32-NEXT: bgeu a3, a5, .LBB53_13 +; RV32-NEXT: .LBB53_8: # %entry +; RV32-NEXT: or a5, a6, a2 +; RV32-NEXT: bnez a2, .LBB53_14 +; RV32-NEXT: .LBB53_9: # %entry +; RV32-NEXT: seqz a4, a5 +; RV32-NEXT: bgez a2, .LBB53_15 +; RV32-NEXT: .LBB53_10: # %entry +; RV32-NEXT: addi a5, a4, -1 +; RV32-NEXT: beqz a2, .LBB53_16 +; RV32-NEXT: .LBB53_11: # %entry ; RV32-NEXT: sgtz a4, a2 -; RV32-NEXT: mv a5, a0 -; RV32-NEXT: beqz a4, .LBB53_22 -; RV32-NEXT: j .LBB53_23 -; RV32-NEXT: .LBB53_17: # %entry +; RV32-NEXT: j .LBB53_17 +; RV32-NEXT: .LBB53_12: # %entry ; RV32-NEXT: li a4, 1 -; RV32-NEXT: bltu a3, a5, .LBB53_13 -; RV32-NEXT: .LBB53_18: # %entry +; RV32-NEXT: xori a6, a3, 1 +; RV32-NEXT: bltu a3, a5, .LBB53_8 +; RV32-NEXT: .LBB53_13: # %entry ; RV32-NEXT: li a3, 1 -; RV32-NEXT: beqz a2, .LBB53_14 -; RV32-NEXT: .LBB53_19: # %entry +; RV32-NEXT: or a5, a6, a2 +; RV32-NEXT: beqz a2, .LBB53_9 +; RV32-NEXT: .LBB53_14: # %entry ; RV32-NEXT: mv a3, a4 -; RV32-NEXT: bltz a2, .LBB53_15 -; RV32-NEXT: .LBB53_20: # %entry +; RV32-NEXT: seqz a4, a5 +; RV32-NEXT: bltz a2, .LBB53_10 +; RV32-NEXT: .LBB53_15: # %entry ; RV32-NEXT: li a2, 0 -; RV32-NEXT: bnez a2, .LBB53_16 -; RV32-NEXT: .LBB53_21: +; RV32-NEXT: addi a5, a4, -1 +; RV32-NEXT: bnez a2, .LBB53_11 +; RV32-NEXT: .LBB53_16: ; RV32-NEXT: snez a4, a3 +; RV32-NEXT: .LBB53_17: # %entry +; RV32-NEXT: and a0, a5, a0 +; RV32-NEXT: and a1, a5, a1 ; RV32-NEXT: mv a5, a0 -; RV32-NEXT: bnez a4, .LBB53_23 -; RV32-NEXT: .LBB53_22: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: .LBB53_23: # %entry +; RV32-NEXT: beqz a4, .LBB53_25 +; RV32-NEXT: # %bb.18: # %entry ; RV32-NEXT: mv a6, a0 -; RV32-NEXT: beqz a1, .LBB53_30 -; RV32-NEXT: # %bb.24: # %entry -; RV32-NEXT: bnez a1, .LBB53_31 -; RV32-NEXT: .LBB53_25: # %entry +; RV32-NEXT: beqz a1, .LBB53_26 +; RV32-NEXT: .LBB53_19: # %entry +; RV32-NEXT: bnez a1, .LBB53_27 +; RV32-NEXT: .LBB53_20: # %entry ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: bnez a2, .LBB53_32 -; RV32-NEXT: .LBB53_26: # %entry +; RV32-NEXT: bnez a2, .LBB53_28 +; RV32-NEXT: .LBB53_21: # %entry ; RV32-NEXT: mv a3, a1 -; RV32-NEXT: beqz a4, .LBB53_33 -; RV32-NEXT: .LBB53_27: # %entry -; RV32-NEXT: beqz a2, .LBB53_29 -; RV32-NEXT: .LBB53_28: # %entry +; RV32-NEXT: beqz a4, .LBB53_29 +; RV32-NEXT: .LBB53_22: # %entry +; RV32-NEXT: beqz a2, .LBB53_24 +; RV32-NEXT: .LBB53_23: # %entry ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB53_29: # %entry +; RV32-NEXT: .LBB53_24: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret -; RV32-NEXT: .LBB53_30: # %entry +; RV32-NEXT: .LBB53_25: # %entry +; RV32-NEXT: li a5, 0 +; RV32-NEXT: mv a6, a0 +; RV32-NEXT: bnez a1, .LBB53_19 +; RV32-NEXT: .LBB53_26: # %entry ; RV32-NEXT: li a6, 0 -; RV32-NEXT: beqz a1, .LBB53_25 -; RV32-NEXT: .LBB53_31: # %entry +; RV32-NEXT: beqz a1, .LBB53_20 +; RV32-NEXT: .LBB53_27: # %entry ; RV32-NEXT: mv a0, a6 ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: beqz a2, .LBB53_26 -; RV32-NEXT: .LBB53_32: # %entry +; RV32-NEXT: beqz a2, .LBB53_21 +; RV32-NEXT: .LBB53_28: # %entry ; RV32-NEXT: mv a0, a5 ; RV32-NEXT: mv a3, a1 -; RV32-NEXT: 
bnez a4, .LBB53_27 -; RV32-NEXT: .LBB53_33: # %entry +; RV32-NEXT: bnez a4, .LBB53_22 +; RV32-NEXT: .LBB53_29: # %entry ; RV32-NEXT: li a3, 0 -; RV32-NEXT: bnez a2, .LBB53_28 -; RV32-NEXT: j .LBB53_29 +; RV32-NEXT: bnez a2, .LBB53_23 +; RV32-NEXT: j .LBB53_24 ; ; RV64-LABEL: ustest_f16i64_mm: ; RV64: # %bb.0: # %entry @@ -4522,37 +4391,28 @@ ; RV64-NEXT: fmv.x.w a0, fa0 ; RV64-NEXT: call __extendhfsf2@plt ; RV64-NEXT: call __fixsfti@plt -; RV64-NEXT: mv a2, a0 -; RV64-NEXT: li a4, 1 -; RV64-NEXT: mv a3, a1 -; RV64-NEXT: bgtz a1, .LBB53_6 +; RV64-NEXT: mv a2, a1 +; RV64-NEXT: blez a1, .LBB53_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: li a0, 0 -; RV64-NEXT: bne a1, a4, .LBB53_7 +; RV64-NEXT: li a2, 1 ; RV64-NEXT: .LBB53_2: # %entry +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: mv a1, a0 -; RV64-NEXT: blez a3, .LBB53_8 -; RV64-NEXT: .LBB53_3: # %entry -; RV64-NEXT: beqz a3, .LBB53_5 +; RV64-NEXT: bgtz a2, .LBB53_4 +; RV64-NEXT: # %bb.3: # %entry +; RV64-NEXT: li a1, 0 ; RV64-NEXT: .LBB53_4: # %entry +; RV64-NEXT: beqz a2, .LBB53_6 +; RV64-NEXT: # %bb.5: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB53_5: # %entry +; RV64-NEXT: .LBB53_6: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret -; RV64-NEXT: .LBB53_6: # %entry -; RV64-NEXT: li a2, 0 -; RV64-NEXT: li a3, 1 -; RV64-NEXT: li a0, 0 -; RV64-NEXT: beq a1, a4, .LBB53_2 -; RV64-NEXT: .LBB53_7: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: bgtz a3, .LBB53_3 -; RV64-NEXT: .LBB53_8: # %entry -; RV64-NEXT: li a1, 0 -; RV64-NEXT: bnez a3, .LBB53_4 -; RV64-NEXT: j .LBB53_5 entry: %conv = fptosi half %x to i128 %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) Index: llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll =================================================================== --- llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll +++ llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll @@ -5760,36 +5760,28 @@ ; CHECK-NOV-NEXT: mv s1, a1 ; CHECK-NOV-NEXT: fmv.d fa0, fs0 ; CHECK-NOV-NEXT: call __fixunsdfti@plt -; CHECK-NOV-NEXT: mv a2, a0 -; CHECK-NOV-NEXT: mv a3, a1 -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: beqz a3, .LBB46_2 +; CHECK-NOV-NEXT: beqz a1, .LBB46_2 ; CHECK-NOV-NEXT: # %bb.1: # %entry -; CHECK-NOV-NEXT: mv a2, a1 +; CHECK-NOV-NEXT: li a0, 0 ; CHECK-NOV-NEXT: .LBB46_2: # %entry -; CHECK-NOV-NEXT: li a4, 1 -; CHECK-NOV-NEXT: mv a0, a1 -; CHECK-NOV-NEXT: bne a3, a4, .LBB46_7 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: and a0, a1, a0 +; CHECK-NOV-NEXT: beqz s1, .LBB46_4 ; CHECK-NOV-NEXT: # %bb.3: # %entry -; CHECK-NOV-NEXT: bnez s1, .LBB46_8 +; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: .LBB46_4: # %entry -; CHECK-NOV-NEXT: beq s1, a4, .LBB46_6 -; CHECK-NOV-NEXT: .LBB46_5: # %entry -; CHECK-NOV-NEXT: mv a1, s0 -; CHECK-NOV-NEXT: .LBB46_6: # %entry +; CHECK-NOV-NEXT: addi a1, s1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: and a1, a1, s0 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: addi sp, sp, 32 ; CHECK-NOV-NEXT: ret -; CHECK-NOV-NEXT: .LBB46_7: # %entry -; CHECK-NOV-NEXT: mv a0, a2 -; CHECK-NOV-NEXT: beqz s1, .LBB46_4 -; CHECK-NOV-NEXT: 
.LBB46_8: # %entry -; CHECK-NOV-NEXT: mv s0, a1 -; CHECK-NOV-NEXT: bne s1, a4, .LBB46_5 -; CHECK-NOV-NEXT: j .LBB46_6 ; ; CHECK-V-LABEL: utest_f64i64_mm: ; CHECK-V: # %bb.0: # %entry @@ -5817,23 +5809,24 @@ ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 ; CHECK-V-NEXT: call __fixunsdfti@plt -; CHECK-V-NEXT: li a2, 0 ; CHECK-V-NEXT: beqz s1, .LBB46_2 ; CHECK-V-NEXT: # %bb.1: # %entry -; CHECK-V-NEXT: mv s0, a2 +; CHECK-V-NEXT: li s0, 0 ; CHECK-V-NEXT: .LBB46_2: # %entry -; CHECK-V-NEXT: li a4, 1 -; CHECK-V-NEXT: mv a3, a2 -; CHECK-V-NEXT: bne s1, a4, .LBB46_7 +; CHECK-V-NEXT: addi a2, s1, -1 +; CHECK-V-NEXT: seqz a2, a2 +; CHECK-V-NEXT: addi a2, a2, -1 +; CHECK-V-NEXT: and a2, a2, s0 +; CHECK-V-NEXT: beqz a1, .LBB46_4 ; CHECK-V-NEXT: # %bb.3: # %entry -; CHECK-V-NEXT: bnez a1, .LBB46_8 +; CHECK-V-NEXT: li a0, 0 ; CHECK-V-NEXT: .LBB46_4: # %entry -; CHECK-V-NEXT: beq a1, a4, .LBB46_6 -; CHECK-V-NEXT: .LBB46_5: # %entry -; CHECK-V-NEXT: mv a2, a0 -; CHECK-V-NEXT: .LBB46_6: # %entry -; CHECK-V-NEXT: sd a2, 24(sp) -; CHECK-V-NEXT: sd a3, 32(sp) +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: seqz a1, a1 +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: and a0, a1, a0 +; CHECK-V-NEXT: sd a0, 24(sp) +; CHECK-V-NEXT: sd a2, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 ; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) @@ -5849,13 +5842,6 @@ ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: addi sp, sp, 80 ; CHECK-V-NEXT: ret -; CHECK-V-NEXT: .LBB46_7: # %entry -; CHECK-V-NEXT: mv a3, s0 -; CHECK-V-NEXT: beqz a1, .LBB46_4 -; CHECK-V-NEXT: .LBB46_8: # %entry -; CHECK-V-NEXT: mv a0, a2 -; CHECK-V-NEXT: bne a1, a4, .LBB46_5 -; CHECK-V-NEXT: j .LBB46_6 entry: %conv = fptoui <2 x double> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -5882,75 +5868,63 @@ ; CHECK-NOV-NEXT: mv s1, a1 ; CHECK-NOV-NEXT: fmv.d fa0, fs0 ; CHECK-NOV-NEXT: call __fixdfti@plt -; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: li a5, 1 ; CHECK-NOV-NEXT: mv a3, a1 -; CHECK-NOV-NEXT: bgtz a1, .LBB47_12 +; CHECK-NOV-NEXT: bgtz a1, .LBB47_14 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: mv a4, s1 -; CHECK-NOV-NEXT: bgtz s1, .LBB47_13 +; CHECK-NOV-NEXT: bgtz s1, .LBB47_15 ; CHECK-NOV-NEXT: .LBB47_2: # %entry -; CHECK-NOV-NEXT: bgtz a2, .LBB47_14 +; CHECK-NOV-NEXT: bgtz a1, .LBB47_16 ; CHECK-NOV-NEXT: .LBB47_3: # %entry -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: bne a2, a5, .LBB47_15 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: blez s1, .LBB47_5 ; CHECK-NOV-NEXT: .LBB47_4: # %entry -; CHECK-NOV-NEXT: bgtz s1, .LBB47_16 +; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: .LBB47_5: # %entry -; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: bne s1, a5, .LBB47_17 -; CHECK-NOV-NEXT: .LBB47_6: # %entry -; CHECK-NOV-NEXT: mv a2, a0 -; CHECK-NOV-NEXT: blez a4, .LBB47_18 +; CHECK-NOV-NEXT: addi a2, s1, -1 +; CHECK-NOV-NEXT: seqz a2, a2 +; CHECK-NOV-NEXT: addi a2, a2, -1 +; CHECK-NOV-NEXT: and a2, a2, s0 +; CHECK-NOV-NEXT: seqz a5, a1 +; CHECK-NOV-NEXT: mv a1, a2 +; CHECK-NOV-NEXT: bgtz a4, .LBB47_7 +; CHECK-NOV-NEXT: # %bb.6: # %entry +; CHECK-NOV-NEXT: li a1, 0 ; CHECK-NOV-NEXT: .LBB47_7: # %entry -; CHECK-NOV-NEXT: bnez a4, .LBB47_19 -; CHECK-NOV-NEXT: .LBB47_8: # %entry +; CHECK-NOV-NEXT: addi a5, a5, -1 +; CHECK-NOV-NEXT: beqz a4, .LBB47_9 +; CHECK-NOV-NEXT: # %bb.8: # %entry ; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: blez a3, .LBB47_20 ; CHECK-NOV-NEXT: .LBB47_9: # %entry -; 
CHECK-NOV-NEXT: beqz a3, .LBB47_11 -; CHECK-NOV-NEXT: .LBB47_10: # %entry -; CHECK-NOV-NEXT: mv a1, a2 +; CHECK-NOV-NEXT: and a1, a5, a0 +; CHECK-NOV-NEXT: mv a0, a1 +; CHECK-NOV-NEXT: bgtz a3, .LBB47_11 +; CHECK-NOV-NEXT: # %bb.10: # %entry +; CHECK-NOV-NEXT: li a0, 0 ; CHECK-NOV-NEXT: .LBB47_11: # %entry +; CHECK-NOV-NEXT: beqz a3, .LBB47_13 +; CHECK-NOV-NEXT: # %bb.12: # %entry +; CHECK-NOV-NEXT: mv a1, a0 +; CHECK-NOV-NEXT: .LBB47_13: # %entry +; CHECK-NOV-NEXT: mv a0, a2 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: addi sp, sp, 32 ; CHECK-NOV-NEXT: ret -; CHECK-NOV-NEXT: .LBB47_12: # %entry +; CHECK-NOV-NEXT: .LBB47_14: # %entry ; CHECK-NOV-NEXT: li a3, 1 ; CHECK-NOV-NEXT: mv a4, s1 ; CHECK-NOV-NEXT: blez s1, .LBB47_2 -; CHECK-NOV-NEXT: .LBB47_13: # %entry -; CHECK-NOV-NEXT: li a4, 1 -; CHECK-NOV-NEXT: blez a2, .LBB47_3 -; CHECK-NOV-NEXT: .LBB47_14: # %entry -; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: beq a2, a5, .LBB47_4 ; CHECK-NOV-NEXT: .LBB47_15: # %entry -; CHECK-NOV-NEXT: mv a1, a0 -; CHECK-NOV-NEXT: blez s1, .LBB47_5 +; CHECK-NOV-NEXT: li a4, 1 +; CHECK-NOV-NEXT: blez a1, .LBB47_3 ; CHECK-NOV-NEXT: .LBB47_16: # %entry -; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: beq s1, a5, .LBB47_6 -; CHECK-NOV-NEXT: .LBB47_17: # %entry -; CHECK-NOV-NEXT: mv a0, s0 -; CHECK-NOV-NEXT: mv a2, a0 -; CHECK-NOV-NEXT: bgtz a4, .LBB47_7 -; CHECK-NOV-NEXT: .LBB47_18: # %entry -; CHECK-NOV-NEXT: li a2, 0 -; CHECK-NOV-NEXT: beqz a4, .LBB47_8 -; CHECK-NOV-NEXT: .LBB47_19: # %entry -; CHECK-NOV-NEXT: mv a0, a2 -; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: bgtz a3, .LBB47_9 -; CHECK-NOV-NEXT: .LBB47_20: # %entry -; CHECK-NOV-NEXT: li a2, 0 -; CHECK-NOV-NEXT: bnez a3, .LBB47_10 -; CHECK-NOV-NEXT: j .LBB47_11 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: bgtz s1, .LBB47_4 +; CHECK-NOV-NEXT: j .LBB47_5 ; ; CHECK-V-LABEL: ustest_f64i64_mm: ; CHECK-V: # %bb.0: # %entry @@ -5978,36 +5952,51 @@ ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 ; CHECK-V-NEXT: call __fixdfti@plt -; CHECK-V-NEXT: li a5, 1 ; CHECK-V-NEXT: mv a2, a1 -; CHECK-V-NEXT: bgtz a1, .LBB47_12 +; CHECK-V-NEXT: blez a1, .LBB47_2 ; CHECK-V-NEXT: # %bb.1: # %entry -; CHECK-V-NEXT: bgtz s0, .LBB47_13 +; CHECK-V-NEXT: li a2, 1 ; CHECK-V-NEXT: .LBB47_2: # %entry -; CHECK-V-NEXT: li a3, 0 -; CHECK-V-NEXT: bne s0, a5, .LBB47_14 -; CHECK-V-NEXT: .LBB47_3: # %entry -; CHECK-V-NEXT: bgtz a1, .LBB47_15 +; CHECK-V-NEXT: blez s0, .LBB47_4 +; CHECK-V-NEXT: # %bb.3: # %entry +; CHECK-V-NEXT: li s1, 0 ; CHECK-V-NEXT: .LBB47_4: # %entry -; CHECK-V-NEXT: li a4, 0 -; CHECK-V-NEXT: bne a1, a5, .LBB47_16 -; CHECK-V-NEXT: .LBB47_5: # %entry -; CHECK-V-NEXT: bgtz s0, .LBB47_17 +; CHECK-V-NEXT: addi a3, s0, -1 +; CHECK-V-NEXT: seqz a3, a3 +; CHECK-V-NEXT: blez a1, .LBB47_6 +; CHECK-V-NEXT: # %bb.5: # %entry +; CHECK-V-NEXT: li a0, 0 ; CHECK-V-NEXT: .LBB47_6: # %entry -; CHECK-V-NEXT: mv a0, a3 -; CHECK-V-NEXT: blez s0, .LBB47_18 -; CHECK-V-NEXT: .LBB47_7: # %entry -; CHECK-V-NEXT: bnez s0, .LBB47_19 +; CHECK-V-NEXT: addi a3, a3, -1 +; CHECK-V-NEXT: addi a4, a1, -1 +; CHECK-V-NEXT: blez s0, .LBB47_8 +; CHECK-V-NEXT: # %bb.7: # %entry +; CHECK-V-NEXT: li s0, 1 ; CHECK-V-NEXT: .LBB47_8: # %entry -; CHECK-V-NEXT: mv a0, a4 -; CHECK-V-NEXT: blez a2, .LBB47_20 -; 
CHECK-V-NEXT: .LBB47_9: # %entry -; CHECK-V-NEXT: beqz a2, .LBB47_11 +; CHECK-V-NEXT: and a1, a3, s1 +; CHECK-V-NEXT: seqz a4, a4 +; CHECK-V-NEXT: mv a3, a1 +; CHECK-V-NEXT: bgtz s0, .LBB47_10 +; CHECK-V-NEXT: # %bb.9: # %entry +; CHECK-V-NEXT: li a3, 0 ; CHECK-V-NEXT: .LBB47_10: # %entry -; CHECK-V-NEXT: mv a4, a0 -; CHECK-V-NEXT: .LBB47_11: # %entry -; CHECK-V-NEXT: sd a4, 24(sp) -; CHECK-V-NEXT: sd a3, 32(sp) +; CHECK-V-NEXT: addi a4, a4, -1 +; CHECK-V-NEXT: beqz s0, .LBB47_12 +; CHECK-V-NEXT: # %bb.11: # %entry +; CHECK-V-NEXT: mv a1, a3 +; CHECK-V-NEXT: .LBB47_12: # %entry +; CHECK-V-NEXT: and a0, a4, a0 +; CHECK-V-NEXT: mv a3, a0 +; CHECK-V-NEXT: bgtz a2, .LBB47_14 +; CHECK-V-NEXT: # %bb.13: # %entry +; CHECK-V-NEXT: li a3, 0 +; CHECK-V-NEXT: .LBB47_14: # %entry +; CHECK-V-NEXT: beqz a2, .LBB47_16 +; CHECK-V-NEXT: # %bb.15: # %entry +; CHECK-V-NEXT: mv a0, a3 +; CHECK-V-NEXT: .LBB47_16: # %entry +; CHECK-V-NEXT: sd a0, 24(sp) +; CHECK-V-NEXT: sd a1, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 ; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) @@ -6023,38 +6012,6 @@ ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: addi sp, sp, 80 ; CHECK-V-NEXT: ret -; CHECK-V-NEXT: .LBB47_12: # %entry -; CHECK-V-NEXT: li a2, 1 -; CHECK-V-NEXT: blez s0, .LBB47_2 -; CHECK-V-NEXT: .LBB47_13: # %entry -; CHECK-V-NEXT: li s1, 0 -; CHECK-V-NEXT: li a3, 0 -; CHECK-V-NEXT: beq s0, a5, .LBB47_3 -; CHECK-V-NEXT: .LBB47_14: # %entry -; CHECK-V-NEXT: mv a3, s1 -; CHECK-V-NEXT: blez a1, .LBB47_4 -; CHECK-V-NEXT: .LBB47_15: # %entry -; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: li a4, 0 -; CHECK-V-NEXT: beq a1, a5, .LBB47_5 -; CHECK-V-NEXT: .LBB47_16: # %entry -; CHECK-V-NEXT: mv a4, a0 -; CHECK-V-NEXT: blez s0, .LBB47_6 -; CHECK-V-NEXT: .LBB47_17: # %entry -; CHECK-V-NEXT: li s0, 1 -; CHECK-V-NEXT: mv a0, a3 -; CHECK-V-NEXT: bgtz s0, .LBB47_7 -; CHECK-V-NEXT: .LBB47_18: # %entry -; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: beqz s0, .LBB47_8 -; CHECK-V-NEXT: .LBB47_19: # %entry -; CHECK-V-NEXT: mv a3, a0 -; CHECK-V-NEXT: mv a0, a4 -; CHECK-V-NEXT: bgtz a2, .LBB47_9 -; CHECK-V-NEXT: .LBB47_20: # %entry -; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: bnez a2, .LBB47_10 -; CHECK-V-NEXT: j .LBB47_11 entry: %conv = fptosi <2 x double> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -6321,36 +6278,28 @@ ; CHECK-NOV-NEXT: mv s1, a1 ; CHECK-NOV-NEXT: fmv.s fa0, fs0 ; CHECK-NOV-NEXT: call __fixunssfti@plt -; CHECK-NOV-NEXT: mv a2, a0 -; CHECK-NOV-NEXT: mv a3, a1 -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: beqz a3, .LBB49_2 +; CHECK-NOV-NEXT: beqz a1, .LBB49_2 ; CHECK-NOV-NEXT: # %bb.1: # %entry -; CHECK-NOV-NEXT: mv a2, a1 +; CHECK-NOV-NEXT: li a0, 0 ; CHECK-NOV-NEXT: .LBB49_2: # %entry -; CHECK-NOV-NEXT: li a4, 1 -; CHECK-NOV-NEXT: mv a0, a1 -; CHECK-NOV-NEXT: bne a3, a4, .LBB49_7 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: and a0, a1, a0 +; CHECK-NOV-NEXT: beqz s1, .LBB49_4 ; CHECK-NOV-NEXT: # %bb.3: # %entry -; CHECK-NOV-NEXT: bnez s1, .LBB49_8 +; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: .LBB49_4: # %entry -; CHECK-NOV-NEXT: beq s1, a4, .LBB49_6 -; CHECK-NOV-NEXT: .LBB49_5: # %entry -; CHECK-NOV-NEXT: mv a1, s0 -; CHECK-NOV-NEXT: .LBB49_6: # %entry +; CHECK-NOV-NEXT: addi a1, s1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: and a1, a1, s0 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; 
CHECK-NOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: addi sp, sp, 32 ; CHECK-NOV-NEXT: ret -; CHECK-NOV-NEXT: .LBB49_7: # %entry -; CHECK-NOV-NEXT: mv a0, a2 -; CHECK-NOV-NEXT: beqz s1, .LBB49_4 -; CHECK-NOV-NEXT: .LBB49_8: # %entry -; CHECK-NOV-NEXT: mv s0, a1 -; CHECK-NOV-NEXT: bne s1, a4, .LBB49_5 -; CHECK-NOV-NEXT: j .LBB49_6 ; ; CHECK-V-LABEL: utest_f32i64_mm: ; CHECK-V: # %bb.0: # %entry @@ -6378,23 +6327,24 @@ ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 ; CHECK-V-NEXT: call __fixunssfti@plt -; CHECK-V-NEXT: li a2, 0 ; CHECK-V-NEXT: beqz s1, .LBB49_2 ; CHECK-V-NEXT: # %bb.1: # %entry -; CHECK-V-NEXT: mv s0, a2 +; CHECK-V-NEXT: li s0, 0 ; CHECK-V-NEXT: .LBB49_2: # %entry -; CHECK-V-NEXT: li a4, 1 -; CHECK-V-NEXT: mv a3, a2 -; CHECK-V-NEXT: bne s1, a4, .LBB49_7 +; CHECK-V-NEXT: addi a2, s1, -1 +; CHECK-V-NEXT: seqz a2, a2 +; CHECK-V-NEXT: addi a2, a2, -1 +; CHECK-V-NEXT: and a2, a2, s0 +; CHECK-V-NEXT: beqz a1, .LBB49_4 ; CHECK-V-NEXT: # %bb.3: # %entry -; CHECK-V-NEXT: bnez a1, .LBB49_8 +; CHECK-V-NEXT: li a0, 0 ; CHECK-V-NEXT: .LBB49_4: # %entry -; CHECK-V-NEXT: beq a1, a4, .LBB49_6 -; CHECK-V-NEXT: .LBB49_5: # %entry -; CHECK-V-NEXT: mv a2, a0 -; CHECK-V-NEXT: .LBB49_6: # %entry -; CHECK-V-NEXT: sd a2, 24(sp) -; CHECK-V-NEXT: sd a3, 32(sp) +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: seqz a1, a1 +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: and a0, a1, a0 +; CHECK-V-NEXT: sd a0, 24(sp) +; CHECK-V-NEXT: sd a2, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 ; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) @@ -6410,13 +6360,6 @@ ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: addi sp, sp, 80 ; CHECK-V-NEXT: ret -; CHECK-V-NEXT: .LBB49_7: # %entry -; CHECK-V-NEXT: mv a3, s0 -; CHECK-V-NEXT: beqz a1, .LBB49_4 -; CHECK-V-NEXT: .LBB49_8: # %entry -; CHECK-V-NEXT: mv a0, a2 -; CHECK-V-NEXT: bne a1, a4, .LBB49_5 -; CHECK-V-NEXT: j .LBB49_6 entry: %conv = fptoui <2 x float> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -6443,75 +6386,63 @@ ; CHECK-NOV-NEXT: mv s1, a1 ; CHECK-NOV-NEXT: fmv.s fa0, fs0 ; CHECK-NOV-NEXT: call __fixsfti@plt -; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: li a5, 1 ; CHECK-NOV-NEXT: mv a3, a1 -; CHECK-NOV-NEXT: bgtz a1, .LBB50_12 +; CHECK-NOV-NEXT: bgtz a1, .LBB50_14 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: mv a4, s1 -; CHECK-NOV-NEXT: bgtz s1, .LBB50_13 +; CHECK-NOV-NEXT: bgtz s1, .LBB50_15 ; CHECK-NOV-NEXT: .LBB50_2: # %entry -; CHECK-NOV-NEXT: bgtz a2, .LBB50_14 +; CHECK-NOV-NEXT: bgtz a1, .LBB50_16 ; CHECK-NOV-NEXT: .LBB50_3: # %entry -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: bne a2, a5, .LBB50_15 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: blez s1, .LBB50_5 ; CHECK-NOV-NEXT: .LBB50_4: # %entry -; CHECK-NOV-NEXT: bgtz s1, .LBB50_16 +; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: .LBB50_5: # %entry -; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: bne s1, a5, .LBB50_17 -; CHECK-NOV-NEXT: .LBB50_6: # %entry -; CHECK-NOV-NEXT: mv a2, a0 -; CHECK-NOV-NEXT: blez a4, .LBB50_18 +; CHECK-NOV-NEXT: addi a2, s1, -1 +; CHECK-NOV-NEXT: seqz a2, a2 +; CHECK-NOV-NEXT: addi a2, a2, -1 +; CHECK-NOV-NEXT: and a2, a2, s0 +; CHECK-NOV-NEXT: seqz a5, a1 +; CHECK-NOV-NEXT: mv a1, a2 +; CHECK-NOV-NEXT: bgtz a4, .LBB50_7 +; CHECK-NOV-NEXT: # %bb.6: # %entry +; 
CHECK-NOV-NEXT: li a1, 0 ; CHECK-NOV-NEXT: .LBB50_7: # %entry -; CHECK-NOV-NEXT: bnez a4, .LBB50_19 -; CHECK-NOV-NEXT: .LBB50_8: # %entry +; CHECK-NOV-NEXT: addi a5, a5, -1 +; CHECK-NOV-NEXT: beqz a4, .LBB50_9 +; CHECK-NOV-NEXT: # %bb.8: # %entry ; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: blez a3, .LBB50_20 ; CHECK-NOV-NEXT: .LBB50_9: # %entry -; CHECK-NOV-NEXT: beqz a3, .LBB50_11 -; CHECK-NOV-NEXT: .LBB50_10: # %entry -; CHECK-NOV-NEXT: mv a1, a2 +; CHECK-NOV-NEXT: and a1, a5, a0 +; CHECK-NOV-NEXT: mv a0, a1 +; CHECK-NOV-NEXT: bgtz a3, .LBB50_11 +; CHECK-NOV-NEXT: # %bb.10: # %entry +; CHECK-NOV-NEXT: li a0, 0 ; CHECK-NOV-NEXT: .LBB50_11: # %entry +; CHECK-NOV-NEXT: beqz a3, .LBB50_13 +; CHECK-NOV-NEXT: # %bb.12: # %entry +; CHECK-NOV-NEXT: mv a1, a0 +; CHECK-NOV-NEXT: .LBB50_13: # %entry +; CHECK-NOV-NEXT: mv a0, a2 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: addi sp, sp, 32 ; CHECK-NOV-NEXT: ret -; CHECK-NOV-NEXT: .LBB50_12: # %entry +; CHECK-NOV-NEXT: .LBB50_14: # %entry ; CHECK-NOV-NEXT: li a3, 1 ; CHECK-NOV-NEXT: mv a4, s1 ; CHECK-NOV-NEXT: blez s1, .LBB50_2 -; CHECK-NOV-NEXT: .LBB50_13: # %entry -; CHECK-NOV-NEXT: li a4, 1 -; CHECK-NOV-NEXT: blez a2, .LBB50_3 -; CHECK-NOV-NEXT: .LBB50_14: # %entry -; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: beq a2, a5, .LBB50_4 ; CHECK-NOV-NEXT: .LBB50_15: # %entry -; CHECK-NOV-NEXT: mv a1, a0 -; CHECK-NOV-NEXT: blez s1, .LBB50_5 +; CHECK-NOV-NEXT: li a4, 1 +; CHECK-NOV-NEXT: blez a1, .LBB50_3 ; CHECK-NOV-NEXT: .LBB50_16: # %entry -; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: beq s1, a5, .LBB50_6 -; CHECK-NOV-NEXT: .LBB50_17: # %entry -; CHECK-NOV-NEXT: mv a0, s0 -; CHECK-NOV-NEXT: mv a2, a0 -; CHECK-NOV-NEXT: bgtz a4, .LBB50_7 -; CHECK-NOV-NEXT: .LBB50_18: # %entry -; CHECK-NOV-NEXT: li a2, 0 -; CHECK-NOV-NEXT: beqz a4, .LBB50_8 -; CHECK-NOV-NEXT: .LBB50_19: # %entry -; CHECK-NOV-NEXT: mv a0, a2 -; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: bgtz a3, .LBB50_9 -; CHECK-NOV-NEXT: .LBB50_20: # %entry -; CHECK-NOV-NEXT: li a2, 0 -; CHECK-NOV-NEXT: bnez a3, .LBB50_10 -; CHECK-NOV-NEXT: j .LBB50_11 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: bgtz s1, .LBB50_4 +; CHECK-NOV-NEXT: j .LBB50_5 ; ; CHECK-V-LABEL: ustest_f32i64_mm: ; CHECK-V: # %bb.0: # %entry @@ -6539,36 +6470,51 @@ ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 ; CHECK-V-NEXT: call __fixsfti@plt -; CHECK-V-NEXT: li a5, 1 ; CHECK-V-NEXT: mv a2, a1 -; CHECK-V-NEXT: bgtz a1, .LBB50_12 +; CHECK-V-NEXT: blez a1, .LBB50_2 ; CHECK-V-NEXT: # %bb.1: # %entry -; CHECK-V-NEXT: bgtz s0, .LBB50_13 +; CHECK-V-NEXT: li a2, 1 ; CHECK-V-NEXT: .LBB50_2: # %entry -; CHECK-V-NEXT: li a3, 0 -; CHECK-V-NEXT: bne s0, a5, .LBB50_14 -; CHECK-V-NEXT: .LBB50_3: # %entry -; CHECK-V-NEXT: bgtz a1, .LBB50_15 +; CHECK-V-NEXT: blez s0, .LBB50_4 +; CHECK-V-NEXT: # %bb.3: # %entry +; CHECK-V-NEXT: li s1, 0 ; CHECK-V-NEXT: .LBB50_4: # %entry -; CHECK-V-NEXT: li a4, 0 -; CHECK-V-NEXT: bne a1, a5, .LBB50_16 -; CHECK-V-NEXT: .LBB50_5: # %entry -; CHECK-V-NEXT: bgtz s0, .LBB50_17 +; CHECK-V-NEXT: addi a3, s0, -1 +; CHECK-V-NEXT: seqz a3, a3 +; CHECK-V-NEXT: blez a1, .LBB50_6 +; CHECK-V-NEXT: # %bb.5: # %entry +; CHECK-V-NEXT: li a0, 0 ; CHECK-V-NEXT: .LBB50_6: # %entry -; CHECK-V-NEXT: mv a0, a3 -; CHECK-V-NEXT: blez s0, 
.LBB50_18 -; CHECK-V-NEXT: .LBB50_7: # %entry -; CHECK-V-NEXT: bnez s0, .LBB50_19 +; CHECK-V-NEXT: addi a3, a3, -1 +; CHECK-V-NEXT: addi a4, a1, -1 +; CHECK-V-NEXT: blez s0, .LBB50_8 +; CHECK-V-NEXT: # %bb.7: # %entry +; CHECK-V-NEXT: li s0, 1 ; CHECK-V-NEXT: .LBB50_8: # %entry -; CHECK-V-NEXT: mv a0, a4 -; CHECK-V-NEXT: blez a2, .LBB50_20 -; CHECK-V-NEXT: .LBB50_9: # %entry -; CHECK-V-NEXT: beqz a2, .LBB50_11 +; CHECK-V-NEXT: and a1, a3, s1 +; CHECK-V-NEXT: seqz a4, a4 +; CHECK-V-NEXT: mv a3, a1 +; CHECK-V-NEXT: bgtz s0, .LBB50_10 +; CHECK-V-NEXT: # %bb.9: # %entry +; CHECK-V-NEXT: li a3, 0 ; CHECK-V-NEXT: .LBB50_10: # %entry -; CHECK-V-NEXT: mv a4, a0 -; CHECK-V-NEXT: .LBB50_11: # %entry -; CHECK-V-NEXT: sd a4, 24(sp) -; CHECK-V-NEXT: sd a3, 32(sp) +; CHECK-V-NEXT: addi a4, a4, -1 +; CHECK-V-NEXT: beqz s0, .LBB50_12 +; CHECK-V-NEXT: # %bb.11: # %entry +; CHECK-V-NEXT: mv a1, a3 +; CHECK-V-NEXT: .LBB50_12: # %entry +; CHECK-V-NEXT: and a0, a4, a0 +; CHECK-V-NEXT: mv a3, a0 +; CHECK-V-NEXT: bgtz a2, .LBB50_14 +; CHECK-V-NEXT: # %bb.13: # %entry +; CHECK-V-NEXT: li a3, 0 +; CHECK-V-NEXT: .LBB50_14: # %entry +; CHECK-V-NEXT: beqz a2, .LBB50_16 +; CHECK-V-NEXT: # %bb.15: # %entry +; CHECK-V-NEXT: mv a0, a3 +; CHECK-V-NEXT: .LBB50_16: # %entry +; CHECK-V-NEXT: sd a0, 24(sp) +; CHECK-V-NEXT: sd a1, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 ; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) @@ -6584,38 +6530,6 @@ ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: addi sp, sp, 80 ; CHECK-V-NEXT: ret -; CHECK-V-NEXT: .LBB50_12: # %entry -; CHECK-V-NEXT: li a2, 1 -; CHECK-V-NEXT: blez s0, .LBB50_2 -; CHECK-V-NEXT: .LBB50_13: # %entry -; CHECK-V-NEXT: li s1, 0 -; CHECK-V-NEXT: li a3, 0 -; CHECK-V-NEXT: beq s0, a5, .LBB50_3 -; CHECK-V-NEXT: .LBB50_14: # %entry -; CHECK-V-NEXT: mv a3, s1 -; CHECK-V-NEXT: blez a1, .LBB50_4 -; CHECK-V-NEXT: .LBB50_15: # %entry -; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: li a4, 0 -; CHECK-V-NEXT: beq a1, a5, .LBB50_5 -; CHECK-V-NEXT: .LBB50_16: # %entry -; CHECK-V-NEXT: mv a4, a0 -; CHECK-V-NEXT: blez s0, .LBB50_6 -; CHECK-V-NEXT: .LBB50_17: # %entry -; CHECK-V-NEXT: li s0, 1 -; CHECK-V-NEXT: mv a0, a3 -; CHECK-V-NEXT: bgtz s0, .LBB50_7 -; CHECK-V-NEXT: .LBB50_18: # %entry -; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: beqz s0, .LBB50_8 -; CHECK-V-NEXT: .LBB50_19: # %entry -; CHECK-V-NEXT: mv a3, a0 -; CHECK-V-NEXT: mv a0, a4 -; CHECK-V-NEXT: bgtz a2, .LBB50_9 -; CHECK-V-NEXT: .LBB50_20: # %entry -; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: bnez a2, .LBB50_10 -; CHECK-V-NEXT: j .LBB50_11 entry: %conv = fptosi <2 x float> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -6878,36 +6792,28 @@ ; CHECK-NOV-NEXT: mv a0, s2 ; CHECK-NOV-NEXT: call __extendhfsf2@plt ; CHECK-NOV-NEXT: call __fixunssfti@plt -; CHECK-NOV-NEXT: mv a2, a0 -; CHECK-NOV-NEXT: mv a3, a1 -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: beqz a3, .LBB52_2 +; CHECK-NOV-NEXT: beqz a1, .LBB52_2 ; CHECK-NOV-NEXT: # %bb.1: # %entry -; CHECK-NOV-NEXT: mv a2, a1 +; CHECK-NOV-NEXT: li a0, 0 ; CHECK-NOV-NEXT: .LBB52_2: # %entry -; CHECK-NOV-NEXT: li a4, 1 -; CHECK-NOV-NEXT: mv a0, a1 -; CHECK-NOV-NEXT: bne a3, a4, .LBB52_7 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: and a0, a1, a0 +; CHECK-NOV-NEXT: beqz s1, .LBB52_4 ; CHECK-NOV-NEXT: # %bb.3: # %entry -; CHECK-NOV-NEXT: bnez s1, .LBB52_8 +; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: .LBB52_4: # 
%entry -; CHECK-NOV-NEXT: beq s1, a4, .LBB52_6 -; CHECK-NOV-NEXT: .LBB52_5: # %entry -; CHECK-NOV-NEXT: mv a1, s0 -; CHECK-NOV-NEXT: .LBB52_6: # %entry +; CHECK-NOV-NEXT: addi a1, s1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: and a1, a1, s0 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s2, 0(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: addi sp, sp, 32 ; CHECK-NOV-NEXT: ret -; CHECK-NOV-NEXT: .LBB52_7: # %entry -; CHECK-NOV-NEXT: mv a0, a2 -; CHECK-NOV-NEXT: beqz s1, .LBB52_4 -; CHECK-NOV-NEXT: .LBB52_8: # %entry -; CHECK-NOV-NEXT: mv s0, a1 -; CHECK-NOV-NEXT: bne s1, a4, .LBB52_5 -; CHECK-NOV-NEXT: j .LBB52_6 ; ; CHECK-V-LABEL: utesth_f16i64_mm: ; CHECK-V: # %bb.0: # %entry @@ -6930,23 +6836,24 @@ ; CHECK-V-NEXT: mv a0, s2 ; CHECK-V-NEXT: call __extendhfsf2@plt ; CHECK-V-NEXT: call __fixunssfti@plt -; CHECK-V-NEXT: li a2, 0 ; CHECK-V-NEXT: beqz a1, .LBB52_2 ; CHECK-V-NEXT: # %bb.1: # %entry -; CHECK-V-NEXT: mv a0, a2 +; CHECK-V-NEXT: li a0, 0 ; CHECK-V-NEXT: .LBB52_2: # %entry -; CHECK-V-NEXT: li a4, 1 -; CHECK-V-NEXT: mv a3, a2 -; CHECK-V-NEXT: bne a1, a4, .LBB52_7 +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: seqz a1, a1 +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: and a0, a1, a0 +; CHECK-V-NEXT: beqz s1, .LBB52_4 ; CHECK-V-NEXT: # %bb.3: # %entry -; CHECK-V-NEXT: bnez s1, .LBB52_8 +; CHECK-V-NEXT: li s0, 0 ; CHECK-V-NEXT: .LBB52_4: # %entry -; CHECK-V-NEXT: beq s1, a4, .LBB52_6 -; CHECK-V-NEXT: .LBB52_5: # %entry -; CHECK-V-NEXT: mv a2, s0 -; CHECK-V-NEXT: .LBB52_6: # %entry -; CHECK-V-NEXT: sd a2, 8(sp) -; CHECK-V-NEXT: sd a3, 0(sp) +; CHECK-V-NEXT: addi a1, s1, -1 +; CHECK-V-NEXT: seqz a1, a1 +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: and a1, a1, s0 +; CHECK-V-NEXT: sd a1, 8(sp) +; CHECK-V-NEXT: sd a0, 0(sp) ; CHECK-V-NEXT: addi a0, sp, 8 ; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v9, (a0) @@ -6960,13 +6867,6 @@ ; CHECK-V-NEXT: ld s2, 16(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: addi sp, sp, 48 ; CHECK-V-NEXT: ret -; CHECK-V-NEXT: .LBB52_7: # %entry -; CHECK-V-NEXT: mv a3, a0 -; CHECK-V-NEXT: beqz s1, .LBB52_4 -; CHECK-V-NEXT: .LBB52_8: # %entry -; CHECK-V-NEXT: mv s0, a2 -; CHECK-V-NEXT: bne s1, a4, .LBB52_5 -; CHECK-V-NEXT: j .LBB52_6 entry: %conv = fptoui <2 x half> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -6995,75 +6895,63 @@ ; CHECK-NOV-NEXT: mv a0, s2 ; CHECK-NOV-NEXT: call __extendhfsf2@plt ; CHECK-NOV-NEXT: call __fixsfti@plt -; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: li a5, 1 ; CHECK-NOV-NEXT: mv a3, a1 -; CHECK-NOV-NEXT: bgtz a1, .LBB53_12 +; CHECK-NOV-NEXT: bgtz a1, .LBB53_14 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: mv a4, s1 -; CHECK-NOV-NEXT: bgtz s1, .LBB53_13 +; CHECK-NOV-NEXT: bgtz s1, .LBB53_15 ; CHECK-NOV-NEXT: .LBB53_2: # %entry -; CHECK-NOV-NEXT: bgtz a2, .LBB53_14 +; CHECK-NOV-NEXT: bgtz a1, .LBB53_16 ; CHECK-NOV-NEXT: .LBB53_3: # %entry -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: bne a2, a5, .LBB53_15 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: blez s1, .LBB53_5 ; CHECK-NOV-NEXT: .LBB53_4: # %entry -; CHECK-NOV-NEXT: bgtz s1, .LBB53_16 +; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: .LBB53_5: # %entry -; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: bne s1, a5, .LBB53_17 -; CHECK-NOV-NEXT: .LBB53_6: # %entry -; CHECK-NOV-NEXT: mv a2, a0 -; 
CHECK-NOV-NEXT: blez a4, .LBB53_18 +; CHECK-NOV-NEXT: addi a2, s1, -1 +; CHECK-NOV-NEXT: seqz a2, a2 +; CHECK-NOV-NEXT: addi a2, a2, -1 +; CHECK-NOV-NEXT: and a2, a2, s0 +; CHECK-NOV-NEXT: seqz a5, a1 +; CHECK-NOV-NEXT: mv a1, a2 +; CHECK-NOV-NEXT: bgtz a4, .LBB53_7 +; CHECK-NOV-NEXT: # %bb.6: # %entry +; CHECK-NOV-NEXT: li a1, 0 ; CHECK-NOV-NEXT: .LBB53_7: # %entry -; CHECK-NOV-NEXT: bnez a4, .LBB53_19 -; CHECK-NOV-NEXT: .LBB53_8: # %entry +; CHECK-NOV-NEXT: addi a5, a5, -1 +; CHECK-NOV-NEXT: beqz a4, .LBB53_9 +; CHECK-NOV-NEXT: # %bb.8: # %entry ; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: blez a3, .LBB53_20 ; CHECK-NOV-NEXT: .LBB53_9: # %entry -; CHECK-NOV-NEXT: beqz a3, .LBB53_11 -; CHECK-NOV-NEXT: .LBB53_10: # %entry -; CHECK-NOV-NEXT: mv a1, a2 +; CHECK-NOV-NEXT: and a1, a5, a0 +; CHECK-NOV-NEXT: mv a0, a1 +; CHECK-NOV-NEXT: bgtz a3, .LBB53_11 +; CHECK-NOV-NEXT: # %bb.10: # %entry +; CHECK-NOV-NEXT: li a0, 0 ; CHECK-NOV-NEXT: .LBB53_11: # %entry +; CHECK-NOV-NEXT: beqz a3, .LBB53_13 +; CHECK-NOV-NEXT: # %bb.12: # %entry +; CHECK-NOV-NEXT: mv a1, a0 +; CHECK-NOV-NEXT: .LBB53_13: # %entry +; CHECK-NOV-NEXT: mv a0, a2 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s2, 0(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: addi sp, sp, 32 ; CHECK-NOV-NEXT: ret -; CHECK-NOV-NEXT: .LBB53_12: # %entry +; CHECK-NOV-NEXT: .LBB53_14: # %entry ; CHECK-NOV-NEXT: li a3, 1 ; CHECK-NOV-NEXT: mv a4, s1 ; CHECK-NOV-NEXT: blez s1, .LBB53_2 -; CHECK-NOV-NEXT: .LBB53_13: # %entry -; CHECK-NOV-NEXT: li a4, 1 -; CHECK-NOV-NEXT: blez a2, .LBB53_3 -; CHECK-NOV-NEXT: .LBB53_14: # %entry -; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: li a1, 0 -; CHECK-NOV-NEXT: beq a2, a5, .LBB53_4 ; CHECK-NOV-NEXT: .LBB53_15: # %entry -; CHECK-NOV-NEXT: mv a1, a0 -; CHECK-NOV-NEXT: blez s1, .LBB53_5 +; CHECK-NOV-NEXT: li a4, 1 +; CHECK-NOV-NEXT: blez a1, .LBB53_3 ; CHECK-NOV-NEXT: .LBB53_16: # %entry -; CHECK-NOV-NEXT: li s0, 0 ; CHECK-NOV-NEXT: li a0, 0 -; CHECK-NOV-NEXT: beq s1, a5, .LBB53_6 -; CHECK-NOV-NEXT: .LBB53_17: # %entry -; CHECK-NOV-NEXT: mv a0, s0 -; CHECK-NOV-NEXT: mv a2, a0 -; CHECK-NOV-NEXT: bgtz a4, .LBB53_7 -; CHECK-NOV-NEXT: .LBB53_18: # %entry -; CHECK-NOV-NEXT: li a2, 0 -; CHECK-NOV-NEXT: beqz a4, .LBB53_8 -; CHECK-NOV-NEXT: .LBB53_19: # %entry -; CHECK-NOV-NEXT: mv a0, a2 -; CHECK-NOV-NEXT: mv a2, a1 -; CHECK-NOV-NEXT: bgtz a3, .LBB53_9 -; CHECK-NOV-NEXT: .LBB53_20: # %entry -; CHECK-NOV-NEXT: li a2, 0 -; CHECK-NOV-NEXT: bnez a3, .LBB53_10 -; CHECK-NOV-NEXT: j .LBB53_11 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: bgtz s1, .LBB53_4 +; CHECK-NOV-NEXT: j .LBB53_5 ; ; CHECK-V-LABEL: ustest_f16i64_mm: ; CHECK-V: # %bb.0: # %entry @@ -7085,37 +6973,46 @@ ; CHECK-V-NEXT: mv a0, s2 ; CHECK-V-NEXT: call __extendhfsf2@plt ; CHECK-V-NEXT: call __fixsfti@plt -; CHECK-V-NEXT: li a5, 1 ; CHECK-V-NEXT: mv a2, a1 -; CHECK-V-NEXT: bgtz a1, .LBB53_12 +; CHECK-V-NEXT: bgtz a1, .LBB53_14 ; CHECK-V-NEXT: # %bb.1: # %entry -; CHECK-V-NEXT: mv a4, s1 -; CHECK-V-NEXT: bgtz s1, .LBB53_13 +; CHECK-V-NEXT: mv a3, s1 +; CHECK-V-NEXT: bgtz s1, .LBB53_15 ; CHECK-V-NEXT: .LBB53_2: # %entry -; CHECK-V-NEXT: bgtz a1, .LBB53_14 +; CHECK-V-NEXT: bgtz a1, .LBB53_16 ; CHECK-V-NEXT: .LBB53_3: # %entry -; CHECK-V-NEXT: li a3, 0 -; CHECK-V-NEXT: bne a1, a5, .LBB53_15 +; CHECK-V-NEXT: addi a4, a1, -1 +; CHECK-V-NEXT: blez s1, .LBB53_5 ; CHECK-V-NEXT: .LBB53_4: # %entry -; CHECK-V-NEXT: bgtz s1, .LBB53_16 
+; CHECK-V-NEXT: li s0, 0 ; CHECK-V-NEXT: .LBB53_5: # %entry -; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: bne s1, a5, .LBB53_17 -; CHECK-V-NEXT: .LBB53_6: # %entry -; CHECK-V-NEXT: mv a1, a0 -; CHECK-V-NEXT: blez a4, .LBB53_18 +; CHECK-V-NEXT: addi a1, s1, -1 +; CHECK-V-NEXT: seqz a1, a1 +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: and a1, a1, s0 +; CHECK-V-NEXT: seqz a5, a4 +; CHECK-V-NEXT: mv a4, a1 +; CHECK-V-NEXT: bgtz a3, .LBB53_7 +; CHECK-V-NEXT: # %bb.6: # %entry +; CHECK-V-NEXT: li a4, 0 ; CHECK-V-NEXT: .LBB53_7: # %entry -; CHECK-V-NEXT: bnez a4, .LBB53_19 -; CHECK-V-NEXT: .LBB53_8: # %entry -; CHECK-V-NEXT: mv a1, a3 -; CHECK-V-NEXT: blez a2, .LBB53_20 +; CHECK-V-NEXT: addi a5, a5, -1 +; CHECK-V-NEXT: beqz a3, .LBB53_9 +; CHECK-V-NEXT: # %bb.8: # %entry +; CHECK-V-NEXT: mv a1, a4 ; CHECK-V-NEXT: .LBB53_9: # %entry -; CHECK-V-NEXT: beqz a2, .LBB53_11 -; CHECK-V-NEXT: .LBB53_10: # %entry -; CHECK-V-NEXT: mv a3, a1 +; CHECK-V-NEXT: and a0, a5, a0 +; CHECK-V-NEXT: mv a3, a0 +; CHECK-V-NEXT: bgtz a2, .LBB53_11 +; CHECK-V-NEXT: # %bb.10: # %entry +; CHECK-V-NEXT: li a3, 0 ; CHECK-V-NEXT: .LBB53_11: # %entry -; CHECK-V-NEXT: sd a3, 8(sp) -; CHECK-V-NEXT: sd a0, 0(sp) +; CHECK-V-NEXT: beqz a2, .LBB53_13 +; CHECK-V-NEXT: # %bb.12: # %entry +; CHECK-V-NEXT: mv a0, a3 +; CHECK-V-NEXT: .LBB53_13: # %entry +; CHECK-V-NEXT: sd a0, 8(sp) +; CHECK-V-NEXT: sd a1, 0(sp) ; CHECK-V-NEXT: addi a0, sp, 8 ; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v9, (a0) @@ -7129,39 +7026,18 @@ ; CHECK-V-NEXT: ld s2, 16(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: addi sp, sp, 48 ; CHECK-V-NEXT: ret -; CHECK-V-NEXT: .LBB53_12: # %entry +; CHECK-V-NEXT: .LBB53_14: # %entry ; CHECK-V-NEXT: li a2, 1 -; CHECK-V-NEXT: mv a4, s1 +; CHECK-V-NEXT: mv a3, s1 ; CHECK-V-NEXT: blez s1, .LBB53_2 -; CHECK-V-NEXT: .LBB53_13: # %entry -; CHECK-V-NEXT: li a4, 1 -; CHECK-V-NEXT: blez a1, .LBB53_3 -; CHECK-V-NEXT: .LBB53_14: # %entry -; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: li a3, 0 -; CHECK-V-NEXT: beq a1, a5, .LBB53_4 ; CHECK-V-NEXT: .LBB53_15: # %entry -; CHECK-V-NEXT: mv a3, a0 -; CHECK-V-NEXT: blez s1, .LBB53_5 +; CHECK-V-NEXT: li a3, 1 +; CHECK-V-NEXT: blez a1, .LBB53_3 ; CHECK-V-NEXT: .LBB53_16: # %entry -; CHECK-V-NEXT: li s0, 0 ; CHECK-V-NEXT: li a0, 0 -; CHECK-V-NEXT: beq s1, a5, .LBB53_6 -; CHECK-V-NEXT: .LBB53_17: # %entry -; CHECK-V-NEXT: mv a0, s0 -; CHECK-V-NEXT: mv a1, a0 -; CHECK-V-NEXT: bgtz a4, .LBB53_7 -; CHECK-V-NEXT: .LBB53_18: # %entry -; CHECK-V-NEXT: li a1, 0 -; CHECK-V-NEXT: beqz a4, .LBB53_8 -; CHECK-V-NEXT: .LBB53_19: # %entry -; CHECK-V-NEXT: mv a0, a1 -; CHECK-V-NEXT: mv a1, a3 -; CHECK-V-NEXT: bgtz a2, .LBB53_9 -; CHECK-V-NEXT: .LBB53_20: # %entry -; CHECK-V-NEXT: li a1, 0 -; CHECK-V-NEXT: bnez a2, .LBB53_10 -; CHECK-V-NEXT: j .LBB53_11 +; CHECK-V-NEXT: addi a4, a1, -1 +; CHECK-V-NEXT: bgtz s1, .LBB53_4 +; CHECK-V-NEXT: j .LBB53_5 entry: %conv = fptosi <2 x half> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> ) Index: llvm/test/CodeGen/RISCV/half-convert.ll =================================================================== --- llvm/test/CodeGen/RISCV/half-convert.ll +++ llvm/test/CodeGen/RISCV/half-convert.ll @@ -158,12 +158,9 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: bnez a1, .LBB1_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: .LBB1_6: # %start +; RV32I-NEXT: snez a0, a0 +; 
RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s2 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -204,12 +201,9 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: bnez a1, .LBB1_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a0, s2 -; RV64I-NEXT: .LBB1_6: # %start +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -327,25 +321,23 @@ ; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and a0, a0, s0 ; RV32I-NEXT: call __extendhfsf2@plt +; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: call __fixunssfsi@plt ; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __gesf2@plt ; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: call __fixunssfsi@plt -; RV32I-NEXT: li s3, 0 -; RV32I-NEXT: bltz s2, .LBB3_2 -; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv s3, a0 -; RV32I-NEXT: .LBB3_2: # %start ; RV32I-NEXT: lui a0, 292864 ; RV32I-NEXT: addi a1, a0, -256 -; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: call __gtsf2@plt -; RV32I-NEXT: bgtz a0, .LBB3_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv s0, s3 -; RV32I-NEXT: .LBB3_4: # %start +; RV32I-NEXT: bgtz a0, .LBB3_2 +; RV32I-NEXT: # %bb.1: # %start +; RV32I-NEXT: slti a0, s2, 0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and s0, a0, s1 +; RV32I-NEXT: .LBB3_2: # %start ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload @@ -367,25 +359,23 @@ ; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and a0, a0, s0 ; RV64I-NEXT: call __extendhfsf2@plt +; RV64I-NEXT: mv s3, a0 +; RV64I-NEXT: call __fixunssfdi@plt ; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt ; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: mv a0, s1 -; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s3, 0 -; RV64I-NEXT: bltz s2, .LBB3_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s3, a0 -; RV64I-NEXT: .LBB3_2: # %start ; RV64I-NEXT: lui a0, 292864 ; RV64I-NEXT: addiw a1, a0, -256 -; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a0, s3 ; RV64I-NEXT: call __gtsf2@plt -; RV64I-NEXT: bgtz a0, .LBB3_4 -; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: mv s0, s3 -; RV64I-NEXT: .LBB3_4: # %start +; RV64I-NEXT: bgtz a0, .LBB3_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s2, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and s0, a0, s1 +; RV64I-NEXT: .LBB3_2: # %start ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload @@ -505,12 +495,9 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: bnez a1, .LBB5_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: .LBB5_6: # %start +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s2 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -553,12 +540,9 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: mv 
a1, a0 -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: bnez a1, .LBB5_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a0, s2 -; RV64I-NEXT: .LBB5_6: # %start +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -775,27 +759,27 @@ ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srli a0, a0, 48 ; RV64I-NEXT: call __extendhfsf2@plt -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s1, 0 -; RV64I-NEXT: bltz s2, .LBB8_2 -; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: .LBB8_2: # %start ; RV64I-NEXT: lui a0, 325632 ; RV64I-NEXT: addiw a1, a0, -1 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt -; RV64I-NEXT: blez a0, .LBB8_4 -; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: bgtz a0, .LBB8_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: j .LBB8_3 +; RV64I-NEXT: .LBB8_2: ; RV64I-NEXT: li a0, -1 -; RV64I-NEXT: srli s1, a0, 32 -; RV64I-NEXT: .LBB8_4: # %start -; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .LBB8_3: # %start ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -979,63 +963,57 @@ ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srli a0, a0, 16 ; RV32I-NEXT: call __extendhfsf2@plt -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lui a0, 389120 -; RV32I-NEXT: addi s2, a0, -1 -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: mv a1, s2 -; RV32I-NEXT: call __gtsf2@plt -; RV32I-NEXT: li s0, 0 -; RV32I-NEXT: sgtz a0, a0 -; RV32I-NEXT: neg s5, a0 -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: call __fixsfdi@plt -; RV32I-NEXT: mv s3, a0 -; RV32I-NEXT: mv s4, a1 +; RV32I-NEXT: mv s0, a0 ; RV32I-NEXT: lui a1, 913408 -; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __gesf2@plt -; RV32I-NEXT: bltz a0, .LBB10_2 +; RV32I-NEXT: mv s4, a0 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: call __fixsfdi@plt +; RV32I-NEXT: mv s5, a0 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: call __unordsf2@plt +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi s1, a0, -1 +; RV32I-NEXT: lui a0, 389120 +; RV32I-NEXT: addi s3, a0, -1 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: call __gtsf2@plt +; RV32I-NEXT: bgtz a0, .LBB10_2 ; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: or s5, s5, s3 +; RV32I-NEXT: slti a0, s4, 0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s5 +; RV32I-NEXT: and s1, s1, a0 ; RV32I-NEXT: .LBB10_2: # %start -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: mv s3, s0 -; RV32I-NEXT: bnez a0, .LBB10_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv s3, s5 -; RV32I-NEXT: .LBB10_4: # %start ; RV32I-NEXT: lui a1, 913408 -; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __gesf2@plt -; RV32I-NEXT: lui s6, 524288 ; RV32I-NEXT: lui s5, 524288 -; RV32I-NEXT: bltz a0, .LBB10_6 -; 
RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv s5, s4 -; RV32I-NEXT: .LBB10_6: # %start -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: lui s4, 524288 +; RV32I-NEXT: bltz a0, .LBB10_4 +; RV32I-NEXT: # %bb.3: # %start +; RV32I-NEXT: mv s4, s2 +; RV32I-NEXT: .LBB10_4: # %start +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __gtsf2@plt -; RV32I-NEXT: bge s0, a0, .LBB10_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: addi s5, s6, -1 -; RV32I-NEXT: .LBB10_8: # %start -; RV32I-NEXT: mv a0, s1 -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: bnez a0, .LBB10_10 -; RV32I-NEXT: # %bb.9: # %start -; RV32I-NEXT: mv s0, s5 -; RV32I-NEXT: .LBB10_10: # %start -; RV32I-NEXT: mv a0, s3 +; RV32I-NEXT: blez a0, .LBB10_6 +; RV32I-NEXT: # %bb.5: +; RV32I-NEXT: addi s4, s5, -1 +; RV32I-NEXT: .LBB10_6: # %start +; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: call __unordsf2@plt +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a1, a0, s4 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -1043,7 +1021,6 @@ ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 32 ; RV32I-NEXT: ret ; @@ -1083,12 +1060,9 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: mv a1, a0 -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: bnez a1, .LBB10_7 -; RV64I-NEXT: # %bb.6: # %start -; RV64I-NEXT: mv a0, s1 -; RV64I-NEXT: .LBB10_7: # %start +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -2329,12 +2303,10 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: li a1, 0 -; RV32I-NEXT: bnez a0, .LBB32_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a1, s2 -; RV32I-NEXT: .LBB32_6: # %start -; RV32I-NEXT: slli a0, a1, 16 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s2 +; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2376,12 +2348,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB32_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB32_6: # %start -; RV64I-NEXT: slli a0, a1, 48 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 +; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -2496,30 +2466,28 @@ ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: lui a1, 16 -; RV32I-NEXT: addi s2, a1, -1 -; RV32I-NEXT: and a0, a0, s2 +; RV32I-NEXT: addi s3, a1, -1 +; RV32I-NEXT: and a0, a0, s3 ; RV32I-NEXT: call __extendhfsf2@plt +; RV32I-NEXT: mv s2, a0 +; RV32I-NEXT: call __fixunssfsi@plt ; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __gesf2@plt ; 
RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: call __fixunssfsi@plt -; RV32I-NEXT: li s3, 0 -; RV32I-NEXT: bltz s1, .LBB34_2 -; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv s3, a0 -; RV32I-NEXT: .LBB34_2: # %start ; RV32I-NEXT: lui a0, 292864 ; RV32I-NEXT: addi a1, a0, -256 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __gtsf2@plt -; RV32I-NEXT: mv a1, s2 -; RV32I-NEXT: bgtz a0, .LBB34_4 -; RV32I-NEXT: # %bb.3: # %start ; RV32I-NEXT: mv a1, s3 -; RV32I-NEXT: .LBB34_4: # %start -; RV32I-NEXT: and a0, a1, s2 +; RV32I-NEXT: bgtz a0, .LBB34_2 +; RV32I-NEXT: # %bb.1: # %start +; RV32I-NEXT: slti a0, s1, 0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: .LBB34_2: # %start +; RV32I-NEXT: and a0, a1, s3 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -2537,30 +2505,28 @@ ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: lui a1, 16 -; RV64I-NEXT: addiw s2, a1, -1 -; RV64I-NEXT: and a0, a0, s2 +; RV64I-NEXT: addiw s3, a1, -1 +; RV64I-NEXT: and a0, a0, s3 ; RV64I-NEXT: call __extendhfsf2@plt +; RV64I-NEXT: mv s2, a0 +; RV64I-NEXT: call __fixunssfdi@plt ; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s3, 0 -; RV64I-NEXT: bltz s1, .LBB34_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s3, a0 -; RV64I-NEXT: .LBB34_2: # %start ; RV64I-NEXT: lui a0, 292864 ; RV64I-NEXT: addiw a1, a0, -256 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: bgtz a0, .LBB34_4 -; RV64I-NEXT: # %bb.3: # %start ; RV64I-NEXT: mv a1, s3 -; RV64I-NEXT: .LBB34_4: # %start -; RV64I-NEXT: and a0, a1, s2 +; RV64I-NEXT: bgtz a0, .LBB34_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s1, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: .LBB34_2: # %start +; RV64I-NEXT: and a0, a1, s3 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -2718,12 +2684,10 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: li a1, 0 -; RV32I-NEXT: bnez a0, .LBB36_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: .LBB36_6: # %start -; RV32I-NEXT: slli a0, a1, 24 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s1 +; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2764,12 +2728,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB36_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s1 -; RV64I-NEXT: .LBB36_6: # %start -; RV64I-NEXT: slli a0, a1, 56 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -2886,25 +2848,23 @@ ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srli a0, a0, 16 ; RV32I-NEXT: call __extendhfsf2@plt -; RV32I-NEXT: mv s0, a0 +; 
RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __gesf2@plt -; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __fixunssfsi@plt -; RV32I-NEXT: li s2, 0 -; RV32I-NEXT: bltz s1, .LBB38_2 -; RV32I-NEXT: # %bb.1: # %start -; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: .LBB38_2: # %start +; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a1, 276464 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: li a1, 255 -; RV32I-NEXT: bgtz a0, .LBB38_4 -; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: mv a1, s2 -; RV32I-NEXT: .LBB38_4: # %start +; RV32I-NEXT: bgtz a0, .LBB38_2 +; RV32I-NEXT: # %bb.1: # %start +; RV32I-NEXT: slti a0, s0, 0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a1, a0, s1 +; RV32I-NEXT: .LBB38_2: # %start ; RV32I-NEXT: andi a0, a1, 255 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2923,25 +2883,23 @@ ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srli a0, a0, 48 ; RV64I-NEXT: call __extendhfsf2@plt -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s2, 0 -; RV64I-NEXT: bltz s1, .LBB38_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: .LBB38_2: # %start +; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a1, 276464 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: li a1, 255 -; RV64I-NEXT: bgtz a0, .LBB38_4 -; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB38_4: # %start +; RV64I-NEXT: bgtz a0, .LBB38_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a1, a0, s1 +; RV64I-NEXT: .LBB38_2: # %start ; RV64I-NEXT: andi a0, a1, 255 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -3041,27 +2999,28 @@ ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srli a0, a0, 48 ; RV64I-NEXT: call __extendhfsf2@plt -; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __gesf2@plt -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt -; RV64I-NEXT: li s2, 0 -; RV64I-NEXT: bltz s1, .LBB39_2 -; RV64I-NEXT: # %bb.1: # %start -; RV64I-NEXT: mv s2, a0 -; RV64I-NEXT: .LBB39_2: # %start +; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lui a0, 325632 ; RV64I-NEXT: addiw a1, a0, -1 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt -; RV64I-NEXT: blez a0, .LBB39_4 -; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: bgtz a0, .LBB39_2 +; RV64I-NEXT: # %bb.1: # %start +; RV64I-NEXT: slti a0, s0, 0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s1 +; RV64I-NEXT: j .LBB39_3 +; RV64I-NEXT: .LBB39_2: ; RV64I-NEXT: li a0, -1 -; RV64I-NEXT: srli s2, a0, 32 -; RV64I-NEXT: .LBB39_4: # %start -; RV64I-NEXT: slli a0, s2, 32 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .LBB39_3: # %start +; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -3136,12 +3095,9 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __unordsf2@plt -; RV32I-NEXT: mv a1, a0 -; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: bnez a1, 
.LBB40_6 -; RV32I-NEXT: # %bb.5: # %start -; RV32I-NEXT: mv a0, s2 -; RV32I-NEXT: .LBB40_6: # %start +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: and a0, a0, s2 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -3184,12 +3140,10 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: bnez a0, .LBB40_6 -; RV64I-NEXT: # %bb.5: # %start -; RV64I-NEXT: mv a1, s2 -; RV64I-NEXT: .LBB40_6: # %start -; RV64I-NEXT: sext.w a0, a1 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: and a0, a0, s2 +; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload Index: llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll =================================================================== --- llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll +++ llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll @@ -298,30 +298,30 @@ define i64 @not_shl_one_i64(i64 %x) { ; RV32I-LABEL: not_shl_one_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: li a1, 1 -; RV32I-NEXT: sll a2, a1, a0 -; RV32I-NEXT: addi a0, a0, -32 -; RV32I-NEXT: sll a1, a1, a0 -; RV32I-NEXT: slti a0, a0, 0 -; RV32I-NEXT: neg a3, a0 -; RV32I-NEXT: not a1, a1 -; RV32I-NEXT: or a1, a3, a1 -; RV32I-NEXT: not a2, a2 -; RV32I-NEXT: addi a0, a0, -1 -; RV32I-NEXT: or a0, a0, a2 +; RV32I-NEXT: addi a1, a0, -32 +; RV32I-NEXT: li a2, 1 +; RV32I-NEXT: sll a3, a2, a1 +; RV32I-NEXT: slti a1, a1, 0 +; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: and a3, a1, a3 +; RV32I-NEXT: sll a0, a2, a0 +; RV32I-NEXT: not a0, a0 +; RV32I-NEXT: or a0, a1, a0 +; RV32I-NEXT: not a1, a3 ; RV32I-NEXT: ret ; ; RV32ZBB-ZBKB-LABEL: not_shl_one_i64: ; RV32ZBB-ZBKB: # %bb.0: ; RV32ZBB-ZBKB-NEXT: addi a1, a0, -32 -; RV32ZBB-ZBKB-NEXT: li a2, -2 -; RV32ZBB-ZBKB-NEXT: rol a3, a2, a1 -; RV32ZBB-ZBKB-NEXT: slti a4, a1, 0 -; RV32ZBB-ZBKB-NEXT: neg a1, a4 -; RV32ZBB-ZBKB-NEXT: or a1, a1, a3 -; RV32ZBB-ZBKB-NEXT: rol a0, a2, a0 -; RV32ZBB-ZBKB-NEXT: addi a2, a4, -1 -; RV32ZBB-ZBKB-NEXT: or a0, a2, a0 +; RV32ZBB-ZBKB-NEXT: li a2, 1 +; RV32ZBB-ZBKB-NEXT: sll a2, a2, a1 +; RV32ZBB-ZBKB-NEXT: slti a1, a1, 0 +; RV32ZBB-ZBKB-NEXT: addi a1, a1, -1 +; RV32ZBB-ZBKB-NEXT: and a2, a1, a2 +; RV32ZBB-ZBKB-NEXT: li a3, -2 +; RV32ZBB-ZBKB-NEXT: rol a0, a3, a0 +; RV32ZBB-ZBKB-NEXT: or a0, a1, a0 +; RV32ZBB-ZBKB-NEXT: not a1, a2 ; RV32ZBB-ZBKB-NEXT: ret %1 = shl i64 1, %x %2 = xor i64 %1, -1 Index: llvm/test/CodeGen/RISCV/rv32zbs.ll =================================================================== --- llvm/test/CodeGen/RISCV/rv32zbs.ll +++ llvm/test/CodeGen/RISCV/rv32zbs.ll @@ -46,31 +46,37 @@ define i64 @bclr_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: bclr_i64: ; RV32I: # %bb.0: +; RV32I-NEXT: li a4, 1 ; RV32I-NEXT: andi a3, a2, 63 -; RV32I-NEXT: addi a4, a3, -32 -; RV32I-NEXT: li a3, 1 -; RV32I-NEXT: bltz a4, .LBB2_2 +; RV32I-NEXT: addi a5, a3, -32 +; RV32I-NEXT: sll a3, a4, a5 +; RV32I-NEXT: slti a6, a5, 0 +; RV32I-NEXT: addi a6, a6, -1 +; RV32I-NEXT: and a3, a6, a3 +; RV32I-NEXT: not a3, a3 +; RV32I-NEXT: bgez a5, .LBB2_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sll a2, a3, a4 -; RV32I-NEXT: not a2, a2 -; RV32I-NEXT: and a1, a1, a2 -; RV32I-NEXT: ret -; RV32I-NEXT: .LBB2_2: -; RV32I-NEXT: sll a2, a3, a2 +; RV32I-NEXT: sll a2, a4, a2 ; RV32I-NEXT: not a2, a2 ; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: .LBB2_2: +; 
RV32I-NEXT: and a1, a3, a1 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: bclr_i64: ; RV32ZBS: # %bb.0: ; RV32ZBS-NEXT: andi a3, a2, 63 -; RV32ZBS-NEXT: addi a3, a3, -32 -; RV32ZBS-NEXT: bltz a3, .LBB2_2 +; RV32ZBS-NEXT: addi a4, a3, -32 +; RV32ZBS-NEXT: bset a3, zero, a4 +; RV32ZBS-NEXT: slti a5, a4, 0 +; RV32ZBS-NEXT: addi a5, a5, -1 +; RV32ZBS-NEXT: and a3, a5, a3 +; RV32ZBS-NEXT: not a3, a3 +; RV32ZBS-NEXT: bgez a4, .LBB2_2 ; RV32ZBS-NEXT: # %bb.1: -; RV32ZBS-NEXT: bclr a1, a1, a3 -; RV32ZBS-NEXT: ret -; RV32ZBS-NEXT: .LBB2_2: ; RV32ZBS-NEXT: bclr a0, a0, a2 +; RV32ZBS-NEXT: .LBB2_2: +; RV32ZBS-NEXT: and a1, a3, a1 ; RV32ZBS-NEXT: ret %and = and i64 %b, 63 %shl = shl nuw i64 1, %and @@ -168,11 +174,14 @@ ; RV32I-NEXT: bltz a1, .LBB7_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: sll a1, a2, a1 -; RV32I-NEXT: ret +; RV32I-NEXT: j .LBB7_3 ; RV32I-NEXT: .LBB7_2: -; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: sll a0, a2, a0 +; RV32I-NEXT: .LBB7_3: +; RV32I-NEXT: sll a2, a2, a1 +; RV32I-NEXT: slti a1, a1, 0 +; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: and a1, a1, a2 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: bset_i64_zero: @@ -181,11 +190,14 @@ ; RV32ZBS-NEXT: bltz a1, .LBB7_2 ; RV32ZBS-NEXT: # %bb.1: ; RV32ZBS-NEXT: li a0, 0 -; RV32ZBS-NEXT: bset a1, zero, a1 -; RV32ZBS-NEXT: ret +; RV32ZBS-NEXT: j .LBB7_3 ; RV32ZBS-NEXT: .LBB7_2: -; RV32ZBS-NEXT: li a1, 0 ; RV32ZBS-NEXT: bset a0, zero, a0 +; RV32ZBS-NEXT: .LBB7_3: +; RV32ZBS-NEXT: bset a2, zero, a1 +; RV32ZBS-NEXT: slti a1, a1, 0 +; RV32ZBS-NEXT: addi a1, a1, -1 +; RV32ZBS-NEXT: and a1, a1, a2 ; RV32ZBS-NEXT: ret %shl = shl i64 1, %a ret i64 %shl Index: llvm/test/CodeGen/RISCV/rv64zbb.ll =================================================================== --- llvm/test/CodeGen/RISCV/rv64zbb.ll +++ llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -479,19 +479,17 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ffs_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -32 -; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: li s0, 0 +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a0 ; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: and a0, s1, a0 +; RV64I-NEXT: and a0, s0, a0 ; RV64I-NEXT: lui a1, 30667 ; RV64I-NEXT: addiw a1, a1, 1329 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: li a1, 32 -; RV64I-NEXT: beqz s1, .LBB9_2 +; RV64I-NEXT: beqz s0, .LBB9_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: srliw a0, a0, 27 ; RV64I-NEXT: lui a1, %hi(.LCPI9_0) @@ -499,26 +497,22 @@ ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lbu a1, 0(a0) ; RV64I-NEXT: .LBB9_2: -; RV64I-NEXT: beqz s1, .LBB9_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: addi s0, a1, 1 -; RV64I-NEXT: .LBB9_4: -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: addi a0, a1, 1 +; RV64I-NEXT: seqz a1, s0 +; RV64I-NEXT: addi a1, a1, -1 +; RV64I-NEXT: and a0, a1, a0 +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: ffs_i32: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: mv a1, a0 -; RV64ZBB-NEXT: li a0, 0 -; RV64ZBB-NEXT: beqz a1, .LBB9_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: ctzw a0, a1 -; 
RV64ZBB-NEXT: addi a0, a0, 1 -; RV64ZBB-NEXT: .LBB9_2: +; RV64ZBB-NEXT: ctzw a1, a0 +; RV64ZBB-NEXT: addi a1, a1, 1 +; RV64ZBB-NEXT: seqz a0, a0 +; RV64ZBB-NEXT: addi a0, a0, -1 +; RV64ZBB-NEXT: and a0, a0, a1 ; RV64ZBB-NEXT: ret %1 = call i32 @llvm.cttz.i32(i32 %a, i1 true) %2 = add i32 %1, 1 Index: llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll @@ -737,67 +737,59 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v25, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vslidedown.vx v2, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: lui a3, %hi(.LCPI32_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3) ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a2, 3 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: csrr a0, vlenb -; 
CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -809,37 +801,34 @@ ; CHECK-LABEL: vp_ceil_vv_nxv16f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: bltu a0, a1, .LBB33_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: lui a3, %hi(.LCPI33_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3) -; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 +; CHECK-NEXT: vfabs.v v24, v16 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a2, 3 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: sub a1, a0, a1 -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: bltu a0, a1, .LBB33_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll @@ -95,23 +95,21 @@ ; CHECK-LABEL: vfpext_v32f32_v32f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB7_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB7_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfwcvt.f.f.v v16, v24, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB7_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB7_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB7_4: +; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t Index: llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll =================================================================== --- 
llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll @@ -102,21 +102,19 @@ ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB7_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB7_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB7_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB7_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB7_4: +; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll @@ -61,21 +61,19 @@ ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma -; CHECK-NEXT: addi a2, a0, -64 ; CHECK-NEXT: vslidedown.vi v0, v0, 8 -; CHECK-NEXT: bltu a0, a2, .LBB4_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB4_2: +; CHECK-NEXT: addi a1, a0, -64 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB4_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB4_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 64 -; CHECK-NEXT: .LBB4_4: +; CHECK-NEXT: .LBB4_2: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 @@ -257,217 +255,219 @@ ; CHECK-NEXT: add a2, sp, a2 ; CHECK-NEXT: addi a2, a2, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vi v3, v0, 8 +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vi v2, v0, 4 +; CHECK-NEXT: vslidedown.vi v27, v3, 4 +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v27, 2 +; CHECK-NEXT: addi a2, a1, 512 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: addi a3, a1, 640 +; CHECK-NEXT: vle64.v v8, (a3) ; CHECK-NEXT: addi a3, a7, -64 -; CHECK-NEXT: vslidedown.vi v2, v0, 8 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a7, a3, .LBB16_2 +; CHECK-NEXT: sltu a4, a7, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a4, a4, a3 +; CHECK-NEXT: addi a3, a4, -32 +; CHECK-NEXT: sltu a5, a4, a3 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a3, a5, a3 +; CHECK-NEXT: addi a5, a3, -16 +; CHECK-NEXT: sltu a6, a3, a5 +; CHECK-NEXT: addi a6, a6, -1 +; CHECK-NEXT: and a5, a6, a5 +; CHECK-NEXT: vle64.v v16, (a2) +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: li a6, 40 +; CHECK-NEXT: mul a2, a2, a6 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli 
zero, a5, e32, m4, ta, ma +; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 4 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: addi a5, a1, 128 +; CHECK-NEXT: bltu a3, a2, .LBB16_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: li a3, 16 ; CHECK-NEXT: .LBB16_2: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vi v3, v2, 4 -; CHECK-NEXT: addi a6, a4, -32 -; CHECK-NEXT: addi a3, a1, 640 -; CHECK-NEXT: mv a5, a2 -; CHECK-NEXT: bltu a4, a6, .LBB16_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a5, a6 -; CHECK-NEXT: .LBB16_4: ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v3, 2 +; CHECK-NEXT: vslidedown.vi v4, v2, 2 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v16, (a3) -; CHECK-NEXT: addi t0, a5, -16 -; CHECK-NEXT: addi a6, a1, 512 -; CHECK-NEXT: mv a3, a2 -; CHECK-NEXT: bltu a5, t0, .LBB16_6 -; CHECK-NEXT: # %bb.5: -; CHECK-NEXT: mv a3, t0 -; CHECK-NEXT: .LBB16_6: -; CHECK-NEXT: vle64.v v8, (a6) +; CHECK-NEXT: vle64.v v8, (a5) ; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma -; CHECK-NEXT: li a3, 16 +; CHECK-NEXT: li a3, 64 +; CHECK-NEXT: vmv1r.v v0, v27 +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: li a6, 40 +; CHECK-NEXT: mul a5, a5, a6 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vl8re8.v v16, (a5) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: li a6, 48 +; CHECK-NEXT: mul a5, a5, a6 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a7, a3, .LBB16_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: li a7, 64 +; CHECK-NEXT: .LBB16_4: +; CHECK-NEXT: li a3, 32 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v16, (a1) +; CHECK-NEXT: addi a5, a7, -32 +; CHECK-NEXT: sltu a6, a7, a5 +; CHECK-NEXT: addi a6, a6, -1 +; CHECK-NEXT: and a5, a6, a5 +; CHECK-NEXT: addi a6, a5, -16 +; CHECK-NEXT: sltu t0, a5, a6 +; CHECK-NEXT: addi t0, t0, -1 +; CHECK-NEXT: and a6, t0, a6 +; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v0, v4 +; CHECK-NEXT: vnsrl.wi v24, v8, 0, v0.t ; CHECK-NEXT: csrr a6, vlenb -; CHECK-NEXT: slli a6, a6, 4 +; CHECK-NEXT: slli a6, a6, 3 ; CHECK-NEXT: add a6, sp, a6 ; CHECK-NEXT: addi a6, a6, 16 ; CHECK-NEXT: vs8r.v v24, (a6) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a5, a3, .LBB16_8 -; CHECK-NEXT: # %bb.7: +; CHECK-NEXT: bltu a5, a2, .LBB16_6 +; CHECK-NEXT: # %bb.5: ; CHECK-NEXT: li a5, 16 -; CHECK-NEXT: .LBB16_8: +; CHECK-NEXT: .LBB16_6: +; CHECK-NEXT: addi a6, a1, 384 +; CHECK-NEXT: addi a1, a1, 256 ; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma -; CHECK-NEXT: li a5, 64 -; CHECK-NEXT: vmv1r.v v0, v3 -; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t -; CHECK-NEXT: csrr a6, vlenb -; CHECK-NEXT: li t0, 48 -; CHECK-NEXT: mul a6, a6, t0 -; CHECK-NEXT: add a6, sp, a6 -; CHECK-NEXT: addi a6, a6, 16 -; CHECK-NEXT: vs8r.v v16, (a6) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a7, a5, .LBB16_10 +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: li t0, 40 +; CHECK-NEXT: mul a5, a5, t0 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a4, a3, .LBB16_8 +; CHECK-NEXT: 
# %bb.7: +; CHECK-NEXT: li a4, 32 +; CHECK-NEXT: .LBB16_8: +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v4, v3, 2 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v16, (a6) +; CHECK-NEXT: vle64.v v24, (a1) +; CHECK-NEXT: mv a1, a4 +; CHECK-NEXT: bltu a4, a2, .LBB16_10 ; CHECK-NEXT: # %bb.9: -; CHECK-NEXT: li a7, 64 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB16_10: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vi v3, v1, 4 -; CHECK-NEXT: addi t0, a7, -32 -; CHECK-NEXT: addi a5, a1, 128 -; CHECK-NEXT: mv a6, a2 -; CHECK-NEXT: bltu a7, t0, .LBB16_12 -; CHECK-NEXT: # %bb.11: -; CHECK-NEXT: mv a6, t0 -; CHECK-NEXT: .LBB16_12: ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v3, 2 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v16, (a5) -; CHECK-NEXT: addi a5, a6, -16 -; CHECK-NEXT: mv t0, a2 -; CHECK-NEXT: bltu a6, a5, .LBB16_14 -; CHECK-NEXT: # %bb.13: -; CHECK-NEXT: mv t0, a5 -; CHECK-NEXT: .LBB16_14: -; CHECK-NEXT: vle64.v v8, (a1) -; CHECK-NEXT: li a5, 32 -; CHECK-NEXT: vsetvli zero, t0, e32, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t -; CHECK-NEXT: csrr t0, vlenb -; CHECK-NEXT: slli t0, t0, 3 -; CHECK-NEXT: add t0, sp, t0 -; CHECK-NEXT: addi t0, t0, 16 -; CHECK-NEXT: vs8r.v v24, (t0) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a6, a3, .LBB16_16 -; CHECK-NEXT: # %bb.15: -; CHECK-NEXT: li a6, 16 -; CHECK-NEXT: .LBB16_16: -; CHECK-NEXT: addi t0, a1, 384 -; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma +; CHECK-NEXT: vslidedown.vi v2, v1, 2 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v3 -; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t -; CHECK-NEXT: csrr a6, vlenb -; CHECK-NEXT: li t1, 40 -; CHECK-NEXT: mul a6, a6, t1 -; CHECK-NEXT: add a6, sp, a6 -; CHECK-NEXT: addi a6, a6, 16 -; CHECK-NEXT: vs8r.v v16, (a6) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a4, a5, .LBB16_18 -; CHECK-NEXT: # %bb.17: -; CHECK-NEXT: li a4, 32 -; CHECK-NEXT: .LBB16_18: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v2, 2 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (t0) -; CHECK-NEXT: addi t0, a4, -16 -; CHECK-NEXT: addi a6, a1, 256 -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: bltu a4, t0, .LBB16_20 -; CHECK-NEXT: # %bb.19: -; CHECK-NEXT: mv a1, t0 -; CHECK-NEXT: .LBB16_20: -; CHECK-NEXT: vle64.v v8, (a6) +; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t +; CHECK-NEXT: addi a1, a4, -16 +; CHECK-NEXT: sltu a4, a4, a1 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a1, a4, a1 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t +; CHECK-NEXT: vmv1r.v v0, v4 +; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a4, a3, .LBB16_22 -; CHECK-NEXT: # %bb.21: -; CHECK-NEXT: li a4, 16 -; CHECK-NEXT: .LBB16_22: -; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma -; CHECK-NEXT: vmv1r.v v0, v2 -; CHECK-NEXT: vnsrl.wi v24, v8, 0, v0.t -; CHECK-NEXT: bltu a7, a5, .LBB16_24 -; CHECK-NEXT: # %bb.23: +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a7, a3, .LBB16_12 +; CHECK-NEXT: # %bb.11: ; CHECK-NEXT: li a7, 32 -; CHECK-NEXT: .LBB16_24: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a1, a7, -16 -; CHECK-NEXT: vslidedown.vi v0, v1, 2 -; CHECK-NEXT: bltu a7, a1, .LBB16_26 -; CHECK-NEXT: # %bb.25: -; CHECK-NEXT: mv 
a2, a1 -; CHECK-NEXT: .LBB16_26: -; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, ma +; CHECK-NEXT: .LBB16_12: +; CHECK-NEXT: vsetvli zero, a3, e32, m8, tu, ma ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a4, 48 ; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vslideup.vi v8, v16, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vslideup.vi v16, v24, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a4, 48 ; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a4, 40 ; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vslideup.vi v8, v16, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vslideup.vi v16, v24, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a4, 40 ; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vslideup.vi v24, v8, 16 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vslideup.vi v8, v16, 16 +; CHECK-NEXT: addi a1, a7, -16 +; CHECK-NEXT: sltu a4, a7, a1 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a1, a4, a1 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v0, v2 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a2, 24 -; CHECK-NEXT: mul a1, a1, a2 +; CHECK-NEXT: li a4, 24 +; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t +; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a7, a3, .LBB16_28 -; CHECK-NEXT: # %bb.27: +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a7, a2, .LBB16_14 +; CHECK-NEXT: # %bb.13: ; CHECK-NEXT: li a7, 16 -; CHECK-NEXT: .LBB16_28: +; CHECK-NEXT: .LBB16_14: ; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t -; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, ma +; CHECK-NEXT: vl8re8.v 
v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t +; CHECK-NEXT: vsetvli zero, a3, e32, m8, tu, ma ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vslideup.vi v16, v8, 16 -; CHECK-NEXT: vse32.v v16, (a0) +; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vslideup.vi v24, v16, 16 +; CHECK-NEXT: vse32.v v24, (a0) ; CHECK-NEXT: addi a1, a0, 256 -; CHECK-NEXT: vse32.v v24, (a1) +; CHECK-NEXT: vse32.v v8, (a1) ; CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: li a3, 40 @@ -507,21 +507,19 @@ ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB17_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB17_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB17_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB17_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB17_4: +; CHECK-NEXT: .LBB17_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll @@ -666,62 +666,62 @@ ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vslidedown.vi v25, v0, 2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vslidedown.vi v1, v0, 2 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: lui a2, %hi(.LCPI26_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v25, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 3 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded 
Spill -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfabs.v v24, v16, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 @@ -734,69 +734,58 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vmset.m v24 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vmset.m v1 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: lui a2, %hi(.LCPI27_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmv1r.v v25, v24 -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 3 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: 
vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB27_4: +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll @@ -666,62 +666,62 @@ ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vslidedown.vi v25, v0, 2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vslidedown.vi v1, v0, 2 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: lui a2, %hi(.LCPI26_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v25, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 2 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t 
-; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfabs.v v24, v16, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 @@ -734,69 +734,58 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vmset.m v24 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vmset.m v1 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: lui a2, %hi(.LCPI27_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmv1r.v v25, v24 -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: 
vmv1r.v v2, v1 +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 2 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB27_4: +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll @@ -317,21 +317,19 @@ ; CHECK-LABEL: vfptosi_v32i64_v32f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB25_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB25_4: +; CHECK-NEXT: .LBB25_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t @@ -343,21 +341,20 @@ define <32 x i64> 
@vfptosi_v32i64_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v32i64_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 ; CHECK-NEXT: ret %v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) ret <32 x i64> %v Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll @@ -317,21 +317,19 @@ ; CHECK-LABEL: vfptoui_v32i64_v32f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB25_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB25_4: +; CHECK-NEXT: .LBB25_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t @@ -343,21 +341,20 @@ define <32 x i64> @vfptoui_v32i64_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v32i64_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16 -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16 ; CHECK-NEXT: ret %v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> 
insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) ret <32 x i64> %v Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll @@ -121,27 +121,26 @@ define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v64f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a0, -32 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a0, a2, .LBB8_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vslidedown.vi v24, v0, 4 -; CHECK-NEXT: bltu a0, a2, .LBB8_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: .LBB8_4: +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB8_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a1, 32 +; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v25, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v25, v8, v25, v0.t ; CHECK-NEXT: vfmv.f.s ft0, v25 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, ft0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: addi a1, a0, -32 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredusum.vs v8, v16, v8, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -153,27 +152,26 @@ define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_v64f32: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a0, -32 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a0, a2, .LBB9_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB9_2: ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vslidedown.vi v24, v0, 4 -; CHECK-NEXT: bltu a0, a2, .LBB9_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: .LBB9_4: +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB9_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a1, 32 +; CHECK-NEXT: .LBB9_2: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v25, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v25, v8, v25, v0.t ; CHECK-NEXT: vfmv.f.s ft0, v25 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, ft0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: addi a1, a0, -32 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredosum.vs v8, v16, v8, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v8 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -845,27 +845,26 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v64i32: ; CHECK: # 
%bb.0: -; CHECK-NEXT: addi a3, a1, -32 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a1, a3, .LBB49_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB49_2: ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vslidedown.vi v24, v0, 4 -; CHECK-NEXT: bltu a1, a3, .LBB49_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: .LBB49_4: +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: bltu a1, a3, .LBB49_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 32 +; CHECK-NEXT: .LBB49_2: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v25, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a2, e32, m8, tu, ma ; CHECK-NEXT: vredxor.vs v25, v8, v25, v0.t ; CHECK-NEXT: vmv.x.s a0, v25 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, tu, ma +; CHECK-NEXT: addi a0, a1, -32 +; CHECK-NEXT: sltu a1, a1, a0 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vredxor.vs v8, v16, v8, v0.t ; CHECK-NEXT: vmv.x.s a0, v8 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll @@ -251,30 +251,28 @@ define signext i1 @vpreduce_and_v256i1(i1 signext %s, <256 x i1> %v, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v256i1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a1, -128 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bltu a1, a2, .LBB14_2 +; CHECK-NEXT: li a3, 128 +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: bltu a1, a3, .LBB14_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: .LBB14_2: -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: vmnot.m v8, v8 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vcpop.m a2, v8, v0.t -; CHECK-NEXT: li a3, 128 +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma +; CHECK-NEXT: vmnot.m v11, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vcpop.m a2, v11, v0.t ; CHECK-NEXT: seqz a2, a2 -; CHECK-NEXT: bltu a1, a3, .LBB14_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: .LBB14_4: +; CHECK-NEXT: and a0, a2, a0 +; CHECK-NEXT: addi a2, a1, -128 +; CHECK-NEXT: sltu a1, a1, a2 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a1, a1, a2 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmnot.m v8, v11 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmnot.m v8, v8 +; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vcpop.m a1, v8, v0.t ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 -; CHECK-NEXT: and a0, a2, a0 ; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.v256i1(i1 %s, <256 x i1> %v, <256 x i1> %m, i32 %evl) Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll @@ -666,62 +666,62 @@ ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli 
a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vslidedown.vi v25, v0, 2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vslidedown.vi v1, v0, 2 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: lui a2, %hi(.LCPI26_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v25, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 4 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfabs.v v24, v16, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 @@ -734,69 +734,58 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; 
CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vmset.m v24 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vmset.m v1 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: lui a2, %hi(.LCPI27_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmv1r.v v25, v24 -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 4 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB27_4: +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll +++ 
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll @@ -666,62 +666,62 @@ ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vslidedown.vi v25, v0, 2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vslidedown.vi v1, v0, 2 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: lui a2, %hi(.LCPI26_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v25, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfabs.v v24, v16, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, 
vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 @@ -734,69 +734,58 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vmset.m v24 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vmset.m v1 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: lui a2, %hi(.LCPI27_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmv1r.v v25, v24 -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB27_4: +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: 
vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll @@ -666,62 +666,62 @@ ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vslidedown.vi v25, v0, 2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vslidedown.vi v1, v0, 2 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: lui a2, %hi(.LCPI26_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI26_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v25, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 1 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: fsrm a1 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfabs.v v24, v16, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size 
Folded Reload +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t +; CHECK-NEXT: fsrmi a0, 1 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 @@ -734,69 +734,58 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: vmset.m v24 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vmset.m v1 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: lui a2, %hi(.LCPI27_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI27_0)(a2) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmv1r.v v25, v24 -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a1, 1 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB27_4: +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 1 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfcvt.x.f.v v24, 
v16, v0.t ; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll @@ -546,41 +546,48 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: addi a4, a0, 128 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vle16.v v24, (a4) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: addi a3, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma -; CHECK-NEXT: addi a4, a2, -64 +; CHECK-NEXT: addi a0, a2, -64 +; CHECK-NEXT: sltu a3, a2, a0 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a0, a3, a0 ; CHECK-NEXT: vslidedown.vi v0, v0, 8 -; CHECK-NEXT: bltu a2, a4, .LBB43_2 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vmfeq.vv v1, v16, v8, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB43_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB43_2: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma -; CHECK-NEXT: vmfeq.vv v2, v16, v24, v0.t -; CHECK-NEXT: bltu a2, a1, .LBB43_4 -; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: .LBB43_4: +; CHECK-NEXT: .LBB43_2: ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t +; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t ; CHECK-NEXT: vsetivli zero, 16, e8, m1, tu, ma -; CHECK-NEXT: vslideup.vi v16, v2, 8 +; CHECK-NEXT: vslideup.vi v16, v1, 8 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1149,62 +1156,48 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; 
CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a3, a2, -16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a3, .LBB87_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB87_2: -; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: addi a1, a0, 128 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: addi a1, a2, -16 +; CHECK-NEXT: sltu a3, a2, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 +; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v1, v16, v8, v0.t -; CHECK-NEXT: bltu a2, a0, .LBB87_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a0, .LBB87_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: .LBB87_4: +; CHECK-NEXT: .LBB87_2: ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v2 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v16, v1, 2 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -631,59 +631,46 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a4, 24 -; CHECK-NEXT: mul a1, a1, a4 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: li a4, 128 -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded 
Spill -; CHECK-NEXT: addi a0, a0, 128 -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: bltu a3, a4, .LBB51_2 -; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: .LBB51_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vlm.v v24, (a2) -; CHECK-NEXT: vle8.v v16, (a0) -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: addi a0, a3, -128 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vmseq.vv v1, v8, v16, v0.t -; CHECK-NEXT: bltu a3, a0, .LBB51_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a0 -; CHECK-NEXT: .LBB51_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: vlm.v v0, (a2) +; CHECK-NEXT: addi a2, a0, 128 +; CHECK-NEXT: vle8.v v8, (a2) +; CHECK-NEXT: addi a2, a3, -128 +; CHECK-NEXT: sltu a4, a3, a2 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: and a0, a4, a2 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vmseq.vv v1, v16, v8, v0.t +; CHECK-NEXT: bltu a3, a1, .LBB51_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a3, 128 +; CHECK-NEXT: .LBB51_2: +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v2 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmseq.vv v8, v16, v24, v0.t -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -694,23 +681,20 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_v256i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v25, (a1) -; CHECK-NEXT: addi a4, a2, -128 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a4, .LBB52_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a4 -; CHECK-NEXT: .LBB52_2: +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: addi a1, a2, -128 +; CHECK-NEXT: sltu a4, a2, a1 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a1, a4, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t -; CHECK-NEXT: bltu a2, a3, .LBB52_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a3, .LBB52_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: .LBB52_4: +; CHECK-NEXT: .LBB52_2: ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t @@ -726,23 +710,20 @@ define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_swap_v256i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v 
v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v25, (a1) -; CHECK-NEXT: addi a4, a2, -128 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a4, .LBB53_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a4 -; CHECK-NEXT: .LBB53_2: +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: addi a1, a2, -128 +; CHECK-NEXT: sltu a4, a2, a1 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a1, a4, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t -; CHECK-NEXT: bltu a2, a3, .LBB53_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a3, .LBB53_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: .LBB53_4: +; CHECK-NEXT: .LBB53_2: ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t @@ -1338,41 +1319,48 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: addi a4, a0, 128 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vle32.v v24, (a4) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: addi a3, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma -; CHECK-NEXT: addi a4, a2, -32 +; CHECK-NEXT: addi a0, a2, -32 +; CHECK-NEXT: sltu a3, a2, a0 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a0, a3, a0 ; CHECK-NEXT: vslidedown.vi v0, v0, 4 -; CHECK-NEXT: bltu a2, a4, .LBB99_2 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vmseq.vv v1, v16, v8, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB99_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB99_2: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vmseq.vv v2, v16, v24, v0.t -; CHECK-NEXT: bltu a2, a1, .LBB99_4 -; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: .LBB99_4: +; CHECK-NEXT: .LBB99_2: ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmseq.vv v16, v24, v8, v0.t +; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, ma -; CHECK-NEXT: vslideup.vi v16, v2, 4 +; CHECK-NEXT: vslideup.vi v16, v1, 4 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1384,21 +1372,19 @@ ; CHECK-LABEL: icmp_eq_vx_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma -; CHECK-NEXT: addi a3, a1, -32 ; CHECK-NEXT: vslidedown.vi v0, v0, 4 
-; CHECK-NEXT: bltu a1, a3, .LBB100_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB100_2: +; CHECK-NEXT: addi a2, a1, -32 +; CHECK-NEXT: sltu a3, a1, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB100_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB100_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: .LBB100_4: +; CHECK-NEXT: .LBB100_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t @@ -1416,21 +1402,19 @@ ; CHECK-LABEL: icmp_eq_vx_swap_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma -; CHECK-NEXT: addi a3, a1, -32 ; CHECK-NEXT: vslidedown.vi v0, v0, 4 -; CHECK-NEXT: bltu a1, a3, .LBB101_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB101_2: +; CHECK-NEXT: addi a2, a1, -32 +; CHECK-NEXT: sltu a3, a1, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB101_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB101_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: .LBB101_4: +; CHECK-NEXT: .LBB101_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll @@ -154,23 +154,21 @@ ; CHECK-LABEL: vsext_v32i64_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB12_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB12_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vsext.vf2 v16, v24, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB12_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB12_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB12_4: +; CHECK-NEXT: .LBB12_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vsext.vf2 v24, v8, v0.t @@ -183,21 +181,19 @@ define <32 x i64> @vsext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v32i64_v32i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a0, a2, .LBB13_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB13_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vsext.vf2 v16, v24 -; CHECK-NEXT: bltu a0, a1, .LBB13_4 -; CHECK-NEXT: # %bb.3: +; 
CHECK-NEXT: bltu a0, a1, .LBB13_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB13_4: +; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v24, v8 ; CHECK-NEXT: vmv.v.v v8, v24 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll @@ -309,21 +309,19 @@ ; CHECK-LABEL: vsitofp_v32f64_v32i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB25_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB25_4: +; CHECK-NEXT: .LBB25_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t @@ -335,21 +333,20 @@ define <32 x double> @vsitofp_v32f64_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v32f64_v32i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfcvt.f.x.v v16, v16 -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfcvt.f.x.v v16, v16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) ret <32 x double> %v Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll @@ -605,50 +605,48 @@ define <32 x double> @strided_vpload_v32f64(double* %ptr, i32 signext %stride, <32 x i1> %m, i32 zeroext %evl) nounwind { ; CHECK-RV32-LABEL: strided_vpload_v32f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: addi a4, a2, -16 +; CHECK-RV32-NEXT: li a4, 16 ; CHECK-RV32-NEXT: vmv1r.v v8, v0 -; CHECK-RV32-NEXT: li a3, 0 +; CHECK-RV32-NEXT: mv a3, a2 ; CHECK-RV32-NEXT: bltu a2, a4, .LBB33_2 ; CHECK-RV32-NEXT: # %bb.1: -; CHECK-RV32-NEXT: mv a3, a4 +; CHECK-RV32-NEXT: li a3, 16 ; CHECK-RV32-NEXT: .LBB33_2: -; CHECK-RV32-NEXT: li a4, 16 -; CHECK-RV32-NEXT: bltu a2, a4, .LBB33_4 -; CHECK-RV32-NEXT: # 
%bb.3: -; CHECK-RV32-NEXT: li a2, 16 -; CHECK-RV32-NEXT: .LBB33_4: -; CHECK-RV32-NEXT: mul a4, a2, a1 +; CHECK-RV32-NEXT: mul a4, a3, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 +; CHECK-RV32-NEXT: addi a5, a2, -16 +; CHECK-RV32-NEXT: sltu a2, a2, a5 +; CHECK-RV32-NEXT: addi a2, a2, -1 +; CHECK-RV32-NEXT: and a2, a2, a5 ; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2 -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu -; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1, v0.t ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1, v0.t +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-RV32-NEXT: vmv1r.v v0, v8 ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v32f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: addi a4, a2, -16 +; CHECK-RV64-NEXT: li a4, 16 ; CHECK-RV64-NEXT: vmv1r.v v8, v0 -; CHECK-RV64-NEXT: li a3, 0 +; CHECK-RV64-NEXT: mv a3, a2 ; CHECK-RV64-NEXT: bltu a2, a4, .LBB33_2 ; CHECK-RV64-NEXT: # %bb.1: -; CHECK-RV64-NEXT: mv a3, a4 +; CHECK-RV64-NEXT: li a3, 16 ; CHECK-RV64-NEXT: .LBB33_2: -; CHECK-RV64-NEXT: li a4, 16 -; CHECK-RV64-NEXT: bltu a2, a4, .LBB33_4 -; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: li a2, 16 -; CHECK-RV64-NEXT: .LBB33_4: -; CHECK-RV64-NEXT: mul a4, a2, a1 +; CHECK-RV64-NEXT: mul a4, a3, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 +; CHECK-RV64-NEXT: addi a5, a2, -16 +; CHECK-RV64-NEXT: sltu a2, a2, a5 +; CHECK-RV64-NEXT: addi a2, a2, -1 +; CHECK-RV64-NEXT: and a2, a2, a5 ; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu -; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-RV64-NEXT: vmv1r.v v0, v8 ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret @@ -659,43 +657,41 @@ define <32 x double> @strided_vpload_v32f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) nounwind { ; CHECK-RV32-LABEL: strided_vpload_v32f64_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: addi a4, a2, -16 -; CHECK-RV32-NEXT: li a3, 0 +; CHECK-RV32-NEXT: li a4, 16 +; CHECK-RV32-NEXT: mv a3, a2 ; CHECK-RV32-NEXT: bltu a2, a4, .LBB34_2 ; CHECK-RV32-NEXT: # %bb.1: -; CHECK-RV32-NEXT: mv a3, a4 +; CHECK-RV32-NEXT: li a3, 16 ; CHECK-RV32-NEXT: .LBB34_2: -; CHECK-RV32-NEXT: li a4, 16 -; CHECK-RV32-NEXT: bltu a2, a4, .LBB34_4 -; CHECK-RV32-NEXT: # %bb.3: -; CHECK-RV32-NEXT: li a2, 16 -; CHECK-RV32-NEXT: .LBB34_4: -; CHECK-RV32-NEXT: mul a4, a2, a1 +; CHECK-RV32-NEXT: mul a4, a3, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1 +; CHECK-RV32-NEXT: addi a5, a2, -16 +; CHECK-RV32-NEXT: sltu a2, a2, a5 +; CHECK-RV32-NEXT: addi a2, a2, -1 +; CHECK-RV32-NEXT: and a2, a2, a5 ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1 +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v32f64_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: addi a4, a2, -16 -; CHECK-RV64-NEXT: li a3, 0 +; CHECK-RV64-NEXT: li a4, 16 +; CHECK-RV64-NEXT: mv a3, a2 ; CHECK-RV64-NEXT: bltu a2, a4, .LBB34_2 ; CHECK-RV64-NEXT: # %bb.1: -; CHECK-RV64-NEXT: mv 
a3, a4 +; CHECK-RV64-NEXT: li a3, 16 ; CHECK-RV64-NEXT: .LBB34_2: -; CHECK-RV64-NEXT: li a4, 16 -; CHECK-RV64-NEXT: bltu a2, a4, .LBB34_4 -; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: li a2, 16 -; CHECK-RV64-NEXT: .LBB34_4: -; CHECK-RV64-NEXT: mul a4, a2, a1 +; CHECK-RV64-NEXT: mul a4, a3, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1 +; CHECK-RV64-NEXT: addi a5, a2, -16 +; CHECK-RV64-NEXT: sltu a2, a2, a5 +; CHECK-RV64-NEXT: addi a2, a2, -1 +; CHECK-RV64-NEXT: and a2, a2, a5 ; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1 +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement <32 x i1> poison, i1 true, i32 0 @@ -717,51 +713,46 @@ ; CHECK-RV32-NEXT: # %bb.1: ; CHECK-RV32-NEXT: li a3, 32 ; CHECK-RV32-NEXT: .LBB35_2: -; CHECK-RV32-NEXT: addi a5, a3, -16 -; CHECK-RV32-NEXT: li a7, 0 -; CHECK-RV32-NEXT: bltu a3, a5, .LBB35_4 +; CHECK-RV32-NEXT: mul a5, a3, a2 +; CHECK-RV32-NEXT: addi a6, a4, -32 +; CHECK-RV32-NEXT: sltu a4, a4, a6 +; CHECK-RV32-NEXT: addi a4, a4, -1 +; CHECK-RV32-NEXT: and a6, a4, a6 +; CHECK-RV32-NEXT: li a4, 16 +; CHECK-RV32-NEXT: add a5, a1, a5 +; CHECK-RV32-NEXT: bltu a6, a4, .LBB35_4 ; CHECK-RV32-NEXT: # %bb.3: -; CHECK-RV32-NEXT: mv a7, a5 -; CHECK-RV32-NEXT: .LBB35_4: ; CHECK-RV32-NEXT: li a6, 16 -; CHECK-RV32-NEXT: mv a5, a3 -; CHECK-RV32-NEXT: bltu a3, a6, .LBB35_6 +; CHECK-RV32-NEXT: .LBB35_4: +; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma +; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 4 +; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, mu +; CHECK-RV32-NEXT: vlse64.v v16, (a5), a2, v0.t +; CHECK-RV32-NEXT: addi a5, a3, -16 +; CHECK-RV32-NEXT: sltu a6, a3, a5 +; CHECK-RV32-NEXT: addi a6, a6, -1 +; CHECK-RV32-NEXT: and a5, a6, a5 +; CHECK-RV32-NEXT: bltu a3, a4, .LBB35_6 ; CHECK-RV32-NEXT: # %bb.5: -; CHECK-RV32-NEXT: li a5, 16 +; CHECK-RV32-NEXT: li a3, 16 ; CHECK-RV32-NEXT: .LBB35_6: -; CHECK-RV32-NEXT: mul t0, a5, a2 -; CHECK-RV32-NEXT: add t0, a1, t0 +; CHECK-RV32-NEXT: mul a4, a3, a2 +; CHECK-RV32-NEXT: add a4, a1, a4 ; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2 -; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, mu -; CHECK-RV32-NEXT: vlse64.v v16, (t0), a2, v0.t -; CHECK-RV32-NEXT: addi t0, a4, -32 -; CHECK-RV32-NEXT: li a7, 0 -; CHECK-RV32-NEXT: bltu a4, t0, .LBB35_8 -; CHECK-RV32-NEXT: # %bb.7: -; CHECK-RV32-NEXT: mv a7, t0 -; CHECK-RV32-NEXT: .LBB35_8: -; CHECK-RV32-NEXT: bltu a7, a6, .LBB35_10 -; CHECK-RV32-NEXT: # %bb.9: -; CHECK-RV32-NEXT: li a7, 16 -; CHECK-RV32-NEXT: .LBB35_10: -; CHECK-RV32-NEXT: mul a3, a3, a2 -; CHECK-RV32-NEXT: add a3, a1, a3 -; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma -; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 4 -; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, mu -; CHECK-RV32-NEXT: vlse64.v v24, (a3), a2, v0.t ; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV32-NEXT: vlse64.v v24, (a4), a2, v0.t +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-RV32-NEXT: vmv1r.v v0, v8 ; CHECK-RV32-NEXT: vlse64.v v8, (a1), a2, v0.t ; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-RV32-NEXT: vse64.v v8, (a0) ; CHECK-RV32-NEXT: addi a1, a0, 256 ; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma -; CHECK-RV32-NEXT: vse64.v v24, (a1) +; CHECK-RV32-NEXT: vse64.v v16, (a1) ; CHECK-RV32-NEXT: addi a0, a0, 128 ; CHECK-RV32-NEXT: 
vsetivli zero, 16, e64, m8, ta, ma -; CHECK-RV32-NEXT: vse64.v v16, (a0) +; CHECK-RV32-NEXT: vse64.v v24, (a0) ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_load_v33f64: @@ -773,51 +764,46 @@ ; CHECK-RV64-NEXT: # %bb.1: ; CHECK-RV64-NEXT: li a4, 32 ; CHECK-RV64-NEXT: .LBB35_2: -; CHECK-RV64-NEXT: addi a5, a4, -16 -; CHECK-RV64-NEXT: li a7, 0 -; CHECK-RV64-NEXT: bltu a4, a5, .LBB35_4 +; CHECK-RV64-NEXT: mul a5, a4, a2 +; CHECK-RV64-NEXT: addi a6, a3, -32 +; CHECK-RV64-NEXT: sltu a3, a3, a6 +; CHECK-RV64-NEXT: addi a3, a3, -1 +; CHECK-RV64-NEXT: and a6, a3, a6 +; CHECK-RV64-NEXT: li a3, 16 +; CHECK-RV64-NEXT: add a5, a1, a5 +; CHECK-RV64-NEXT: bltu a6, a3, .LBB35_4 ; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: mv a7, a5 -; CHECK-RV64-NEXT: .LBB35_4: ; CHECK-RV64-NEXT: li a6, 16 -; CHECK-RV64-NEXT: mv a5, a4 -; CHECK-RV64-NEXT: bltu a4, a6, .LBB35_6 +; CHECK-RV64-NEXT: .LBB35_4: +; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma +; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 4 +; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, mu +; CHECK-RV64-NEXT: vlse64.v v16, (a5), a2, v0.t +; CHECK-RV64-NEXT: addi a5, a4, -16 +; CHECK-RV64-NEXT: sltu a6, a4, a5 +; CHECK-RV64-NEXT: addi a6, a6, -1 +; CHECK-RV64-NEXT: and a5, a6, a5 +; CHECK-RV64-NEXT: bltu a4, a3, .LBB35_6 ; CHECK-RV64-NEXT: # %bb.5: -; CHECK-RV64-NEXT: li a5, 16 +; CHECK-RV64-NEXT: li a4, 16 ; CHECK-RV64-NEXT: .LBB35_6: -; CHECK-RV64-NEXT: mul t0, a5, a2 -; CHECK-RV64-NEXT: add t0, a1, t0 -; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2 -; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, mu -; CHECK-RV64-NEXT: vlse64.v v16, (t0), a2, v0.t -; CHECK-RV64-NEXT: addi t0, a3, -32 -; CHECK-RV64-NEXT: li a7, 0 -; CHECK-RV64-NEXT: bltu a3, t0, .LBB35_8 -; CHECK-RV64-NEXT: # %bb.7: -; CHECK-RV64-NEXT: mv a7, t0 -; CHECK-RV64-NEXT: .LBB35_8: -; CHECK-RV64-NEXT: bltu a7, a6, .LBB35_10 -; CHECK-RV64-NEXT: # %bb.9: -; CHECK-RV64-NEXT: li a7, 16 -; CHECK-RV64-NEXT: .LBB35_10: ; CHECK-RV64-NEXT: mul a3, a4, a2 ; CHECK-RV64-NEXT: add a3, a1, a3 -; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma -; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 4 -; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, mu -; CHECK-RV64-NEXT: vlse64.v v24, (a3), a2, v0.t +; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV64-NEXT: vlse64.v v24, (a3), a2, v0.t +; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, mu ; CHECK-RV64-NEXT: vmv1r.v v0, v8 ; CHECK-RV64-NEXT: vlse64.v v8, (a1), a2, v0.t ; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-RV64-NEXT: vse64.v v8, (a0) ; CHECK-RV64-NEXT: addi a1, a0, 256 ; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m8, ta, ma -; CHECK-RV64-NEXT: vse64.v v24, (a1) +; CHECK-RV64-NEXT: vse64.v v16, (a1) ; CHECK-RV64-NEXT: addi a0, a0, 128 ; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-RV64-NEXT: vse64.v v16, (a0) +; CHECK-RV64-NEXT: vse64.v v24, (a0) ; CHECK-RV64-NEXT: ret %v = call <33 x double> @llvm.experimental.vp.strided.load.v33f64.p0f64.i64(double* %ptr, i64 %stride, <33 x i1> %mask, i32 %evl) ret <33 x double> %v Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll @@ -503,19 +503,17 @@ ; CHECK-RV32-NEXT: # %bb.1: ; CHECK-RV32-NEXT: li a3, 16 ; 
CHECK-RV32-NEXT: .LBB27_2: -; CHECK-RV32-NEXT: li a4, 0 ; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-RV32-NEXT: addi a5, a2, -16 ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t -; CHECK-RV32-NEXT: bltu a2, a5, .LBB27_4 -; CHECK-RV32-NEXT: # %bb.3: -; CHECK-RV32-NEXT: mv a4, a5 -; CHECK-RV32-NEXT: .LBB27_4: -; CHECK-RV32-NEXT: mul a2, a3, a1 -; CHECK-RV32-NEXT: add a0, a0, a2 +; CHECK-RV32-NEXT: mul a3, a3, a1 +; CHECK-RV32-NEXT: add a0, a0, a3 +; CHECK-RV32-NEXT: addi a3, a2, -16 +; CHECK-RV32-NEXT: sltu a2, a2, a3 +; CHECK-RV32-NEXT: addi a2, a2, -1 +; CHECK-RV32-NEXT: and a2, a2, a3 ; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v16, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; @@ -527,19 +525,17 @@ ; CHECK-RV64-NEXT: # %bb.1: ; CHECK-RV64-NEXT: li a3, 16 ; CHECK-RV64-NEXT: .LBB27_2: -; CHECK-RV64-NEXT: li a4, 0 ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-RV64-NEXT: addi a5, a2, -16 ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t -; CHECK-RV64-NEXT: bltu a2, a5, .LBB27_4 -; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: mv a4, a5 -; CHECK-RV64-NEXT: .LBB27_4: -; CHECK-RV64-NEXT: mul a2, a3, a1 -; CHECK-RV64-NEXT: add a0, a0, a2 +; CHECK-RV64-NEXT: mul a3, a3, a1 +; CHECK-RV64-NEXT: add a0, a0, a3 +; CHECK-RV64-NEXT: addi a3, a2, -16 +; CHECK-RV64-NEXT: sltu a2, a2, a3 +; CHECK-RV64-NEXT: addi a2, a2, -1 +; CHECK-RV64-NEXT: and a2, a2, a3 ; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v16, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v32f64.p0f64.i32(<32 x double> %v, double* %ptr, i32 %stride, <32 x i1> %mask, i32 %evl) @@ -555,17 +551,15 @@ ; CHECK-RV32-NEXT: # %bb.1: ; CHECK-RV32-NEXT: li a3, 16 ; CHECK-RV32-NEXT: .LBB28_2: -; CHECK-RV32-NEXT: li a4, 0 ; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-RV32-NEXT: addi a5, a2, -16 ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1 -; CHECK-RV32-NEXT: bltu a2, a5, .LBB28_4 -; CHECK-RV32-NEXT: # %bb.3: -; CHECK-RV32-NEXT: mv a4, a5 -; CHECK-RV32-NEXT: .LBB28_4: -; CHECK-RV32-NEXT: mul a2, a3, a1 -; CHECK-RV32-NEXT: add a0, a0, a2 -; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-RV32-NEXT: mul a3, a3, a1 +; CHECK-RV32-NEXT: add a0, a0, a3 +; CHECK-RV32-NEXT: addi a3, a2, -16 +; CHECK-RV32-NEXT: sltu a2, a2, a3 +; CHECK-RV32-NEXT: addi a2, a2, -1 +; CHECK-RV32-NEXT: and a2, a2, a3 +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v16, (a0), a1 ; CHECK-RV32-NEXT: ret ; @@ -577,17 +571,15 @@ ; CHECK-RV64-NEXT: # %bb.1: ; CHECK-RV64-NEXT: li a3, 16 ; CHECK-RV64-NEXT: .LBB28_2: -; CHECK-RV64-NEXT: li a4, 0 ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-RV64-NEXT: addi a5, a2, -16 ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1 -; CHECK-RV64-NEXT: bltu a2, a5, .LBB28_4 -; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: mv a4, a5 -; CHECK-RV64-NEXT: .LBB28_4: -; CHECK-RV64-NEXT: mul a2, a3, a1 -; CHECK-RV64-NEXT: add a0, a0, a2 -; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-RV64-NEXT: mul a3, a3, a1 +; CHECK-RV64-NEXT: add a0, a0, a3 +; CHECK-RV64-NEXT: addi a3, a2, -16 +; CHECK-RV64-NEXT: sltu a2, a2, a3 +; CHECK-RV64-NEXT: addi a2, a2, -1 +; 
CHECK-RV64-NEXT: and a2, a2, a3 +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v16, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement <32 x i1> poison, i1 true, i32 0 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll @@ -309,21 +309,19 @@ ; CHECK-LABEL: vuitofp_v32f64_v32i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB25_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB25_4: +; CHECK-NEXT: .LBB25_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t @@ -335,21 +333,20 @@ define <32 x double> @vuitofp_v32f64_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v32f64_v32i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB26_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfcvt.f.xu.v v16, v16 -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfcvt.f.xu.v v16, v16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) ret <32 x double> %v Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -413,23 +413,20 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v25, (a0) -; CHECK-NEXT: addi a3, a1, -128 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: bltu a1, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: addi a0, a1, -128 +; CHECK-NEXT: sltu a3, a1, a0 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a0, a3, a0 ; CHECK-NEXT: vsetvli zero, a0, e8, 
m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t @@ -443,21 +440,20 @@ define <256 x i8> @vadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v258i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, -128 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a0, a1, .LBB33_2 +; CHECK-NEXT: li a2, 128 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB33_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB33_2: -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vadd.vi v16, v16, -1 -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 128 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: addi a1, a0, -128 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer @@ -1533,24 +1529,22 @@ ; RV32-LABEL: vadd_vx_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: li a1, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: addi a2, a0, -16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v24, -1 -; RV32-NEXT: bltu a0, a2, .LBB108_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB108_2: +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: sltu a2, a0, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vadd.vv v16, v16, v24, v0.t -; RV32-NEXT: bltu a0, a1, .LBB108_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a0, a1, .LBB108_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: li a0, 16 -; RV32-NEXT: .LBB108_4: +; RV32-NEXT: .LBB108_2: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vadd.vv v8, v8, v24, v0.t @@ -1559,21 +1553,19 @@ ; RV64-LABEL: vadd_vx_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a1, 0 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: addi a2, a0, -16 ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: bltu a0, a2, .LBB108_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a1, a2 -; RV64-NEXT: .LBB108_2: +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a2, a0, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a1, a2, a1 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: li a1, 16 ; RV64-NEXT: vadd.vi v16, v16, -1, v0.t -; RV64-NEXT: bltu a0, a1, .LBB108_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a0, a1, .LBB108_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: .LBB108_4: +; RV64-NEXT: .LBB108_2: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vadd.vi v8, v8, -1, v0.t @@ -1587,43 +1579,41 @@ define <32 x i64> 
@vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; RV32-LABEL: vadd_vi_v32i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: li a1, 0 -; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: addi a2, a0, -16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: li a2, 16 ; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: mv a1, a0 ; RV32-NEXT: bltu a0, a2, .LBB109_2 ; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a1, a2 +; RV32-NEXT: li a1, 16 ; RV32-NEXT: .LBB109_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: li a1, 16 -; RV32-NEXT: vadd.vv v16, v16, v24 -; RV32-NEXT: bltu a0, a1, .LBB109_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: li a0, 16 -; RV32-NEXT: .LBB109_4: -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v24 +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: sltu a0, a0, a1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vadd.vv v16, v16, v24 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vi_v32i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: addi a1, a0, -16 -; RV64-NEXT: li a2, 0 -; RV64-NEXT: bltu a0, a1, .LBB109_2 +; RV64-NEXT: li a2, 16 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB109_2 ; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a1 -; RV64-NEXT: .LBB109_2: -; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV64-NEXT: li a1, 16 -; RV64-NEXT: vadd.vi v16, v16, -1 -; RV64-NEXT: bltu a0, a1, .LBB109_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: li a0, 16 -; RV64-NEXT: .LBB109_4: -; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: .LBB109_2: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vadd.vi v8, v8, -1 +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a0, a0, a1 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vadd.vi v16, v16, -1 ; RV64-NEXT: ret %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0 %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll @@ -324,59 +324,45 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a3, a2, -16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a3, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: addi a1, a0, 128 +; CHECK-NEXT: vle64.v v8, 
(a1) +; CHECK-NEXT: addi a1, a2, -16 +; CHECK-NEXT: sltu a3, a2, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 +; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v16, v16, v8, v0.t -; CHECK-NEXT: bltu a2, a0, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a0, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v8, v8, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -390,22 +376,21 @@ ; CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: addi a3, a2, -16 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a3, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: mv a0, a2 +; CHECK-NEXT: bltu a2, a1, .LBB27_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vfsgnj.vv v16, v16, v24 -; CHECK-NEXT: bltu a2, a0, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v0 +; CHECK-NEXT: addi a0, a2, -16 +; CHECK-NEXT: sltu a1, a2, a0 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfsgnj.vv v16, v16, v24 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll @@ -322,21 +322,19 @@ ; CHECK-LABEL: vfabs_vv_v32f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; 
CHECK-NEXT: vfabs.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfabs.v v8, v8, v0.t @@ -348,21 +346,20 @@ define <32 x double> @vfabs_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a0, a1, .LBB27_2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfabs.v v16, v16 -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfabs.v v16, v16 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll @@ -657,109 +657,80 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 48 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: addi a1, a2, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 40 -; CHECK-NEXT: mul a1, a1, a3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a3, a4, -16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 5 +; CHECK-NEXT: li a3, 24 +; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a4, a3, .LBB50_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB50_2: +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a2) -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 4 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: 
vs8r.v v8, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a2, 24 -; CHECK-NEXT: mul a1, a1, a2 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: addi a1, a2, 128 +; CHECK-NEXT: addi a2, a4, -16 +; CHECK-NEXT: sltu a3, a4, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: addi a3, a0, 128 +; CHECK-NEXT: vle64.v v16, (a1) +; CHECK-NEXT: vle64.v v8, (a3) +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a2, 40 -; CHECK-NEXT: mul a1, a1, a2 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a2, 40 -; CHECK-NEXT: mul a1, a1, a2 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a4, a0, .LBB50_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a4, a0, .LBB50_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a4, 16 -; CHECK-NEXT: .LBB50_4: +; CHECK-NEXT: .LBB50_2: ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 5 +; CHECK-NEXT: li a1, 24 +; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 40 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 48 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -780,50 +751,49 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; 
CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: addi a3, a4, -16 +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: vle64.v v24, (a2) ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a4, a3, .LBB51_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB51_2: -; CHECK-NEXT: vle64.v v8, (a2) -; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: mv a0, a4 +; CHECK-NEXT: bltu a4, a1, .LBB51_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v24, v16, v8 -; CHECK-NEXT: bltu a4, a0, .LBB51_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a4, 16 -; CHECK-NEXT: .LBB51_4: -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-NEXT: .LBB51_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v0, v8, v24 +; CHECK-NEXT: addi a0, a4, -16 +; CHECK-NEXT: sltu a1, a4, a0 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v0, v16, v8 -; CHECK-NEXT: vmv.v.v v8, v0 -; CHECK-NEXT: vmv8r.v v16, v24 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v24, v16, v8 +; CHECK-NEXT: vmv8r.v v8, v0 +; CHECK-NEXT: vmv.v.v v16, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll @@ -324,59 +324,45 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a3, a2, -16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; 
CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a3, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: addi a1, a0, 128 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: addi a1, a2, -16 +; CHECK-NEXT: sltu a3, a2, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 +; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfmax.vv v16, v16, v8, v0.t -; CHECK-NEXT: bltu a2, a0, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a0, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmax.vv v8, v24, v8, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmax.vv v8, v8, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -390,22 +376,21 @@ ; CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: addi a3, a2, -16 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a3, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: mv a0, a2 +; CHECK-NEXT: bltu a2, a1, .LBB27_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vfmax.vv v16, v16, v24 -; CHECK-NEXT: bltu a2, a0, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v0 +; CHECK-NEXT: addi a0, a2, -16 +; CHECK-NEXT: sltu a1, a2, a0 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfmax.vv v16, v16, v24 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll @@ -324,59 +324,45 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 4 ; 
CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a3, a2, -16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a3, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: addi a1, a0, 128 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: addi a1, a2, -16 +; CHECK-NEXT: sltu a3, a2, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 +; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfmin.vv v16, v16, v8, v0.t -; CHECK-NEXT: bltu a2, a0, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a0, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmin.vv v8, v24, v8, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmin.vv v8, v8, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -390,22 +376,21 @@ ; CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: addi a3, a2, -16 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a3, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: mv a0, a2 +; CHECK-NEXT: bltu a2, a1, .LBB27_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vfmin.vv v16, v16, v24 -; CHECK-NEXT: bltu a2, a0, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v0 +; CHECK-NEXT: addi a0, a2, -16 +; CHECK-NEXT: sltu a1, a2, a0 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, 
ta, ma +; CHECK-NEXT: vfmin.vv v16, v16, v24 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll @@ -657,109 +657,80 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 48 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: addi a1, a2, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 40 -; CHECK-NEXT: mul a1, a1, a3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a3, a4, -16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 5 +; CHECK-NEXT: li a3, 24 +; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a4, a3, .LBB50_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB50_2: +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a2) -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 4 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a2, 24 -; CHECK-NEXT: mul a1, a1, a2 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: addi a1, a2, 128 +; CHECK-NEXT: addi a2, a4, -16 +; CHECK-NEXT: sltu a3, a4, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: addi a3, a0, 128 +; CHECK-NEXT: vle64.v v16, (a1) +; CHECK-NEXT: vle64.v v8, (a3) +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a2, 40 -; CHECK-NEXT: 
mul a1, a1, a2 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a2, 40 -; CHECK-NEXT: mul a1, a1, a2 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a4, a0, .LBB50_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a4, a0, .LBB50_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a4, 16 -; CHECK-NEXT: .LBB50_4: +; CHECK-NEXT: .LBB50_2: ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 5 +; CHECK-NEXT: li a1, 24 +; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 40 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 48 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -780,50 +751,49 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: addi a3, a4, -16 +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: vle64.v v24, (a2) ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a4, a3, .LBB51_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB51_2: -; CHECK-NEXT: vle64.v v8, (a2) -; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: mv a0, a4 +; CHECK-NEXT: bltu a4, a1, .LBB51_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v24, v16, v8 -; CHECK-NEXT: bltu a4, a0, .LBB51_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a4, 16 -; CHECK-NEXT: 
.LBB51_4: -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-NEXT: .LBB51_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v0, v8, v24 +; CHECK-NEXT: addi a0, a4, -16 +; CHECK-NEXT: sltu a1, a4, a0 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v0, v16, v8 -; CHECK-NEXT: vmv.v.v v8, v0 -; CHECK-NEXT: vmv8r.v v16, v24 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v24, v16, v8 +; CHECK-NEXT: vmv8r.v v8, v0 +; CHECK-NEXT: vmv.v.v v16, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll @@ -322,21 +322,19 @@ ; CHECK-LABEL: vfneg_vv_v32f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfneg.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfneg.v v8, v8, v0.t @@ -348,21 +346,20 @@ define <32 x double> @vfneg_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a0, a1, .LBB27_2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfneg.v v16, v16 -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfneg.v v16, v16 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll 
=================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll @@ -322,21 +322,19 @@ ; CHECK-LABEL: vfsqrt_vv_v32f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfsqrt.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t @@ -348,21 +346,20 @@ define <32 x double> @vfsqrt_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v32f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a0, a1, .LBB27_2 +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vfsqrt.v v16, v16 -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: .LBB27_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfsqrt.v v16, v16 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll @@ -288,23 +288,20 @@ define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v25, (a1) -; CHECK-NEXT: addi a4, a2, -128 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a4, .LBB22_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a4 -; CHECK-NEXT: .LBB22_2: +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: addi a1, a2, -128 +; CHECK-NEXT: sltu a4, a2, a1 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a1, a4, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a2, a3, .LBB22_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a3, .LBB22_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: .LBB22_4: +; CHECK-NEXT: .LBB22_2: ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmax.vx v8, v8, 
a0, v0.t @@ -318,21 +315,20 @@ define <256 x i8> @vmax_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_v258i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a1, -128 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bltu a1, a2, .LBB23_2 +; CHECK-NEXT: li a3, 128 +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: bltu a1, a3, .LBB23_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 -; CHECK-NEXT: .LBB23_2: -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vmax.vx v16, v16, a0 -; CHECK-NEXT: bltu a1, a2, .LBB23_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: .LBB23_4: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: .LBB23_2: +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: addi a2, a1, -128 +; CHECK-NEXT: sltu a1, a1, a2 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a1, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vmax.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer @@ -1100,24 +1096,22 @@ ; RV32-LABEL: vmax_vx_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: li a1, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: addi a2, a0, -16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v24, -1 -; RV32-NEXT: bltu a0, a2, .LBB74_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB74_2: +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: sltu a2, a0, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vmax.vv v16, v16, v24, v0.t -; RV32-NEXT: bltu a0, a1, .LBB74_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a0, a1, .LBB74_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: li a0, 16 -; RV32-NEXT: .LBB74_4: +; RV32-NEXT: .LBB74_2: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vmax.vv v8, v8, v24, v0.t @@ -1126,22 +1120,20 @@ ; RV64-LABEL: vmax_vx_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: addi a1, a0, -16 ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: bltu a0, a1, .LBB74_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a1 -; RV64-NEXT: .LBB74_2: +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a2, a0, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a2, a2, a1 ; RV64-NEXT: li a1, -1 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: li a2, 16 ; RV64-NEXT: vmax.vx v16, v16, a1, v0.t -; RV64-NEXT: bltu a0, a2, .LBB74_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a0, a2, .LBB74_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: .LBB74_4: +; RV64-NEXT: .LBB74_2: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vmax.vx v8, v8, a1, v0.t Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll @@ -287,23 +287,20 @@ define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_v258i8: ; CHECK: # %bb.0: +; 
CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v25, (a1) -; CHECK-NEXT: addi a4, a2, -128 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a4, .LBB22_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a4 -; CHECK-NEXT: .LBB22_2: +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: addi a1, a2, -128 +; CHECK-NEXT: sltu a4, a2, a1 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a1, a4, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a2, a3, .LBB22_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a3, .LBB22_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: .LBB22_4: +; CHECK-NEXT: .LBB22_2: ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t @@ -317,21 +314,20 @@ define <256 x i8> @vmaxu_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_v258i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a1, -128 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bltu a1, a2, .LBB23_2 +; CHECK-NEXT: li a3, 128 +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: bltu a1, a3, .LBB23_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 -; CHECK-NEXT: .LBB23_2: -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vmaxu.vx v16, v16, a0 -; CHECK-NEXT: bltu a1, a2, .LBB23_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: .LBB23_4: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: .LBB23_2: +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: addi a2, a1, -128 +; CHECK-NEXT: sltu a1, a1, a2 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a1, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vmaxu.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer @@ -1099,24 +1095,22 @@ ; RV32-LABEL: vmaxu_vx_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: li a1, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: addi a2, a0, -16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v24, -1 -; RV32-NEXT: bltu a0, a2, .LBB74_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB74_2: +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: sltu a2, a0, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vmaxu.vv v16, v16, v24, v0.t -; RV32-NEXT: bltu a0, a1, .LBB74_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a0, a1, .LBB74_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: li a0, 16 -; RV32-NEXT: .LBB74_4: +; RV32-NEXT: .LBB74_2: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vmaxu.vv v8, v8, v24, v0.t @@ -1125,22 +1119,20 @@ ; RV64-LABEL: vmaxu_vx_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: addi a1, a0, -16 ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: bltu a0, a1, .LBB74_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a1 -; RV64-NEXT: .LBB74_2: +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a2, a0, a1 
+; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a2, a2, a1 ; RV64-NEXT: li a1, -1 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: li a2, 16 ; RV64-NEXT: vmaxu.vx v16, v16, a1, v0.t -; RV64-NEXT: bltu a0, a2, .LBB74_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a0, a2, .LBB74_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: .LBB74_4: +; RV64-NEXT: .LBB74_2: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vmaxu.vx v8, v8, a1, v0.t Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll @@ -288,23 +288,20 @@ define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v25, (a1) -; CHECK-NEXT: addi a4, a2, -128 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a4, .LBB22_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a4 -; CHECK-NEXT: .LBB22_2: +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: addi a1, a2, -128 +; CHECK-NEXT: sltu a4, a2, a1 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a1, a4, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a2, a3, .LBB22_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a3, .LBB22_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: .LBB22_4: +; CHECK-NEXT: .LBB22_2: ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t @@ -318,21 +315,20 @@ define <256 x i8> @vmin_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_v258i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a1, -128 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bltu a1, a2, .LBB23_2 +; CHECK-NEXT: li a3, 128 +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: bltu a1, a3, .LBB23_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 -; CHECK-NEXT: .LBB23_2: -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vmin.vx v16, v16, a0 -; CHECK-NEXT: bltu a1, a2, .LBB23_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: .LBB23_4: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: .LBB23_2: +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: addi a2, a1, -128 +; CHECK-NEXT: sltu a1, a1, a2 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a1, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vmin.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer @@ -1100,24 +1096,22 @@ ; RV32-LABEL: vmin_vx_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: li a1, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: addi a2, a0, -16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v24, -1 -; RV32-NEXT: bltu a0, a2, .LBB74_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB74_2: +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: 
sltu a2, a0, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vmin.vv v16, v16, v24, v0.t -; RV32-NEXT: bltu a0, a1, .LBB74_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a0, a1, .LBB74_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: li a0, 16 -; RV32-NEXT: .LBB74_4: +; RV32-NEXT: .LBB74_2: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vmin.vv v8, v8, v24, v0.t @@ -1126,22 +1120,20 @@ ; RV64-LABEL: vmin_vx_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: addi a1, a0, -16 ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: bltu a0, a1, .LBB74_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a1 -; RV64-NEXT: .LBB74_2: +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a2, a0, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a2, a2, a1 ; RV64-NEXT: li a1, -1 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: li a2, 16 ; RV64-NEXT: vmin.vx v16, v16, a1, v0.t -; RV64-NEXT: bltu a0, a2, .LBB74_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a0, a2, .LBB74_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: .LBB74_4: +; RV64-NEXT: .LBB74_2: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vmin.vx v8, v8, a1, v0.t Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll @@ -287,23 +287,20 @@ define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v25, (a1) -; CHECK-NEXT: addi a4, a2, -128 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a4, .LBB22_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a4 -; CHECK-NEXT: .LBB22_2: +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: addi a1, a2, -128 +; CHECK-NEXT: sltu a4, a2, a1 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a1, a4, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a2, a3, .LBB22_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a3, .LBB22_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: .LBB22_4: +; CHECK-NEXT: .LBB22_2: ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t @@ -317,21 +314,20 @@ define <256 x i8> @vminu_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_v258i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a1, -128 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bltu a1, a2, .LBB23_2 +; CHECK-NEXT: li a3, 128 +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: bltu a1, a3, .LBB23_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 -; CHECK-NEXT: .LBB23_2: -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vminu.vx v16, v16, a0 -; CHECK-NEXT: bltu a1, a2, .LBB23_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: .LBB23_4: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: .LBB23_2: +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: addi a2, a1, -128 
+; CHECK-NEXT: sltu a1, a1, a2 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a1, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vminu.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer @@ -1099,24 +1095,22 @@ ; RV32-LABEL: vminu_vx_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: li a1, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: addi a2, a0, -16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v24, -1 -; RV32-NEXT: bltu a0, a2, .LBB74_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB74_2: +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: sltu a2, a0, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vminu.vv v16, v16, v24, v0.t -; RV32-NEXT: bltu a0, a1, .LBB74_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a0, a1, .LBB74_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: li a0, 16 -; RV32-NEXT: .LBB74_4: +; RV32-NEXT: .LBB74_2: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vminu.vv v8, v8, v24, v0.t @@ -1125,22 +1119,20 @@ ; RV64-LABEL: vminu_vx_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: addi a1, a0, -16 ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: bltu a0, a1, .LBB74_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a1 -; RV64-NEXT: .LBB74_2: +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a2, a0, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a2, a2, a1 ; RV64-NEXT: li a1, -1 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: li a2, 16 ; RV64-NEXT: vminu.vx v16, v16, a1, v0.t -; RV64-NEXT: bltu a0, a2, .LBB74_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a0, a2, .LBB74_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: .LBB74_4: +; RV64-NEXT: .LBB74_2: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vminu.vx v8, v8, a1, v0.t Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -285,26 +285,24 @@ ; ; RV64-LABEL: vpgather_baseidx_v32i8: ; RV64: # %bb.0: -; RV64-NEXT: addi a3, a1, -16 ; RV64-NEXT: vmv1r.v v10, v0 -; RV64-NEXT: li a2, 0 -; RV64-NEXT: bltu a1, a3, .LBB13_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB13_2: +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v10, 2 +; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB13_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB13_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB13_4: +; RV64-NEXT: .LBB13_2: ; RV64-NEXT: vsetivli zero, 
16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu @@ -1894,24 +1892,22 @@ define <32 x double> @vpgather_v32f64(<32 x double*> %ptrs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v32f64: ; RV32: # %bb.0: -; RV32-NEXT: addi a2, a0, -16 ; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: li a1, 0 -; RV32-NEXT: bltu a0, a2, .LBB86_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB86_2: +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: sltu a2, a0, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v8, 16 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV32-NEXT: vslidedown.vi v0, v1, 2 +; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v24, v0.t ; RV32-NEXT: li a1, 16 -; RV32-NEXT: bltu a0, a1, .LBB86_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a0, a1, .LBB86_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: li a0, 16 -; RV32-NEXT: .LBB86_4: +; RV32-NEXT: .LBB86_2: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t @@ -1920,22 +1916,20 @@ ; ; RV64-LABEL: vpgather_v32f64: ; RV64: # %bb.0: -; RV64-NEXT: addi a2, a0, -16 ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a1, 0 -; RV64-NEXT: bltu a0, a2, .LBB86_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a1, a2 -; RV64-NEXT: .LBB86_2: +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a2, a0, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a1, a2, a1 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v24, 2 +; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t ; RV64-NEXT: li a1, 16 -; RV64-NEXT: bltu a0, a1, .LBB86_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a0, a1, .LBB86_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: .LBB86_4: +; RV64-NEXT: .LBB86_2: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t @@ -1947,57 +1941,54 @@ define <32 x double> @vpgather_baseidx_v32i8_v32f64(double* %base, <32 x i8> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v32i8_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, v8 -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB87_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB87_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB87_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB87_4: +; RV32-NEXT: vsll.vi v24, v16, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB87_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: 
.LBB87_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v32i8_v32f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v10, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: addi a3, a1, -16 ; RV64-NEXT: vsext.vf8 v24, v8 -; RV64-NEXT: bltu a1, a3, .LBB87_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB87_2: ; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v10, 2 +; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB87_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB87_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB87_4: +; RV64-NEXT: .LBB87_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v10 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t @@ -2010,57 +2001,54 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v32i8_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, v8 -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB88_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB88_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB88_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB88_4: +; RV32-NEXT: vsll.vi v24, v16, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB88_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB88_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v32i8_v32f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v10, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsext.vf8 v24, v8 -; RV64-NEXT: addi a3, a1, -16 ; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: bltu a1, a3, .LBB88_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB88_2: ; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: 
vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v10, 2 +; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB88_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB88_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB88_4: +; RV64-NEXT: .LBB88_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v10 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t @@ -2074,57 +2062,54 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v32i8_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vzext.vf4 v16, v8 -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB89_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB89_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB89_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB89_4: +; RV32-NEXT: vsll.vi v24, v16, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB89_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB89_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v32i8_v32f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v10, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vzext.vf8 v24, v8 -; RV64-NEXT: addi a3, a1, -16 ; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: bltu a1, a3, .LBB89_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB89_2: ; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v10, 2 +; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB89_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB89_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB89_4: +; RV64-NEXT: .LBB89_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v10 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t @@ -2138,57 +2123,54 @@ define <32 x double> @vpgather_baseidx_v32i16_v32f64(double* %base, <32 x i16> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v32i16_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; 
RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v16, v8 -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB90_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB90_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB90_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB90_4: +; RV32-NEXT: vsll.vi v24, v16, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB90_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB90_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v32i16_v32f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: addi a3, a1, -16 ; RV64-NEXT: vsext.vf4 v24, v8 -; RV64-NEXT: bltu a1, a3, .LBB90_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB90_2: ; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v12, 2 +; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB90_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB90_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB90_4: +; RV64-NEXT: .LBB90_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t @@ -2201,57 +2183,54 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i16> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v32i16_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v16, v8 -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB91_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB91_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB91_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB91_4: +; RV32-NEXT: vsll.vi v24, v16, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, 
ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB91_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB91_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v32i16_v32f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vsext.vf4 v0, v16 -; RV64-NEXT: vsext.vf4 v24, v8 -; RV64-NEXT: addi a3, a1, -16 -; RV64-NEXT: vsll.vi v16, v0, 3 -; RV64-NEXT: bltu a1, a3, .LBB91_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB91_2: -; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: vsext.vf4 v24, v16 +; RV64-NEXT: vsext.vf4 v0, v8 +; RV64-NEXT: vsll.vi v16, v24, 3 +; RV64-NEXT: vsll.vi v24, v0, 3 +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v12, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB91_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB91_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB91_4: +; RV64-NEXT: .LBB91_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t @@ -2265,57 +2244,54 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i16> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v32i16_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vzext.vf2 v16, v8 -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB92_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB92_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB92_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB92_4: +; RV32-NEXT: vsll.vi v24, v16, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB92_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB92_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v32i16_v32f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 ; 
RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vzext.vf4 v0, v16 -; RV64-NEXT: vzext.vf4 v24, v8 -; RV64-NEXT: addi a3, a1, -16 -; RV64-NEXT: vsll.vi v16, v0, 3 -; RV64-NEXT: bltu a1, a3, .LBB92_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB92_2: -; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: vzext.vf4 v24, v16 +; RV64-NEXT: vzext.vf4 v0, v8 +; RV64-NEXT: vsll.vi v16, v24, 3 +; RV64-NEXT: vsll.vi v24, v0, 3 +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v12, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB92_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB92_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB92_4: +; RV64-NEXT: .LBB92_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t @@ -2329,56 +2305,53 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(double* %base, <32 x i32> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v32i32_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB93_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB93_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB93_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB93_4: +; RV32-NEXT: vsll.vi v24, v8, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB93_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB93_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v32i32_v32f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v0, v16 ; RV64-NEXT: vsll.vi v16, v0, 3 -; RV64-NEXT: addi a3, a1, -16 ; RV64-NEXT: vsext.vf2 v0, v8 -; RV64-NEXT: bltu a1, a3, .LBB93_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB93_2: ; RV64-NEXT: vsll.vi v8, v0, 3 +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v24, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB93_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB93_2 +; RV64-NEXT: # %bb.1: ; 
RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB93_4: +; RV64-NEXT: .LBB93_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t @@ -2391,29 +2364,28 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i32> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v32i32_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB94_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB94_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB94_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB94_4: +; RV32-NEXT: vsll.vi v24, v8, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB94_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB94_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v32i32_v32f64: @@ -2425,30 +2397,28 @@ ; RV64-NEXT: sub sp, sp, a2 ; RV64-NEXT: addi a2, sp, 16 ; RV64-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vsext.vf2 v24, v16 -; RV64-NEXT: vsext.vf2 v0, v8 -; RV64-NEXT: addi a3, a1, -16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: bltu a1, a3, .LBB94_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB94_2: -; RV64-NEXT: vsll.vi v8, v0, 3 +; RV64-NEXT: vsext.vf2 v0, v16 +; RV64-NEXT: vsext.vf2 v24, v8 +; RV64-NEXT: vsll.vi v16, v0, 3 +; RV64-NEXT: vsll.vi v8, v24, 3 +; RV64-NEXT: addi a2, sp, 16 +; RV64-NEXT: vl1r.v v24, (a2) # Unknown-size Folded Reload +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: addi a3, sp, 16 -; RV64-NEXT: vl1r.v v24, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vslidedown.vi v0, v24, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB94_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB94_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB94_4: +; RV64-NEXT: .LBB94_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t @@ -2466,29 +2436,28 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i32> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v32i32_v32f64: ; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; 
RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: li a3, 16 -; RV32-NEXT: vsll.vi v16, v8, 3 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a3, .LBB95_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: .LBB95_2: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a1, a3, .LBB95_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB95_4: +; RV32-NEXT: vsll.vi v24, v8, 3 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a3, a1, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t +; RV32-NEXT: li a2, 16 +; RV32-NEXT: bltu a1, a2, .LBB95_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB95_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v32i32_v32f64: @@ -2500,30 +2469,28 @@ ; RV64-NEXT: sub sp, sp, a2 ; RV64-NEXT: addi a2, sp, 16 ; RV64-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vzext.vf2 v24, v16 -; RV64-NEXT: vzext.vf2 v0, v8 -; RV64-NEXT: addi a3, a1, -16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: bltu a1, a3, .LBB95_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB95_2: -; RV64-NEXT: vsll.vi v8, v0, 3 +; RV64-NEXT: vzext.vf2 v0, v16 +; RV64-NEXT: vzext.vf2 v24, v8 +; RV64-NEXT: vsll.vi v16, v0, 3 +; RV64-NEXT: vsll.vi v8, v24, 3 +; RV64-NEXT: addi a2, sp, 16 +; RV64-NEXT: vl1r.v v24, (a2) # Unknown-size Folded Reload +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: addi a3, sp, 16 -; RV64-NEXT: vl1r.v v24, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vslidedown.vi v0, v24, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB95_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB95_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB95_4: +; RV64-NEXT: .LBB95_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t @@ -2541,58 +2508,53 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v32f64: ; RV32: # %bb.0: -; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: li a2, 0 ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vnsrl.wi v16, v8, 0 -; RV32-NEXT: li a3, 32 -; RV32-NEXT: vsetvli zero, a3, e32, m8, tu, ma +; RV32-NEXT: li a2, 32 +; RV32-NEXT: vsetvli zero, a2, e32, m8, tu, ma ; RV32-NEXT: vslideup.vi v16, v24, 16 ; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; RV32-NEXT: addi a3, a1, -16 -; RV32-NEXT: vsll.vi v24, v16, 3 +; RV32-NEXT: li a3, 16 +; RV32-NEXT: vsll.vi v16, v16, 3 +; RV32-NEXT: mv a2, a1 
; RV32-NEXT: bltu a1, a3, .LBB96_2 ; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a2, a3 +; RV32-NEXT: li a2, 16 ; RV32-NEXT: .LBB96_2: +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v8, v24, 16 +; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a1, a1, a2 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: and a1, a1, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV32-NEXT: vslidedown.vi v0, v1, 2 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: li a2, 16 -; RV32-NEXT: bltu a1, a2, .LBB96_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: li a1, 16 -; RV32-NEXT: .LBB96_4: +; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; RV32-NEXT: vmv1r.v v0, v1 -; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v32f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: addi a3, a1, -16 -; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: bltu a1, a3, .LBB96_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB96_2: ; RV64-NEXT: vsll.vi v8, v8, 3 +; RV64-NEXT: vsll.vi v16, v16, 3 +; RV64-NEXT: addi a2, a1, -16 +; RV64-NEXT: sltu a3, a1, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v24, 2 +; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t ; RV64-NEXT: li a2, 16 -; RV64-NEXT: bltu a1, a2, .LBB96_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB96_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 -; RV64-NEXT: .LBB96_4: +; RV64-NEXT: .LBB96_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll @@ -381,23 +381,21 @@ define <32 x double> @vpload_v32f64(<32 x double>* %ptr, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v32f64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a3, a1, -16 ; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a1, a3, .LBB31_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB31_2: +; CHECK-NEXT: addi a2, a1, -16 +; CHECK-NEXT: sltu a3, a1, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v8, 2 +; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a3, a0, 128 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v16, (a3), v0.t ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: bltu a1, a2, .LBB31_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB31_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB31_4: +; CHECK-NEXT: .LBB31_2: ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vle64.v v8, (a0), v0.t @@ -420,37 +418,33 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: .LBB32_2: -; CHECK-NEXT: addi a5, a3, -16 -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: bltu a3, a5, .LBB32_4 -; 
CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a5 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: addi a4, a3, -16 +; CHECK-NEXT: sltu a5, a3, a4 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a4, a5, a4 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-NEXT: addi a5, a1, 128 ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v16, (a5), v0.t -; CHECK-NEXT: addi a5, a2, -32 -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: bltu a2, a5, .LBB32_6 -; CHECK-NEXT: # %bb.5: -; CHECK-NEXT: mv a4, a5 -; CHECK-NEXT: .LBB32_6: +; CHECK-NEXT: addi a4, a2, -32 +; CHECK-NEXT: sltu a2, a2, a4 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a4, a2, a4 ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: bltu a4, a2, .LBB32_8 -; CHECK-NEXT: # %bb.7: +; CHECK-NEXT: bltu a4, a2, .LBB32_4 +; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a4, 16 -; CHECK-NEXT: .LBB32_8: +; CHECK-NEXT: .LBB32_4: ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v8, 4 ; CHECK-NEXT: addi a5, a1, 256 ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v24, (a5), v0.t -; CHECK-NEXT: bltu a3, a2, .LBB32_10 -; CHECK-NEXT: # %bb.9: +; CHECK-NEXT: bltu a3, a2, .LBB32_6 +; CHECK-NEXT: # %bb.5: ; CHECK-NEXT: li a3, 16 -; CHECK-NEXT: .LBB32_10: +; CHECK-NEXT: .LBB32_6: ; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vle64.v v8, (a1), v0.t Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll @@ -1057,118 +1057,53 @@ declare <32 x double> @llvm.vp.merge.v32f64(<32 x i1>, <32 x double>, <32 x double>, i32) define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { -; RV32-LABEL: vpmerge_vv_v32f64: -; RV32: # %bb.0: -; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a3, 24 -; RV32-NEXT: mul a1, a1, a3 -; RV32-NEXT: sub sp, sp, a1 -; RV32-NEXT: addi a1, a0, 128 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vle64.v v24, (a1) -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 3 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; RV32-NEXT: addi a3, a2, -16 -; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 4 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV32-NEXT: addi a1, sp, 16 -; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; RV32-NEXT: li a1, 0 -; RV32-NEXT: bltu a2, a3, .LBB79_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB79_2: -; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV32-NEXT: vslidedown.vi v0, v1, 2 -; RV32-NEXT: vsetvli zero, a1, e64, m8, tu, ma -; RV32-NEXT: li a0, 16 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 4 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 3 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload -; RV32-NEXT: vmerge.vvm v16, v16, v24, v0 -; RV32-NEXT: bltu a2, a0, .LBB79_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: li a2, 16 -; RV32-NEXT: 
.LBB79_4: -; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, ma -; RV32-NEXT: vmv1r.v v0, v1 -; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; RV32-NEXT: vmerge.vvm v8, v8, v24, v0 -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: li a1, 24 -; RV32-NEXT: mul a0, a0, a1 -; RV32-NEXT: add sp, sp, a0 -; RV32-NEXT: addi sp, sp, 16 -; RV32-NEXT: ret -; -; RV64-LABEL: vpmerge_vv_v32f64: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -16 -; RV64-NEXT: .cfi_def_cfa_offset 16 -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 4 -; RV64-NEXT: sub sp, sp, a1 -; RV64-NEXT: addi a1, a0, 128 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vle64.v v24, (a1) -; RV64-NEXT: addi a3, a2, -16 -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vmv1r.v v1, v0 -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; RV64-NEXT: li a1, 0 -; RV64-NEXT: bltu a2, a3, .LBB79_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a1, a3 -; RV64-NEXT: .LBB79_2: -; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vi v0, v1, 2 -; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, ma -; RV64-NEXT: li a0, 16 -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vmerge.vvm v24, v24, v16, v0 -; RV64-NEXT: bltu a2, a0, .LBB79_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: li a2, 16 -; RV64-NEXT: .LBB79_4: -; RV64-NEXT: vsetvli zero, a2, e64, m8, tu, ma -; RV64-NEXT: vmv1r.v v0, v1 -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 3 -; RV64-NEXT: add a0, sp, a0 -; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; RV64-NEXT: vmerge.vvm v8, v8, v16, v0 -; RV64-NEXT: vmv8r.v v16, v24 -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 4 -; RV64-NEXT: add sp, sp, a0 -; RV64-NEXT: addi sp, sp, 16 -; RV64-NEXT: ret +; CHECK-LABEL: vpmerge_vv_v32f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: addi a1, a2, -16 +; CHECK-NEXT: sltu a3, a2, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle64.v v16, (a0) +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma +; CHECK-NEXT: li a0, 16 +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 +; CHECK-NEXT: bltu a2, a0, .LBB79_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: .LBB79_2: +; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, ma +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0 +; 
CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.merge.v32f64(<32 x i1> %m, <32 x double> %va, <32 x double> %vb, i32 %evl) ret <32 x double> %v } @@ -1176,22 +1111,20 @@ define <32 x double> @vpmerge_vf_v32f64(double %a, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v32f64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a0, a2, .LBB80_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB80_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v24, 2 +; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfmerge.vfm v16, v16, fa0, v0 -; CHECK-NEXT: bltu a0, a1, .LBB80_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB80_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB80_4: +; CHECK-NEXT: .LBB80_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -1695,20 +1695,18 @@ ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vle32.v v24, (a0) -; RV32-NEXT: li a0, 16 -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bltu a1, a0, .LBB79_2 -; RV32-NEXT: # %bb.1: ; RV32-NEXT: li a2, 16 +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: bltu a1, a2, .LBB79_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a0, 16 ; RV32-NEXT: .LBB79_2: -; RV32-NEXT: li a0, 0 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v24, v0.t -; RV32-NEXT: bltu a1, a2, .LBB79_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a0, a2 -; RV32-NEXT: .LBB79_4: +; RV32-NEXT: addi a0, a1, -16 +; RV32-NEXT: sltu a1, a1, a0 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v8, v24, 16 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma @@ -1722,46 +1720,34 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: addi a1, a0, 128 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vle64.v v24, (a0) -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vle64.v v24, (a1) ; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: li a3, 16 -; RV64-NEXT: addi a0, a0, 128 -; RV64-NEXT: mv a1, a2 -; RV64-NEXT: bltu a2, a3, .LBB79_2 -; RV64-NEXT: # %bb.1: +; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vle64.v v24, (a0) ; RV64-NEXT: li a1, 16 +; RV64-NEXT: mv a0, a2 +; RV64-NEXT: bltu a2, a1, .LBB79_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a0, 16 ; RV64-NEXT: .LBB79_2: -; RV64-NEXT: li a3, 0 -; RV64-NEXT: 
vle64.v v16, (a0) -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV64-NEXT: addi a0, a2, -16 -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v24, v0.t -; RV64-NEXT: bltu a2, a0, .LBB79_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a3, a0 -; RV64-NEXT: .LBB79_4: +; RV64-NEXT: addi a0, a2, -16 +; RV64-NEXT: sltu a1, a2, a0 +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1775,21 +1761,19 @@ ; RV32-NEXT: li a3, 32 ; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vle32.v v24, (a1) -; RV32-NEXT: li a1, 16 +; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v24, v24, 3 -; RV32-NEXT: mv a3, a2 -; RV32-NEXT: bltu a2, a1, .LBB80_2 +; RV32-NEXT: mv a1, a2 +; RV32-NEXT: bltu a2, a3, .LBB80_2 ; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a3, 16 +; RV32-NEXT: li a1, 16 ; RV32-NEXT: .LBB80_2: -; RV32-NEXT: li a1, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; RV32-NEXT: addi a3, a2, -16 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t -; RV32-NEXT: bltu a2, a3, .LBB80_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB80_4: +; RV32-NEXT: addi a1, a2, -16 +; RV32-NEXT: sltu a2, a2, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma @@ -1803,52 +1787,50 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a3, vlenb -; RV64-NEXT: slli a3, a3, 4 +; RV64-NEXT: li a4, 10 +; RV64-NEXT: mul a3, a3, a4 ; RV64-NEXT: sub sp, sp, a3 ; RV64-NEXT: li a3, 32 ; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV64-NEXT: vle32.v v24, (a1) +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV64-NEXT: vslidedown.vi v16, v24, 16 +; RV64-NEXT: vslidedown.vi v0, v24, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vsext.vf2 v8, v16 -; RV64-NEXT: vsext.vf2 v16, v24 +; RV64-NEXT: vsext.vf2 v16, v0 +; RV64-NEXT: vsll.vi v16, v16, 3 +; RV64-NEXT: vsext.vf2 v0, v24 ; RV64-NEXT: li a3, 16 -; RV64-NEXT: vsll.vi v24, v16, 3 +; RV64-NEXT: vsll.vi v24, v0, 3 ; RV64-NEXT: mv a1, a2 ; RV64-NEXT: bltu a2, a3, .LBB80_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 ; RV64-NEXT: .LBB80_2: -; RV64-NEXT: li a3, 0 -; RV64-NEXT: vsll.vi v16, v8, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV64-NEXT: addi a1, a2, -16 -; RV64-NEXT: addi a4, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a4) # Unknown-size Folded Reload +; 
RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t -; RV64-NEXT: bltu a2, a1, .LBB80_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a3, a1 -; RV64-NEXT: .LBB80_4: +; RV64-NEXT: addi a1, a2, -16 +; RV64-NEXT: sltu a2, a2, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a1, a2, a1 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: li a1, 10 +; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1863,21 +1845,19 @@ ; RV32-NEXT: li a3, 32 ; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vle32.v v24, (a1) -; RV32-NEXT: li a1, 16 +; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v24, v24, 3 -; RV32-NEXT: mv a3, a2 -; RV32-NEXT: bltu a2, a1, .LBB81_2 +; RV32-NEXT: mv a1, a2 +; RV32-NEXT: bltu a2, a3, .LBB81_2 ; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a3, 16 +; RV32-NEXT: li a1, 16 ; RV32-NEXT: .LBB81_2: -; RV32-NEXT: li a1, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; RV32-NEXT: addi a3, a2, -16 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t -; RV32-NEXT: bltu a2, a3, .LBB81_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB81_4: +; RV32-NEXT: addi a1, a2, -16 +; RV32-NEXT: sltu a2, a2, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma @@ -1891,53 +1871,51 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a3, vlenb -; RV64-NEXT: slli a3, a3, 4 +; RV64-NEXT: li a4, 10 +; RV64-NEXT: mul a3, a3, a4 ; RV64-NEXT: sub sp, sp, a3 ; RV64-NEXT: li a3, 32 ; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV64-NEXT: vle32.v v24, (a1) +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vsext.vf2 v16, v24 +; RV64-NEXT: vsext.vf2 v0, v24 ; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v24, v24, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vsext.vf2 v8, v24 +; RV64-NEXT: vsext.vf2 v16, v24 +; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: li a3, 16 -; RV64-NEXT: vsll.vi v24, v16, 3 +; RV64-NEXT: vsll.vi v24, v0, 3 ; RV64-NEXT: mv a1, a2 ; RV64-NEXT: bltu a2, a3, .LBB81_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 ; RV64-NEXT: .LBB81_2: -; RV64-NEXT: li a3, 0 -; RV64-NEXT: vsll.vi v16, v8, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV64-NEXT: addi a1, a2, -16 -; RV64-NEXT: addi a4, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a4) # Unknown-size Folded Reload +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t -; RV64-NEXT: bltu a2, a1, .LBB81_4 -; 
RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a3, a1 -; RV64-NEXT: .LBB81_4: +; RV64-NEXT: addi a1, a2, -16 +; RV64-NEXT: sltu a2, a2, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a1, a2, a1 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: li a1, 10 +; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1953,21 +1931,19 @@ ; RV32-NEXT: li a3, 32 ; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vle32.v v24, (a1) -; RV32-NEXT: li a1, 16 +; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v24, v24, 3 -; RV32-NEXT: mv a3, a2 -; RV32-NEXT: bltu a2, a1, .LBB82_2 +; RV32-NEXT: mv a1, a2 +; RV32-NEXT: bltu a2, a3, .LBB82_2 ; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a3, 16 +; RV32-NEXT: li a1, 16 ; RV32-NEXT: .LBB82_2: -; RV32-NEXT: li a1, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; RV32-NEXT: addi a3, a2, -16 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t -; RV32-NEXT: bltu a2, a3, .LBB82_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB82_4: +; RV32-NEXT: addi a1, a2, -16 +; RV32-NEXT: sltu a2, a2, a1 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma @@ -1981,53 +1957,51 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a3, vlenb -; RV64-NEXT: slli a3, a3, 4 +; RV64-NEXT: li a4, 10 +; RV64-NEXT: mul a3, a3, a4 ; RV64-NEXT: sub sp, sp, a3 ; RV64-NEXT: li a3, 32 ; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV64-NEXT: vle32.v v24, (a1) +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vzext.vf2 v16, v24 +; RV64-NEXT: vzext.vf2 v0, v24 ; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v24, v24, 16 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vzext.vf2 v8, v24 +; RV64-NEXT: vzext.vf2 v16, v24 +; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: li a3, 16 -; RV64-NEXT: vsll.vi v24, v16, 3 +; RV64-NEXT: vsll.vi v24, v0, 3 ; RV64-NEXT: mv a1, a2 ; RV64-NEXT: bltu a2, a3, .LBB82_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a1, 16 ; RV64-NEXT: .LBB82_2: -; RV64-NEXT: li a3, 0 -; RV64-NEXT: vsll.vi v16, v8, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV64-NEXT: addi a1, a2, -16 -; RV64-NEXT: addi a4, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a4) # Unknown-size Folded Reload +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t -; RV64-NEXT: bltu a2, a1, .LBB82_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a3, a1 -; RV64-NEXT: .LBB82_4: +; RV64-NEXT: addi a1, a2, -16 +; RV64-NEXT: sltu a2, a2, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: 
and a1, a2, a1 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: li a1, 10 +; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll @@ -285,24 +285,22 @@ define void @vpstore_v32f64(<32 x double> %val, <32 x double>* %ptr, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v32f64: ; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: bltu a1, a2, .LBB23_2 -; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a3, 16 +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: bltu a1, a3, .LBB23_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: .LBB23_2: -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-NEXT: addi a3, a1, -16 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t -; CHECK-NEXT: bltu a1, a3, .LBB23_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB23_4: +; CHECK-NEXT: addi a2, a1, -16 +; CHECK-NEXT: sltu a1, a1, a2 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a1, a1, a2 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a0, a0, 128 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v16, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v32f64.p0v32f64(<32 x double> %val, <32 x double>* %ptr, <32 x i1> %m, i32 %evl) Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll @@ -154,59 +154,37 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: li a4, 24 -; CHECK-NEXT: mul a2, a2, a4 -; CHECK-NEXT: sub sp, sp, a2 -; CHECK-NEXT: li a4, 128 -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: vle8.v v24, (a1) -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 4 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: sub sp, sp, a2 +; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: addi a1, a1, 128 -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: bltu a3, a4, .LBB11_2 -; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: vmv1r.v v2, v8 +; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: .LBB11_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vle8.v v24, (a1) ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma +; 
CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: addi a0, a1, 128 +; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a3, -128 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: bltu a3, a0, .LBB11_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a0 -; CHECK-NEXT: .LBB11_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma +; CHECK-NEXT: sltu a4, a3, a0 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: vle8.v v16, (a1) +; CHECK-NEXT: and a0, a4, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vmerge.vvm v24, v8, v24, v0 +; CHECK-NEXT: bltu a3, a2, .LBB11_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a3, 128 +; CHECK-NEXT: .LBB11_2: +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: vmv8r.v v16, v24 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -423,59 +401,45 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a3, a2, -16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a2, a3, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: addi a1, a2, -16 +; CHECK-NEXT: sltu a3, a2, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: addi a3, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v24, 2 +; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0 -; CHECK-NEXT: bltu a2, a0, .LBB25_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a0, .LBB25_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: .LBB25_4: +; CHECK-NEXT: 
.LBB25_2: ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -606,42 +570,41 @@ ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vle32.v v24, (a0) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: addi a1, a2, -32 +; CHECK-NEXT: sltu a3, a2, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 +; CHECK-NEXT: li a3, 32 +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: addi a4, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill ; CHECK-NEXT: addi a0, a0, 128 -; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vi v0, v0, 4 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0 ; CHECK-NEXT: bltu a2, a3, .LBB35_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 32 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: .LBB35_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: addi a0, a2, -32 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 -; CHECK-NEXT: bltu a2, a0, .LBB35_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a0 -; CHECK-NEXT: .LBB35_4: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vi v0, v0, 4 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 +; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll @@ -154,23 +154,21 @@ ; CHECK-LABEL: vzext_v32i64_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 -; 
CHECK-NEXT: li a1, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-NEXT: bltu a0, a2, .LBB12_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB12_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vzext.vf2 v16, v24, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB12_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB12_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB12_4: +; CHECK-NEXT: .LBB12_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vzext.vf2 v24, v8, v0.t @@ -183,21 +181,19 @@ define <32 x i64> @vzext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v32i64_v32i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bltu a0, a2, .LBB13_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB13_2: +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a2, a0, a1 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vzext.vf2 v16, v24 -; CHECK-NEXT: bltu a0, a1, .LBB13_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB13_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: .LBB13_4: +; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v24, v8 ; CHECK-NEXT: vmv.v.v v8, v24 Index: llvm/test/CodeGen/RISCV/rvv/floor-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/floor-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/floor-vp.ll @@ -737,67 +737,59 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v25, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vslidedown.vx v2, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: lui a3, %hi(.LCPI32_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3) ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a2, 2 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -809,37 +801,34 @@ ; CHECK-LABEL: vp_floor_nxv16f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: bltu a0, a1, .LBB33_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: lui a3, %hi(.LCPI33_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3) -; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 +; CHECK-NEXT: vfabs.v v24, v16 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a2, 2 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: sub a1, a0, a1 -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: bltu a0, a1, .LBB33_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; 
CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/round-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/round-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/round-vp.ll @@ -737,67 +737,59 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v25, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vslidedown.vx v2, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: lui a3, %hi(.LCPI32_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3) ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a2, 4 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: 
vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -809,37 +801,34 @@ ; CHECK-LABEL: vp_round_nxv16f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: bltu a0, a1, .LBB33_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: lui a3, %hi(.LCPI33_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3) -; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 +; CHECK-NEXT: vfabs.v v24, v16 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a2, 4 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: sub a1, a0, a1 -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: bltu a0, a1, .LBB33_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll @@ -737,67 +737,59 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v25, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vslidedown.vx v2, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: lui a3, %hi(.LCPI32_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3) ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a2, 0 ; CHECK-NEXT: vsetvli zero, zero, 
e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -809,37 +801,34 @@ ; CHECK-LABEL: vp_roundeven_nxv16f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: bltu a0, a1, .LBB33_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: lui a3, %hi(.LCPI33_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3) -; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 +; CHECK-NEXT: vfabs.v v24, v16 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a2, 0 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: sub a1, a0, a1 -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: bltu a0, a1, .LBB33_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: 
vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll @@ -737,67 +737,59 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v25, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vslidedown.vx v2, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: lui a3, %hi(.LCPI32_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a3) ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfabs.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t +; CHECK-NEXT: vmflt.vf v2, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a2, 1 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v2 +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a0, a1, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v24, v16, ft0, v0.t +; CHECK-NEXT: vmflt.vf v1, v16, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 1 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; 
CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -809,37 +801,34 @@ ; CHECK-LABEL: vp_roundtozero_nxv16f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: bltu a0, a1, .LBB33_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: lui a3, %hi(.LCPI33_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a3) -; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 +; CHECK-NEXT: vfabs.v v24, v16 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a2, 1 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t ; CHECK-NEXT: fsrm a2 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: sub a1, a0, a1 -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 +; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: bltu a0, a1, .LBB33_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: .LBB33_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft0 ; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t +; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t ; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll @@ -1086,43 +1086,51 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: srli a1, a3, 1 -; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, ma -; CHECK-NEXT: slli a5, a3, 3 -; CHECK-NEXT: add a5, a0, a5 -; CHECK-NEXT: vl8re16.v v24, (a5) -; CHECK-NEXT: slli a3, a3, 2 -; CHECK-NEXT: sub a5, a2, a3 -; CHECK-NEXT: vslidedown.vx v0, v0, a1 -; CHECK-NEXT: bltu a2, a5, .LBB85_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a5 -; CHECK-NEXT: .LBB85_2: -; CHECK-NEXT: vl8re16.v v8, (a0) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma +; CHECK-NEXT: slli a3, a1, 3 +; CHECK-NEXT: add a3, a0, a3 +; CHECK-NEXT: 
vl8re16.v v8, (a3) +; CHECK-NEXT: slli a3, a1, 2 +; CHECK-NEXT: sub a4, a2, a3 +; CHECK-NEXT: sltu a5, a2, a4 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a4, a5, a4 +; CHECK-NEXT: srli a1, a1, 1 +; CHECK-NEXT: vl8re16.v v0, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vslidedown.vx v0, v24, a1 ; CHECK-NEXT: vsetvli zero, a4, e16, m8, ta, ma -; CHECK-NEXT: vmfeq.vv v2, v16, v24, v0.t -; CHECK-NEXT: bltu a2, a3, .LBB85_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: vmfeq.vv v1, v16, v8, v0.t +; CHECK-NEXT: bltu a2, a3, .LBB85_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB85_4: +; CHECK-NEXT: .LBB85_2: ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t +; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t ; CHECK-NEXT: add a0, a1, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma -; CHECK-NEXT: vslideup.vx v16, v2, a1 +; CHECK-NEXT: vslideup.vx v16, v1, a1 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2213,110 +2221,80 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 5 +; CHECK-NEXT: li a3, 48 +; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 +; CHECK-NEXT: li a3, 40 ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a3, vlenb +; CHECK-NEXT: srli a4, a3, 2 +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v1, v0, a4 ; CHECK-NEXT: srli a1, a3, 3 -; CHECK-NEXT: slli a5, a3, 3 -; CHECK-NEXT: slli a7, a3, 1 -; CHECK-NEXT: add a4, a2, a5 -; CHECK-NEXT: mv t0, a6 -; CHECK-NEXT: bltu a6, a7, .LBB171_2 +; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a1 +; CHECK-NEXT: li a5, 24 +; CHECK-NEXT: slli t1, a3, 3 +; CHECK-NEXT: add a7, a2, t1 +; CHECK-NEXT: vl8re64.v v8, (a7) +; CHECK-NEXT: mul t0, a3, a5 +; CHECK-NEXT: slli a5, a3, 1 +; CHECK-NEXT: slli t2, a3, 4 +; CHECK-NEXT: mv a7, a6 +; CHECK-NEXT: bltu a6, a5, .LBB171_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv t0, a7 +; CHECK-NEXT: mv a7, a5 ; CHECK-NEXT: .LBB171_2: -; CHECK-NEXT: li t1, 0 -; CHECK-NEXT: vsetvli t2, zero, e8, mf4, ta, ma -; CHECK-NEXT: vl8re64.v v16, (a4) -; CHECK-NEXT: srli a4, a3, 2 -; CHECK-NEXT: sub t2, t0, a3 -; CHECK-NEXT: vslidedown.vx v0, v24, a1 -; CHECK-NEXT: bltu t0, t2, .LBB171_4 +; CHECK-NEXT: add t0, a2, t0 +; CHECK-NEXT: add t1, a0, t1 +; CHECK-NEXT: add t2, a2, t2 +; CHECK-NEXT: vl8re64.v v16, (a2) +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 4 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill 
+; CHECK-NEXT: sub a2, a7, a3 +; CHECK-NEXT: sltu t3, a7, a2 +; CHECK-NEXT: addi t3, t3, -1 +; CHECK-NEXT: and a2, t3, a2 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 5 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vmfeq.vv v2, v16, v8, v0.t +; CHECK-NEXT: bltu a7, a3, .LBB171_4 ; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv t1, t2 +; CHECK-NEXT: mv a7, a3 ; CHECK-NEXT: .LBB171_4: -; CHECK-NEXT: li t2, 24 -; CHECK-NEXT: vsetvli t3, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v1, v24, a4 -; CHECK-NEXT: vl8re64.v v8, (a2) -; CHECK-NEXT: csrr t3, vlenb -; CHECK-NEXT: slli t3, t3, 3 -; CHECK-NEXT: add t3, sp, t3 -; CHECK-NEXT: addi t3, t3, 16 -; CHECK-NEXT: vs8r.v v8, (t3) # Unknown-size Folded Spill -; CHECK-NEXT: slli t3, a3, 4 -; CHECK-NEXT: vsetvli zero, t1, e64, m8, ta, ma -; CHECK-NEXT: csrr t1, vlenb -; CHECK-NEXT: slli t1, t1, 4 -; CHECK-NEXT: add t1, sp, t1 -; CHECK-NEXT: addi t1, t1, 16 -; CHECK-NEXT: vl8re8.v v8, (t1) # Unknown-size Folded Reload -; CHECK-NEXT: vmfeq.vv v2, v8, v16, v0.t -; CHECK-NEXT: bltu t0, a3, .LBB171_6 -; CHECK-NEXT: # %bb.5: -; CHECK-NEXT: mv t0, a3 -; CHECK-NEXT: .LBB171_6: -; CHECK-NEXT: li t1, 0 -; CHECK-NEXT: mul t4, a3, t2 -; CHECK-NEXT: add t2, a2, t3 -; CHECK-NEXT: vsetvli zero, t0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: csrr t0, vlenb -; CHECK-NEXT: li t3, 24 -; CHECK-NEXT: mul t0, t0, t3 -; CHECK-NEXT: add t0, sp, t0 -; CHECK-NEXT: addi t0, t0, 16 -; CHECK-NEXT: vl8re8.v v24, (t0) # Unknown-size Folded Reload -; CHECK-NEXT: csrr t0, vlenb -; CHECK-NEXT: slli t0, t0, 3 -; CHECK-NEXT: add t0, sp, t0 -; CHECK-NEXT: addi t0, t0, 16 -; CHECK-NEXT: vl8re8.v v8, (t0) # Unknown-size Folded Reload -; CHECK-NEXT: vmfeq.vv v17, v24, v8, v0.t -; CHECK-NEXT: sub t0, a6, a7 -; CHECK-NEXT: add a7, a1, a1 -; CHECK-NEXT: bltu a6, t0, .LBB171_8 -; CHECK-NEXT: # %bb.7: -; CHECK-NEXT: mv t1, t0 -; CHECK-NEXT: .LBB171_8: -; CHECK-NEXT: add a2, a2, t4 -; CHECK-NEXT: vl8re64.v v8, (t2) -; CHECK-NEXT: csrr a6, vlenb +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vx v17, v1, a1 +; CHECK-NEXT: vl8re64.v v8, (t0) +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 5 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vl8re64.v v8, (t1) +; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: li t0, 24 -; CHECK-NEXT: mul a6, a6, t0 -; CHECK-NEXT: add a6, sp, a6 -; CHECK-NEXT: addi a6, a6, 16 -; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill -; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: csrr a6, vlenb -; CHECK-NEXT: slli a6, a6, 4 -; CHECK-NEXT: add a6, sp, a6 -; CHECK-NEXT: addi a6, a6, 16 -; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill -; CHECK-NEXT: add a0, a0, a5 -; CHECK-NEXT: vsetvli zero, a7, e8, mf2, tu, ma -; CHECK-NEXT: vslideup.vx v17, v2, a1 -; CHECK-NEXT: mv a5, t1 -; CHECK-NEXT: bltu t1, a3, .LBB171_10 -; CHECK-NEXT: # %bb.9: -; CHECK-NEXT: mv a5, a3 -; CHECK-NEXT: .LBB171_10: -; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vx v16, v1, a1 -; CHECK-NEXT: vl8re64.v v8, (a2) +; CHECK-NEXT: mul a2, a2, t0 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vl8re64.v v8, (t2) ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 ; 
CHECK-NEXT: add a2, sp, a2 @@ -2325,10 +2303,10 @@ ; CHECK-NEXT: vl8re64.v v8, (a0) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a2, 24 +; CHECK-NEXT: li a2, 40 ; CHECK-NEXT: mul a0, a0, a2 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 @@ -2338,33 +2316,59 @@ ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmfeq.vv v18, v8, v24, v0.t -; CHECK-NEXT: add a0, a4, a1 +; CHECK-NEXT: vmfeq.vv v18, v24, v8, v0.t +; CHECK-NEXT: add a0, a1, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma -; CHECK-NEXT: sub a0, t1, a3 -; CHECK-NEXT: vslideup.vx v17, v18, a4 -; CHECK-NEXT: bltu t1, a0, .LBB171_12 -; CHECK-NEXT: # %bb.11: -; CHECK-NEXT: mv a6, a0 -; CHECK-NEXT: .LBB171_12: -; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: sub a0, a6, a5 +; CHECK-NEXT: sltu a2, a6, a0 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a0, a2, a0 +; CHECK-NEXT: vslideup.vx v18, v2, a1 +; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: bltu a0, a3, .LBB171_6 +; CHECK-NEXT: # %bb.5: +; CHECK-NEXT: mv a2, a3 +; CHECK-NEXT: .LBB171_6: +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vl8re8.v v8, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t +; CHECK-NEXT: add a2, a4, a1 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, ma +; CHECK-NEXT: vslideup.vx v18, v16, a4 +; CHECK-NEXT: sub a2, a0, a3 +; CHECK-NEXT: sltu a0, a0, a2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a2 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a2, 24 +; CHECK-NEXT: mul a0, a0, a2 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t ; CHECK-NEXT: slli a0, a1, 1 ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: add a1, a0, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma -; CHECK-NEXT: vslideup.vx v17, v16, a0 -; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vslideup.vx v18, v16, a0 +; CHECK-NEXT: vmv1r.v v0, v18 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 5 +; CHECK-NEXT: li a1, 48 +; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll @@ -1161,51 +1161,37 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a4, 24 -; CHECK-NEXT: mul a1, a1, a4 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: vmv1r.v 
v24, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a4, a0, a1 -; CHECK-NEXT: vl8r.v v24, (a4) -; CHECK-NEXT: csrr a4, vlenb -; CHECK-NEXT: slli a4, a4, 3 -; CHECK-NEXT: add a4, sp, a4 -; CHECK-NEXT: addi a4, a4, 16 -; CHECK-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli a4, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v25, (a2) -; CHECK-NEXT: sub a4, a3, a1 -; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 4 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bltu a3, a4, .LBB96_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a4 -; CHECK-NEXT: .LBB96_2: -; CHECK-NEXT: vl8r.v v8, (a0) +; CHECK-NEXT: vl8r.v v8, (a4) +; CHECK-NEXT: vl8r.v v0, (a0) ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v25 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-NEXT: sub a0, a3, a1 +; CHECK-NEXT: vlm.v v0, (a2) +; CHECK-NEXT: sltu a2, a3, a0 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a0, a2, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmseq.vv v1, v16, v8, v0.t -; CHECK-NEXT: bltu a3, a1, .LBB96_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a3, a1, .LBB96_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB96_4: +; CHECK-NEXT: .LBB96_2: ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload @@ -1215,8 +1201,7 @@ ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1227,27 +1212,26 @@ define @icmp_eq_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a2, a3, .LBB97_2 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: sub a3, a2, a1 +; CHECK-NEXT: sltu a4, a2, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB97_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB97_2: -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: sub a1, a2, a3 -; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t -; CHECK-NEXT: bltu a2, a1, .LBB97_4 -; CHECK-NEXT: # %bb.3: 
-; CHECK-NEXT: mv a5, a1 -; CHECK-NEXT: .LBB97_4: -; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vmseq.vx v8, v16, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i8 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1258,27 +1242,26 @@ define @icmp_eq_vx_swap_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_swap_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a2, a3, .LBB98_2 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: sub a3, a2, a1 +; CHECK-NEXT: sltu a4, a2, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB98_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB98_2: -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: sub a1, a2, a3 -; CHECK-NEXT: vmseq.vx v25, v8, a0, v0.t -; CHECK-NEXT: bltu a2, a1, .LBB98_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a5, a1 -; CHECK-NEXT: .LBB98_4: -; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vmseq.vx v8, v16, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i8 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -2399,43 +2382,51 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: srli a1, a3, 2 -; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma -; CHECK-NEXT: slli a5, a3, 3 -; CHECK-NEXT: add a5, a0, a5 -; CHECK-NEXT: vl8re32.v v24, (a5) -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: sub a5, a2, a3 -; CHECK-NEXT: vslidedown.vx v0, v0, a1 -; CHECK-NEXT: bltu a2, a5, .LBB189_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a5 -; CHECK-NEXT: .LBB189_2: -; CHECK-NEXT: vl8re32.v v8, (a0) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: slli a3, a1, 3 +; CHECK-NEXT: add a3, a0, a3 +; CHECK-NEXT: vl8re32.v v8, (a3) +; CHECK-NEXT: slli a3, a1, 1 +; CHECK-NEXT: sub a4, a2, a3 +; CHECK-NEXT: sltu a5, a2, a4 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a4, a5, a4 +; CHECK-NEXT: srli a1, a1, 2 +; CHECK-NEXT: vl8re32.v v0, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vslidedown.vx v0, v24, a1 ; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma -; 
CHECK-NEXT: vmseq.vv v2, v16, v24, v0.t -; CHECK-NEXT: bltu a2, a3, .LBB189_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: vmseq.vv v1, v16, v8, v0.t +; CHECK-NEXT: bltu a2, a3, .LBB189_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB189_4: +; CHECK-NEXT: .LBB189_2: ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmseq.vv v16, v24, v8, v0.t +; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t ; CHECK-NEXT: add a0, a1, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma -; CHECK-NEXT: vslideup.vx v16, v2, a1 +; CHECK-NEXT: vslideup.vx v16, v1, a1 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2447,23 +2438,21 @@ ; CHECK-LABEL: icmp_eq_vx_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a2, a3, 2 -; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: sub a5, a1, a3 +; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 -; CHECK-NEXT: bltu a1, a5, .LBB190_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a5 -; CHECK-NEXT: .LBB190_2: +; CHECK-NEXT: slli a3, a3, 1 +; CHECK-NEXT: sub a4, a1, a3 +; CHECK-NEXT: sltu a5, a1, a4 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a4, a5, a4 ; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a3, .LBB190_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a3, .LBB190_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB190_4: +; CHECK-NEXT: .LBB190_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t @@ -2482,23 +2471,21 @@ ; CHECK-LABEL: icmp_eq_vx_swap_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a2, a3, 2 -; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: sub a5, a1, a3 +; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 -; CHECK-NEXT: bltu a1, a5, .LBB191_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a5 -; CHECK-NEXT: .LBB191_2: +; CHECK-NEXT: slli a3, a3, 1 +; CHECK-NEXT: sub a4, a1, a3 +; CHECK-NEXT: sltu a5, a1, a4 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a4, a5, a4 ; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a3, .LBB191_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a3, .LBB191_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: .LBB191_4: +; CHECK-NEXT: .LBB191_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t Index: llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll +++ llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll @@ -766,20 +766,18 @@ ; CHECK-RV32-LABEL: strided_load_nxv16f64: ; CHECK-RV32: # %bb.0: ; 
CHECK-RV32-NEXT: vmv1r.v v8, v0 -; CHECK-RV32-NEXT: li a2, 0 ; CHECK-RV32-NEXT: csrr a4, vlenb -; CHECK-RV32-NEXT: sub a6, a3, a4 +; CHECK-RV32-NEXT: sub a2, a3, a4 +; CHECK-RV32-NEXT: sltu a5, a3, a2 +; CHECK-RV32-NEXT: addi a5, a5, -1 +; CHECK-RV32-NEXT: and a2, a5, a2 ; CHECK-RV32-NEXT: srli a5, a4, 3 -; CHECK-RV32-NEXT: bltu a3, a6, .LBB42_2 -; CHECK-RV32-NEXT: # %bb.1: -; CHECK-RV32-NEXT: mv a2, a6 -; CHECK-RV32-NEXT: .LBB42_2: ; CHECK-RV32-NEXT: vsetvli a6, zero, e8, mf4, ta, ma -; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a5 -; CHECK-RV32-NEXT: bltu a3, a4, .LBB42_4 -; CHECK-RV32-NEXT: # %bb.3: +; CHECK-RV32-NEXT: vslidedown.vx v0, v0, a5 +; CHECK-RV32-NEXT: bltu a3, a4, .LBB42_2 +; CHECK-RV32-NEXT: # %bb.1: ; CHECK-RV32-NEXT: mv a3, a4 -; CHECK-RV32-NEXT: .LBB42_4: +; CHECK-RV32-NEXT: .LBB42_2: ; CHECK-RV32-NEXT: mul a4, a3, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -792,20 +790,18 @@ ; CHECK-RV64-LABEL: strided_load_nxv16f64: ; CHECK-RV64: # %bb.0: ; CHECK-RV64-NEXT: vmv1r.v v8, v0 -; CHECK-RV64-NEXT: li a3, 0 ; CHECK-RV64-NEXT: csrr a4, vlenb -; CHECK-RV64-NEXT: sub a6, a2, a4 +; CHECK-RV64-NEXT: sub a3, a2, a4 +; CHECK-RV64-NEXT: sltu a5, a2, a3 +; CHECK-RV64-NEXT: addi a5, a5, -1 +; CHECK-RV64-NEXT: and a3, a5, a3 ; CHECK-RV64-NEXT: srli a5, a4, 3 -; CHECK-RV64-NEXT: bltu a2, a6, .LBB42_2 -; CHECK-RV64-NEXT: # %bb.1: -; CHECK-RV64-NEXT: mv a3, a6 -; CHECK-RV64-NEXT: .LBB42_2: ; CHECK-RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma -; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a5 -; CHECK-RV64-NEXT: bltu a2, a4, .LBB42_4 -; CHECK-RV64-NEXT: # %bb.3: +; CHECK-RV64-NEXT: vslidedown.vx v0, v0, a5 +; CHECK-RV64-NEXT: bltu a2, a4, .LBB42_2 +; CHECK-RV64-NEXT: # %bb.1: ; CHECK-RV64-NEXT: mv a2, a4 -; CHECK-RV64-NEXT: .LBB42_4: +; CHECK-RV64-NEXT: .LBB42_2: ; CHECK-RV64-NEXT: mul a4, a2, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu @@ -822,16 +818,14 @@ ; CHECK-RV32-LABEL: strided_load_nxv16f64_allones_mask: ; CHECK-RV32: # %bb.0: ; CHECK-RV32-NEXT: csrr a4, vlenb -; CHECK-RV32-NEXT: sub a5, a3, a4 -; CHECK-RV32-NEXT: li a2, 0 -; CHECK-RV32-NEXT: bltu a3, a5, .LBB43_2 +; CHECK-RV32-NEXT: sub a2, a3, a4 +; CHECK-RV32-NEXT: sltu a5, a3, a2 +; CHECK-RV32-NEXT: addi a5, a5, -1 +; CHECK-RV32-NEXT: and a2, a5, a2 +; CHECK-RV32-NEXT: bltu a3, a4, .LBB43_2 ; CHECK-RV32-NEXT: # %bb.1: -; CHECK-RV32-NEXT: mv a2, a5 -; CHECK-RV32-NEXT: .LBB43_2: -; CHECK-RV32-NEXT: bltu a3, a4, .LBB43_4 -; CHECK-RV32-NEXT: # %bb.3: ; CHECK-RV32-NEXT: mv a3, a4 -; CHECK-RV32-NEXT: .LBB43_4: +; CHECK-RV32-NEXT: .LBB43_2: ; CHECK-RV32-NEXT: mul a4, a3, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma @@ -843,16 +837,14 @@ ; CHECK-RV64-LABEL: strided_load_nxv16f64_allones_mask: ; CHECK-RV64: # %bb.0: ; CHECK-RV64-NEXT: csrr a4, vlenb -; CHECK-RV64-NEXT: sub a5, a2, a4 -; CHECK-RV64-NEXT: li a3, 0 -; CHECK-RV64-NEXT: bltu a2, a5, .LBB43_2 +; CHECK-RV64-NEXT: sub a3, a2, a4 +; CHECK-RV64-NEXT: sltu a5, a2, a3 +; CHECK-RV64-NEXT: addi a5, a5, -1 +; CHECK-RV64-NEXT: and a3, a5, a3 +; CHECK-RV64-NEXT: bltu a2, a4, .LBB43_2 ; CHECK-RV64-NEXT: # %bb.1: -; CHECK-RV64-NEXT: mv a3, a5 -; CHECK-RV64-NEXT: .LBB43_2: -; CHECK-RV64-NEXT: bltu a2, a4, .LBB43_4 -; CHECK-RV64-NEXT: # %bb.3: ; CHECK-RV64-NEXT: mv a2, a4 -; CHECK-RV64-NEXT: .LBB43_4: +; CHECK-RV64-NEXT: .LBB43_2: ; CHECK-RV64-NEXT: mul a4, a2, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma @@ -875,48 
+867,44 @@ define @strided_load_nxv17f64(double* %ptr, i64 %stride, %mask, i32 zeroext %evl, * %hi_ptr) { ; CHECK-RV32-LABEL: strided_load_nxv17f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: csrr a2, vlenb -; CHECK-RV32-NEXT: slli a7, a2, 1 +; CHECK-RV32-NEXT: csrr a5, vlenb +; CHECK-RV32-NEXT: slli a7, a5, 1 ; CHECK-RV32-NEXT: vmv1r.v v8, v0 -; CHECK-RV32-NEXT: mv a5, a3 +; CHECK-RV32-NEXT: mv a2, a3 ; CHECK-RV32-NEXT: bltu a3, a7, .LBB44_2 ; CHECK-RV32-NEXT: # %bb.1: -; CHECK-RV32-NEXT: mv a5, a7 +; CHECK-RV32-NEXT: mv a2, a7 ; CHECK-RV32-NEXT: .LBB44_2: -; CHECK-RV32-NEXT: sub a6, a5, a2 -; CHECK-RV32-NEXT: li t0, 0 -; CHECK-RV32-NEXT: bltu a5, a6, .LBB44_4 -; CHECK-RV32-NEXT: # %bb.3: -; CHECK-RV32-NEXT: mv t0, a6 -; CHECK-RV32-NEXT: .LBB44_4: -; CHECK-RV32-NEXT: srli a6, a2, 3 +; CHECK-RV32-NEXT: sub a6, a2, a5 +; CHECK-RV32-NEXT: sltu t0, a2, a6 +; CHECK-RV32-NEXT: addi t0, t0, -1 +; CHECK-RV32-NEXT: and t0, t0, a6 +; CHECK-RV32-NEXT: srli a6, a5, 3 ; CHECK-RV32-NEXT: vsetvli t1, zero, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a6 -; CHECK-RV32-NEXT: mv a6, a5 -; CHECK-RV32-NEXT: bltu a5, a2, .LBB44_6 -; CHECK-RV32-NEXT: # %bb.5: ; CHECK-RV32-NEXT: mv a6, a2 -; CHECK-RV32-NEXT: .LBB44_6: +; CHECK-RV32-NEXT: bltu a2, a5, .LBB44_4 +; CHECK-RV32-NEXT: # %bb.3: +; CHECK-RV32-NEXT: mv a6, a5 +; CHECK-RV32-NEXT: .LBB44_4: ; CHECK-RV32-NEXT: mul t1, a6, a1 ; CHECK-RV32-NEXT: add t1, a0, t1 ; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, mu ; CHECK-RV32-NEXT: vlse64.v v16, (t1), a1, v0.t -; CHECK-RV32-NEXT: li t0, 0 -; CHECK-RV32-NEXT: sub t1, a3, a7 -; CHECK-RV32-NEXT: srli a7, a2, 2 -; CHECK-RV32-NEXT: bltu a3, t1, .LBB44_8 -; CHECK-RV32-NEXT: # %bb.7: -; CHECK-RV32-NEXT: mv t0, t1 -; CHECK-RV32-NEXT: .LBB44_8: -; CHECK-RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma -; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a7 -; CHECK-RV32-NEXT: bltu t0, a2, .LBB44_10 -; CHECK-RV32-NEXT: # %bb.9: -; CHECK-RV32-NEXT: mv t0, a2 -; CHECK-RV32-NEXT: .LBB44_10: -; CHECK-RV32-NEXT: mul a2, a5, a1 +; CHECK-RV32-NEXT: sub a7, a3, a7 +; CHECK-RV32-NEXT: sltu a3, a3, a7 +; CHECK-RV32-NEXT: addi a3, a3, -1 +; CHECK-RV32-NEXT: and a3, a3, a7 +; CHECK-RV32-NEXT: bltu a3, a5, .LBB44_6 +; CHECK-RV32-NEXT: # %bb.5: +; CHECK-RV32-NEXT: mv a3, a5 +; CHECK-RV32-NEXT: .LBB44_6: +; CHECK-RV32-NEXT: srli a5, a5, 2 +; CHECK-RV32-NEXT: vsetvli a7, zero, e8, mf2, ta, ma +; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a5 +; CHECK-RV32-NEXT: mul a2, a2, a1 ; CHECK-RV32-NEXT: add a2, a0, a2 -; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-RV32-NEXT: vlse64.v v24, (a2), a1, v0.t ; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, mu ; CHECK-RV32-NEXT: vmv1r.v v0, v8 @@ -926,49 +914,45 @@ ; ; CHECK-RV64-LABEL: strided_load_nxv17f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: csrr a4, vlenb -; CHECK-RV64-NEXT: slli a7, a4, 1 +; CHECK-RV64-NEXT: csrr a5, vlenb +; CHECK-RV64-NEXT: slli a7, a5, 1 ; CHECK-RV64-NEXT: vmv1r.v v8, v0 -; CHECK-RV64-NEXT: mv a5, a2 +; CHECK-RV64-NEXT: mv a4, a2 ; CHECK-RV64-NEXT: bltu a2, a7, .LBB44_2 ; CHECK-RV64-NEXT: # %bb.1: -; CHECK-RV64-NEXT: mv a5, a7 +; CHECK-RV64-NEXT: mv a4, a7 ; CHECK-RV64-NEXT: .LBB44_2: -; CHECK-RV64-NEXT: sub a6, a5, a4 -; CHECK-RV64-NEXT: li t0, 0 -; CHECK-RV64-NEXT: bltu a5, a6, .LBB44_4 -; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: mv t0, a6 -; CHECK-RV64-NEXT: .LBB44_4: -; CHECK-RV64-NEXT: srli a6, a4, 3 +; CHECK-RV64-NEXT: sub a6, a4, a5 +; CHECK-RV64-NEXT: sltu t0, a4, a6 +; CHECK-RV64-NEXT: addi 
t0, t0, -1 +; CHECK-RV64-NEXT: and t0, t0, a6 +; CHECK-RV64-NEXT: srli a6, a5, 3 ; CHECK-RV64-NEXT: vsetvli t1, zero, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a6 -; CHECK-RV64-NEXT: mv a6, a5 -; CHECK-RV64-NEXT: bltu a5, a4, .LBB44_6 -; CHECK-RV64-NEXT: # %bb.5: ; CHECK-RV64-NEXT: mv a6, a4 -; CHECK-RV64-NEXT: .LBB44_6: +; CHECK-RV64-NEXT: bltu a4, a5, .LBB44_4 +; CHECK-RV64-NEXT: # %bb.3: +; CHECK-RV64-NEXT: mv a6, a5 +; CHECK-RV64-NEXT: .LBB44_4: ; CHECK-RV64-NEXT: mul t1, a6, a1 ; CHECK-RV64-NEXT: add t1, a0, t1 ; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, mu ; CHECK-RV64-NEXT: vlse64.v v16, (t1), a1, v0.t -; CHECK-RV64-NEXT: li t0, 0 -; CHECK-RV64-NEXT: sub t1, a2, a7 -; CHECK-RV64-NEXT: srli a7, a4, 2 -; CHECK-RV64-NEXT: bltu a2, t1, .LBB44_8 -; CHECK-RV64-NEXT: # %bb.7: -; CHECK-RV64-NEXT: mv t0, t1 -; CHECK-RV64-NEXT: .LBB44_8: -; CHECK-RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, ma -; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a7 -; CHECK-RV64-NEXT: bltu t0, a4, .LBB44_10 -; CHECK-RV64-NEXT: # %bb.9: -; CHECK-RV64-NEXT: mv t0, a4 -; CHECK-RV64-NEXT: .LBB44_10: -; CHECK-RV64-NEXT: mul a2, a5, a1 -; CHECK-RV64-NEXT: add a2, a0, a2 -; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, mu -; CHECK-RV64-NEXT: vlse64.v v24, (a2), a1, v0.t +; CHECK-RV64-NEXT: sub a7, a2, a7 +; CHECK-RV64-NEXT: sltu a2, a2, a7 +; CHECK-RV64-NEXT: addi a2, a2, -1 +; CHECK-RV64-NEXT: and a2, a2, a7 +; CHECK-RV64-NEXT: bltu a2, a5, .LBB44_6 +; CHECK-RV64-NEXT: # %bb.5: +; CHECK-RV64-NEXT: mv a2, a5 +; CHECK-RV64-NEXT: .LBB44_6: +; CHECK-RV64-NEXT: srli a5, a5, 2 +; CHECK-RV64-NEXT: vsetvli a7, zero, e8, mf2, ta, ma +; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a5 +; CHECK-RV64-NEXT: mul a4, a4, a1 +; CHECK-RV64-NEXT: add a4, a0, a4 +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vlse64.v v24, (a4), a1, v0.t ; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, mu ; CHECK-RV64-NEXT: vmv1r.v v0, v8 ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t Index: llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll +++ llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll @@ -621,51 +621,47 @@ define void @strided_store_nxv16f64( %v, double* %ptr, i32 signext %stride, %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_store_nxv16f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: csrr a3, vlenb -; CHECK-RV32-NEXT: mv a4, a2 -; CHECK-RV32-NEXT: bltu a2, a3, .LBB34_2 +; CHECK-RV32-NEXT: csrr a4, vlenb +; CHECK-RV32-NEXT: mv a3, a2 +; CHECK-RV32-NEXT: bltu a2, a4, .LBB34_2 ; CHECK-RV32-NEXT: # %bb.1: -; CHECK-RV32-NEXT: mv a4, a3 +; CHECK-RV32-NEXT: mv a3, a4 ; CHECK-RV32-NEXT: .LBB34_2: -; CHECK-RV32-NEXT: li a5, 0 -; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t -; CHECK-RV32-NEXT: srli a6, a3, 3 -; CHECK-RV32-NEXT: vsetvli a7, zero, e8, mf4, ta, ma -; CHECK-RV32-NEXT: sub a3, a2, a3 -; CHECK-RV32-NEXT: vslidedown.vx v0, v0, a6 -; CHECK-RV32-NEXT: bltu a2, a3, .LBB34_4 -; CHECK-RV32-NEXT: # %bb.3: -; CHECK-RV32-NEXT: mv a5, a3 -; CHECK-RV32-NEXT: .LBB34_4: -; CHECK-RV32-NEXT: mul a2, a4, a1 -; CHECK-RV32-NEXT: add a0, a0, a2 -; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma +; CHECK-RV32-NEXT: sub a5, a2, a4 +; CHECK-RV32-NEXT: sltu a2, a2, a5 +; CHECK-RV32-NEXT: addi a2, a2, -1 +; CHECK-RV32-NEXT: and a2, a2, a5 +; CHECK-RV32-NEXT: srli a4, a4, 3 +; CHECK-RV32-NEXT: vsetvli a5, 
zero, e8, mf4, ta, ma +; CHECK-RV32-NEXT: vslidedown.vx v0, v0, a4 +; CHECK-RV32-NEXT: mul a3, a3, a1 +; CHECK-RV32-NEXT: add a0, a0, a3 +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v16, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_store_nxv16f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: csrr a3, vlenb -; CHECK-RV64-NEXT: mv a4, a2 -; CHECK-RV64-NEXT: bltu a2, a3, .LBB34_2 +; CHECK-RV64-NEXT: csrr a4, vlenb +; CHECK-RV64-NEXT: mv a3, a2 +; CHECK-RV64-NEXT: bltu a2, a4, .LBB34_2 ; CHECK-RV64-NEXT: # %bb.1: -; CHECK-RV64-NEXT: mv a4, a3 +; CHECK-RV64-NEXT: mv a3, a4 ; CHECK-RV64-NEXT: .LBB34_2: -; CHECK-RV64-NEXT: li a5, 0 -; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t -; CHECK-RV64-NEXT: srli a6, a3, 3 -; CHECK-RV64-NEXT: vsetvli a7, zero, e8, mf4, ta, ma -; CHECK-RV64-NEXT: sub a3, a2, a3 -; CHECK-RV64-NEXT: vslidedown.vx v0, v0, a6 -; CHECK-RV64-NEXT: bltu a2, a3, .LBB34_4 -; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: mv a5, a3 -; CHECK-RV64-NEXT: .LBB34_4: -; CHECK-RV64-NEXT: mul a2, a4, a1 -; CHECK-RV64-NEXT: add a0, a0, a2 -; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma +; CHECK-RV64-NEXT: sub a5, a2, a4 +; CHECK-RV64-NEXT: sltu a2, a2, a5 +; CHECK-RV64-NEXT: addi a2, a2, -1 +; CHECK-RV64-NEXT: and a2, a2, a5 +; CHECK-RV64-NEXT: srli a4, a4, 3 +; CHECK-RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; CHECK-RV64-NEXT: vslidedown.vx v0, v0, a4 +; CHECK-RV64-NEXT: mul a3, a3, a1 +; CHECK-RV64-NEXT: add a0, a0, a3 +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v16, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv16f64.p0f64.i32( %v, double* %ptr, i32 %stride, %mask, i32 %evl) @@ -681,17 +677,15 @@ ; CHECK-RV32-NEXT: # %bb.1: ; CHECK-RV32-NEXT: mv a3, a4 ; CHECK-RV32-NEXT: .LBB35_2: -; CHECK-RV32-NEXT: li a5, 0 ; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-RV32-NEXT: sub a4, a2, a4 ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1 -; CHECK-RV32-NEXT: bltu a2, a4, .LBB35_4 -; CHECK-RV32-NEXT: # %bb.3: -; CHECK-RV32-NEXT: mv a5, a4 -; CHECK-RV32-NEXT: .LBB35_4: -; CHECK-RV32-NEXT: mul a2, a3, a1 -; CHECK-RV32-NEXT: add a0, a0, a2 -; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma +; CHECK-RV32-NEXT: sub a4, a2, a4 +; CHECK-RV32-NEXT: sltu a2, a2, a4 +; CHECK-RV32-NEXT: addi a2, a2, -1 +; CHECK-RV32-NEXT: and a2, a2, a4 +; CHECK-RV32-NEXT: mul a3, a3, a1 +; CHECK-RV32-NEXT: add a0, a0, a3 +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v16, (a0), a1 ; CHECK-RV32-NEXT: ret ; @@ -703,17 +697,15 @@ ; CHECK-RV64-NEXT: # %bb.1: ; CHECK-RV64-NEXT: mv a3, a4 ; CHECK-RV64-NEXT: .LBB35_2: -; CHECK-RV64-NEXT: li a5, 0 ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-RV64-NEXT: sub a4, a2, a4 ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1 -; CHECK-RV64-NEXT: bltu a2, a4, .LBB35_4 -; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: mv a5, a4 -; CHECK-RV64-NEXT: .LBB35_4: -; CHECK-RV64-NEXT: mul a2, a3, a1 -; CHECK-RV64-NEXT: add a0, a0, a2 -; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma +; CHECK-RV64-NEXT: sub a4, a2, a4 +; CHECK-RV64-NEXT: sltu a2, a2, a4 +; CHECK-RV64-NEXT: addi a2, a2, -1 +; CHECK-RV64-NEXT: and a2, a2, a4 +; CHECK-RV64-NEXT: mul a3, a3, a1 +; CHECK-RV64-NEXT: add a0, a0, a3 +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v16, (a0), a1 ; 
CHECK-RV64-NEXT: ret %one = insertelement poison, i1 true, i32 0 @@ -728,55 +720,51 @@ define void @strided_store_nxv17f64( %v, double* %ptr, i32 signext %stride, %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_store_nxv17f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: addi sp, sp, -16 -; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32-NEXT: csrr a4, vlenb -; CHECK-RV32-NEXT: slli a4, a4, 3 -; CHECK-RV32-NEXT: sub sp, sp, a4 ; CHECK-RV32-NEXT: csrr a4, vlenb -; CHECK-RV32-NEXT: slli a7, a4, 1 +; CHECK-RV32-NEXT: slli a6, a4, 1 ; CHECK-RV32-NEXT: vmv1r.v v24, v0 -; CHECK-RV32-NEXT: addi a5, sp, 16 -; CHECK-RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill -; CHECK-RV32-NEXT: mv a6, a3 -; CHECK-RV32-NEXT: bltu a3, a7, .LBB36_2 +; CHECK-RV32-NEXT: mv a5, a3 +; CHECK-RV32-NEXT: bltu a3, a6, .LBB36_2 ; CHECK-RV32-NEXT: # %bb.1: -; CHECK-RV32-NEXT: mv a6, a7 -; CHECK-RV32-NEXT: .LBB36_2: ; CHECK-RV32-NEXT: mv a5, a6 -; CHECK-RV32-NEXT: bltu a6, a4, .LBB36_4 +; CHECK-RV32-NEXT: .LBB36_2: +; CHECK-RV32-NEXT: mv a7, a5 +; CHECK-RV32-NEXT: bltu a5, a4, .LBB36_4 ; CHECK-RV32-NEXT: # %bb.3: -; CHECK-RV32-NEXT: mv a5, a4 +; CHECK-RV32-NEXT: mv a7, a4 ; CHECK-RV32-NEXT: .LBB36_4: -; CHECK-RV32-NEXT: li t0, 0 -; CHECK-RV32-NEXT: vl8re64.v v16, (a0) -; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: csrr t0, vlenb +; CHECK-RV32-NEXT: slli t0, t0, 3 +; CHECK-RV32-NEXT: sub sp, sp, t0 +; CHECK-RV32-NEXT: vl8re64.v v0, (a0) +; CHECK-RV32-NEXT: addi a0, sp, 16 +; CHECK-RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v0, v24 ; CHECK-RV32-NEXT: vsse64.v v8, (a1), a2, v0.t -; CHECK-RV32-NEXT: sub a7, a3, a7 -; CHECK-RV32-NEXT: srli a0, a4, 2 -; CHECK-RV32-NEXT: bltu a3, a7, .LBB36_6 +; CHECK-RV32-NEXT: sub a0, a5, a4 +; CHECK-RV32-NEXT: sltu t0, a5, a0 +; CHECK-RV32-NEXT: addi t0, t0, -1 +; CHECK-RV32-NEXT: and a0, t0, a0 +; CHECK-RV32-NEXT: srli t0, a4, 3 +; CHECK-RV32-NEXT: vsetvli t1, zero, e8, mf4, ta, ma +; CHECK-RV32-NEXT: vslidedown.vx v0, v24, t0 +; CHECK-RV32-NEXT: mul a7, a7, a2 +; CHECK-RV32-NEXT: add a7, a1, a7 +; CHECK-RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-RV32-NEXT: sub a0, a3, a6 +; CHECK-RV32-NEXT: sltu a3, a3, a0 +; CHECK-RV32-NEXT: addi a3, a3, -1 +; CHECK-RV32-NEXT: and a0, a3, a0 +; CHECK-RV32-NEXT: vsse64.v v16, (a7), a2, v0.t +; CHECK-RV32-NEXT: bltu a0, a4, .LBB36_6 ; CHECK-RV32-NEXT: # %bb.5: -; CHECK-RV32-NEXT: mv t0, a7 +; CHECK-RV32-NEXT: mv a0, a4 ; CHECK-RV32-NEXT: .LBB36_6: -; CHECK-RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma -; CHECK-RV32-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-RV32-NEXT: bltu t0, a4, .LBB36_8 -; CHECK-RV32-NEXT: # %bb.7: -; CHECK-RV32-NEXT: mv t0, a4 -; CHECK-RV32-NEXT: .LBB36_8: -; CHECK-RV32-NEXT: li a0, 0 -; CHECK-RV32-NEXT: mul a3, a6, a2 -; CHECK-RV32-NEXT: add a7, a1, a3 -; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma -; CHECK-RV32-NEXT: sub a3, a6, a4 -; CHECK-RV32-NEXT: vsse64.v v16, (a7), a2, v0.t -; CHECK-RV32-NEXT: bltu a6, a3, .LBB36_10 -; CHECK-RV32-NEXT: # %bb.9: -; CHECK-RV32-NEXT: mv a0, a3 -; CHECK-RV32-NEXT: .LBB36_10: -; CHECK-RV32-NEXT: srli a3, a4, 3 -; CHECK-RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma +; CHECK-RV32-NEXT: srli a3, a4, 2 +; CHECK-RV32-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-RV32-NEXT: vslidedown.vx v0, v24, a3 ; CHECK-RV32-NEXT: mul a3, a5, a2 ; CHECK-RV32-NEXT: add a1, a1, a3 @@ -792,55 
+780,51 @@ ; ; CHECK-RV64-LABEL: strided_store_nxv17f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: addi sp, sp, -16 -; CHECK-RV64-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV64-NEXT: csrr a4, vlenb -; CHECK-RV64-NEXT: slli a4, a4, 3 -; CHECK-RV64-NEXT: sub sp, sp, a4 ; CHECK-RV64-NEXT: csrr a4, vlenb -; CHECK-RV64-NEXT: slli a7, a4, 1 +; CHECK-RV64-NEXT: slli a6, a4, 1 ; CHECK-RV64-NEXT: vmv1r.v v24, v0 -; CHECK-RV64-NEXT: addi a5, sp, 16 -; CHECK-RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill -; CHECK-RV64-NEXT: mv a6, a3 -; CHECK-RV64-NEXT: bltu a3, a7, .LBB36_2 +; CHECK-RV64-NEXT: mv a5, a3 +; CHECK-RV64-NEXT: bltu a3, a6, .LBB36_2 ; CHECK-RV64-NEXT: # %bb.1: -; CHECK-RV64-NEXT: mv a6, a7 -; CHECK-RV64-NEXT: .LBB36_2: ; CHECK-RV64-NEXT: mv a5, a6 -; CHECK-RV64-NEXT: bltu a6, a4, .LBB36_4 +; CHECK-RV64-NEXT: .LBB36_2: +; CHECK-RV64-NEXT: mv a7, a5 +; CHECK-RV64-NEXT: bltu a5, a4, .LBB36_4 ; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: mv a5, a4 +; CHECK-RV64-NEXT: mv a7, a4 ; CHECK-RV64-NEXT: .LBB36_4: -; CHECK-RV64-NEXT: li t0, 0 -; CHECK-RV64-NEXT: vl8re64.v v16, (a0) -; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma +; CHECK-RV64-NEXT: addi sp, sp, -16 +; CHECK-RV64-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV64-NEXT: csrr t0, vlenb +; CHECK-RV64-NEXT: slli t0, t0, 3 +; CHECK-RV64-NEXT: sub sp, sp, t0 +; CHECK-RV64-NEXT: vl8re64.v v0, (a0) +; CHECK-RV64-NEXT: addi a0, sp, 16 +; CHECK-RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, ma ; CHECK-RV64-NEXT: vmv1r.v v0, v24 ; CHECK-RV64-NEXT: vsse64.v v8, (a1), a2, v0.t -; CHECK-RV64-NEXT: sub a7, a3, a7 -; CHECK-RV64-NEXT: srli a0, a4, 2 -; CHECK-RV64-NEXT: bltu a3, a7, .LBB36_6 +; CHECK-RV64-NEXT: sub a0, a5, a4 +; CHECK-RV64-NEXT: sltu t0, a5, a0 +; CHECK-RV64-NEXT: addi t0, t0, -1 +; CHECK-RV64-NEXT: and a0, t0, a0 +; CHECK-RV64-NEXT: srli t0, a4, 3 +; CHECK-RV64-NEXT: vsetvli t1, zero, e8, mf4, ta, ma +; CHECK-RV64-NEXT: vslidedown.vx v0, v24, t0 +; CHECK-RV64-NEXT: mul a7, a7, a2 +; CHECK-RV64-NEXT: add a7, a1, a7 +; CHECK-RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-RV64-NEXT: sub a0, a3, a6 +; CHECK-RV64-NEXT: sltu a3, a3, a0 +; CHECK-RV64-NEXT: addi a3, a3, -1 +; CHECK-RV64-NEXT: and a0, a3, a0 +; CHECK-RV64-NEXT: vsse64.v v16, (a7), a2, v0.t +; CHECK-RV64-NEXT: bltu a0, a4, .LBB36_6 ; CHECK-RV64-NEXT: # %bb.5: -; CHECK-RV64-NEXT: mv t0, a7 +; CHECK-RV64-NEXT: mv a0, a4 ; CHECK-RV64-NEXT: .LBB36_6: -; CHECK-RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma -; CHECK-RV64-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-RV64-NEXT: bltu t0, a4, .LBB36_8 -; CHECK-RV64-NEXT: # %bb.7: -; CHECK-RV64-NEXT: mv t0, a4 -; CHECK-RV64-NEXT: .LBB36_8: -; CHECK-RV64-NEXT: li a0, 0 -; CHECK-RV64-NEXT: mul a3, a6, a2 -; CHECK-RV64-NEXT: add a7, a1, a3 -; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma -; CHECK-RV64-NEXT: sub a3, a6, a4 -; CHECK-RV64-NEXT: vsse64.v v16, (a7), a2, v0.t -; CHECK-RV64-NEXT: bltu a6, a3, .LBB36_10 -; CHECK-RV64-NEXT: # %bb.9: -; CHECK-RV64-NEXT: mv a0, a3 -; CHECK-RV64-NEXT: .LBB36_10: -; CHECK-RV64-NEXT: srli a3, a4, 3 -; CHECK-RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma +; CHECK-RV64-NEXT: srli a3, a4, 2 +; CHECK-RV64-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-RV64-NEXT: vslidedown.vx v0, v24, a3 ; CHECK-RV64-NEXT: mul a3, a5, a2 ; CHECK-RV64-NEXT: add a1, a1, a3 Index: llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ 
llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -645,26 +645,24 @@ define @vadd_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: bltu a1, a2, .LBB50_2 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub a2, a1, a0 +; CHECK-NEXT: sltu a3, a1, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t +; CHECK-NEXT: bltu a1, a0, .LBB50_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: .LBB50_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu -; CHECK-NEXT: sub a0, a1, a2 -; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t -; CHECK-NEXT: bltu a1, a0, .LBB50_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a0 -; CHECK-NEXT: .LBB50_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t +; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -677,21 +675,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma +; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: bltu a0, a1, .LBB51_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB51_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 -; CHECK-NEXT: bltu a0, a1, .LBB51_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB51_4: -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1545,23 +1540,21 @@ ; CHECK-LABEL: vadd_vi_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB118_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB118_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB118_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB118_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB118_4: +; CHECK-NEXT: .LBB118_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t @@ -1577,21 +1570,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: 
slli a1, a1, 1 -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: bltu a0, a1, .LBB119_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB119_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 -; CHECK-NEXT: bltu a0, a1, .LBB119_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB119_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1612,23 +1602,21 @@ ; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: srli a4, a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; CHECK-NEXT: srli a1, a0, 2 +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a1 ; CHECK-NEXT: slli a1, a0, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB120_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB120_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB120_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB120_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB120_4: +; CHECK-NEXT: .LBB120_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t Index: llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll @@ -395,22 +395,20 @@ ; CHECK-LABEL: vfabs_vv_nxv16f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vslidedown.vx v0, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfabs.v v8, v8, v0.t @@ -423,21 +421,18 @@ ; CHECK-LABEL: vfabs_vv_nxv16f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vfabs.v v16, v16 ; CHECK-NEXT: bltu 
a0, a1, .LBB33_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB33_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -1200,13 +1200,12 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 48 +; CHECK-NEXT: li a3, 40 ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill @@ -1215,42 +1214,35 @@ ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; CHECK-NEXT: slli a5, a1, 3 -; CHECK-NEXT: add a6, a2, a5 -; CHECK-NEXT: vl8re64.v v8, (a6) -; CHECK-NEXT: csrr a6, vlenb -; CHECK-NEXT: slli a6, a6, 3 -; CHECK-NEXT: add a6, sp, a6 -; CHECK-NEXT: addi a6, a6, 16 -; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill -; CHECK-NEXT: add a5, a0, a5 +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; CHECK-NEXT: slli a3, a1, 3 +; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vl8re64.v v8, (a5) ; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: li a6, 40 +; CHECK-NEXT: li a6, 24 ; CHECK-NEXT: mul a5, a5, a6 ; CHECK-NEXT: add a5, sp, a5 ; CHECK-NEXT: addi a5, a5, 16 ; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill -; CHECK-NEXT: srli a6, a1, 3 ; CHECK-NEXT: sub a5, a4, a1 +; CHECK-NEXT: sltu a6, a4, a5 +; CHECK-NEXT: addi a6, a6, -1 +; CHECK-NEXT: and a5, a6, a5 +; CHECK-NEXT: srli a6, a1, 3 +; CHECK-NEXT: add a3, a0, a3 +; CHECK-NEXT: vl8re64.v v8, (a3) +; CHECK-NEXT: vl8re64.v v16, (a2) +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vl8re64.v v16, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v0, a6 -; CHECK-NEXT: bltu a4, a5, .LBB92_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a5 -; CHECK-NEXT: .LBB92_2: -; CHECK-NEXT: vl8re64.v v8, (a2) -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 4 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, mu ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: mul a0, a0, a2 @@ -1258,27 +1250,21 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # 
Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a2, 40 -; CHECK-NEXT: mul a0, a0, a2 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a2, 40 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: mul a0, a0, a2 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a4, a1, .LBB92_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a4, a1, .LBB92_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a4, a1 -; CHECK-NEXT: .LBB92_4: +; CHECK-NEXT: .LBB92_2: ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb @@ -1287,21 +1273,21 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 40 +; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 48 +; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1320,53 +1306,50 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a3, a1, 3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vl8re64.v v24, (a5) -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: slli a5, a5, 3 -; CHECK-NEXT: add a5, sp, a5 -; CHECK-NEXT: addi a5, a5, 16 -; CHECK-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill ; CHECK-NEXT: add a3, a0, a3 -; CHECK-NEXT: vl8re64.v v24, (a3) -; CHECK-NEXT: sub a5, a4, a1 -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 4 -; CHECK-NEXT: add a3, sp, a3 -; CHECK-NEXT: addi a3, a3, 16 -; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bltu a4, a5, .LBB93_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a5 -; CHECK-NEXT: .LBB93_2: +; CHECK-NEXT: vl8re64.v v16, (a3) +; CHECK-NEXT: sub a3, a4, a1 +; CHECK-NEXT: sltu a5, a4, a3 +; CHECK-NEXT: addi a5, a5, -1 ; CHECK-NEXT: vl8re64.v v8, (a2) ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vl8re64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; CHECK-NEXT: and a0, a5, a3 +; CHECK-NEXT: vsetvli zero, 
a0, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v24, v16, v8 -; CHECK-NEXT: bltu a4, a1, .LBB93_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: vfmadd.vv v16, v8, v24 +; CHECK-NEXT: bltu a4, a1, .LBB93_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a4, a1 -; CHECK-NEXT: .LBB93_4: +; CHECK-NEXT: .LBB93_2: ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v0, v16, v8 +; CHECK-NEXT: vfmadd.vv v0, v24, v8 ; CHECK-NEXT: vmv.v.v v8, v0 -; CHECK-NEXT: vmv8r.v v16, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 Index: llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll @@ -1200,13 +1200,12 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 48 +; CHECK-NEXT: li a3, 40 ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: li a3, 24 -; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill @@ -1215,42 +1214,35 @@ ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; CHECK-NEXT: slli a5, a1, 3 -; CHECK-NEXT: add a6, a2, a5 -; CHECK-NEXT: vl8re64.v v8, (a6) -; CHECK-NEXT: csrr a6, vlenb -; CHECK-NEXT: slli a6, a6, 3 -; CHECK-NEXT: add a6, sp, a6 -; CHECK-NEXT: addi a6, a6, 16 -; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill -; CHECK-NEXT: add a5, a0, a5 +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; CHECK-NEXT: slli a3, a1, 3 +; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vl8re64.v v8, (a5) ; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: li a6, 40 +; CHECK-NEXT: li a6, 24 ; CHECK-NEXT: mul a5, a5, a6 ; CHECK-NEXT: add a5, sp, a5 ; CHECK-NEXT: addi a5, a5, 16 ; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill -; CHECK-NEXT: srli a6, a1, 3 ; CHECK-NEXT: sub a5, a4, a1 +; CHECK-NEXT: sltu a6, a4, a5 +; CHECK-NEXT: addi a6, a6, -1 +; CHECK-NEXT: and a5, a6, a5 +; CHECK-NEXT: srli a6, a1, 3 +; CHECK-NEXT: add a3, a0, a3 +; CHECK-NEXT: vl8re64.v v8, (a3) +; CHECK-NEXT: vl8re64.v v16, (a2) +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vl8re64.v v16, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v0, a6 -; CHECK-NEXT: bltu a4, a5, .LBB92_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a5 -; CHECK-NEXT: .LBB92_2: -; CHECK-NEXT: vl8re64.v v8, (a2) -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 4 -; CHECK-NEXT: add a2, sp, a2 -; 
CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, mu ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: mul a0, a0, a2 @@ -1258,27 +1250,21 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a2, 40 -; CHECK-NEXT: mul a0, a0, a2 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a2, 40 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: mul a0, a0, a2 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: bltu a4, a1, .LBB92_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a4, a1, .LBB92_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a4, a1 -; CHECK-NEXT: .LBB92_4: +; CHECK-NEXT: .LBB92_2: ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb @@ -1287,21 +1273,21 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 40 +; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 48 +; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1320,53 +1306,50 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a3, a1, 3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vl8re64.v v24, (a5) -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: slli a5, a5, 3 -; CHECK-NEXT: add a5, sp, a5 -; CHECK-NEXT: addi a5, a5, 16 -; CHECK-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill ; CHECK-NEXT: add a3, a0, a3 -; CHECK-NEXT: vl8re64.v v24, (a3) -; CHECK-NEXT: sub a5, a4, a1 -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 4 -; CHECK-NEXT: add a3, sp, a3 -; CHECK-NEXT: addi a3, a3, 16 -; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bltu 
a4, a5, .LBB93_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a5 -; CHECK-NEXT: .LBB93_2: +; CHECK-NEXT: vl8re64.v v16, (a3) +; CHECK-NEXT: sub a3, a4, a1 +; CHECK-NEXT: sltu a5, a4, a3 +; CHECK-NEXT: addi a5, a5, -1 ; CHECK-NEXT: vl8re64.v v8, (a2) ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vl8re64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; CHECK-NEXT: and a0, a5, a3 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v24, v16, v8 -; CHECK-NEXT: bltu a4, a1, .LBB93_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: vfmadd.vv v16, v8, v24 +; CHECK-NEXT: bltu a4, a1, .LBB93_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a4, a1 -; CHECK-NEXT: .LBB93_4: +; CHECK-NEXT: .LBB93_2: ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfmadd.vv v0, v16, v8 +; CHECK-NEXT: vfmadd.vv v0, v24, v8 ; CHECK-NEXT: vmv.v.v v8, v0 -; CHECK-NEXT: vmv8r.v v16, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 Index: llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll @@ -395,22 +395,20 @@ ; CHECK-LABEL: vfneg_vv_nxv16f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vslidedown.vx v0, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfneg.v v8, v8, v0.t @@ -423,21 +421,18 @@ ; CHECK-LABEL: vfneg_vv_nxv16f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vfneg.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB33_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB33_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, 
e64, m8, ta, ma -; CHECK-NEXT: vfneg.v v16, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll @@ -95,23 +95,21 @@ ; CHECK-LABEL: vfpext_nxv32f16_nxv32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB7_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB7_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v12, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB7_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB7_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB7_4: +; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t Index: llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll @@ -322,23 +322,21 @@ ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB25_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB25_4: +; CHECK-NEXT: .LBB25_2: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 @@ -359,23 +357,21 @@ ; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; 
CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t @@ -389,21 +385,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv32i32.nxv32f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v Index: llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll @@ -322,23 +322,21 @@ ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB25_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB25_4: +; CHECK-NEXT: .LBB25_2: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 @@ -359,23 +357,21 @@ ; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t 
@@ -389,21 +385,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv32i32.nxv32f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v Index: llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -verify-machineinstrs < %s | FileCheck %s declare @llvm.vp.fptrunc.nxv2f16.nxv2f32(, , i32) @@ -99,30 +99,29 @@ ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB7_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB7_2: +; CHECK-NEXT: vslidedown.vx v0, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB7_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vfncvt.f.f.w v20, v24, v0.t +; CHECK-NEXT: bltu a0, a1, .LBB7_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB7_4: +; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 @@ -140,90 +139,86 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; 
CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: li a3, 24 +; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a4, a1, 1 -; CHECK-NEXT: srli a3, a1, 3 -; CHECK-NEXT: mv a5, a2 -; CHECK-NEXT: bltu a2, a4, .LBB8_2 +; CHECK-NEXT: srli a3, a1, 2 +; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v25, v0, a3 +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; CHECK-NEXT: slli a3, a1, 3 +; CHECK-NEXT: add a3, a0, a3 +; CHECK-NEXT: vl8re64.v v8, (a3) +; CHECK-NEXT: slli a3, a1, 1 +; CHECK-NEXT: sub a4, a2, a3 +; CHECK-NEXT: sltu a5, a2, a4 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a4, a5, a4 +; CHECK-NEXT: sub a5, a4, a1 +; CHECK-NEXT: sltu a6, a4, a5 +; CHECK-NEXT: addi a6, a6, -1 +; CHECK-NEXT: and a6, a6, a5 +; CHECK-NEXT: srli a5, a1, 3 +; CHECK-NEXT: vl8re64.v v16, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vslidedown.vx v0, v25, a5 +; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma +; CHECK-NEXT: vfncvt.f.f.w v20, v8, v0.t +; CHECK-NEXT: bltu a4, a1, .LBB8_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: mv a4, a1 ; CHECK-NEXT: .LBB8_2: -; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a7, a5, a1 -; CHECK-NEXT: vslidedown.vx v0, v24, a3 -; CHECK-NEXT: bltu a5, a7, .LBB8_4 +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vx v26, v1, a5 +; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t +; CHECK-NEXT: bltu a2, a3, .LBB8_4 ; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a6, a7 +; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB8_4: -; CHECK-NEXT: srli a7, a1, 2 -; CHECK-NEXT: slli t0, a1, 3 -; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a5, a1, .LBB8_6 -; CHECK-NEXT: # %bb.5: -; CHECK-NEXT: mv a5, a1 -; CHECK-NEXT: .LBB8_6: -; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v1, v24, a7 -; CHECK-NEXT: add a7, a0, t0 -; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma -; CHECK-NEXT: sub a4, a2, a4 -; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: slli a5, a5, 3 -; CHECK-NEXT: add a5, sp, a5 -; CHECK-NEXT: addi a5, a5, 16 -; CHECK-NEXT: vl8re8.v v16, (a5) # Unknown-size Folded Reload -; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t -; CHECK-NEXT: bltu a2, a4, .LBB8_8 -; CHECK-NEXT: # %bb.7: -; CHECK-NEXT: mv a6, a4 -; CHECK-NEXT: .LBB8_8: -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma -; CHECK-NEXT: vl8re64.v v16, (a7) -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: sub a4, a6, a1 -; CHECK-NEXT: vslidedown.vx v0, v1, a3 -; CHECK-NEXT: bltu a6, a4, .LBB8_10 -; CHECK-NEXT: # %bb.9: -; 
CHECK-NEXT: mv a2, a4 -; CHECK-NEXT: .LBB8_10: -; CHECK-NEXT: vl8re64.v v16, (a0) -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: sub a0, a2, a1 +; CHECK-NEXT: sltu a3, a2, a0 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a0, a3, a0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v0, v26 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfncvt.f.f.w v20, v24, v0.t -; CHECK-NEXT: bltu a6, a1, .LBB8_12 -; CHECK-NEXT: # %bb.11: -; CHECK-NEXT: mv a6, a1 -; CHECK-NEXT: .LBB8_12: -; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma +; CHECK-NEXT: vfncvt.f.f.w v12, v24, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB8_6 +; CHECK-NEXT: # %bb.5: +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: .LBB8_6: +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfncvt.f.f.w v8, v24, v0.t +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 24 +; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll @@ -395,22 +395,20 @@ ; CHECK-LABEL: vfsqrt_vv_nxv16f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB32_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: vslidedown.vx v0, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB32_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB32_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB32_4: +; CHECK-NEXT: .LBB32_2: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfsqrt.v v8, v8, v0.t @@ -423,21 +421,18 @@ ; CHECK-LABEL: vfsqrt_vv_nxv16f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vfsqrt.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB33_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB33_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 -; CHECK-NEXT: bltu a0, a1, .LBB33_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: 
vsetvli zero, a3, e64, m8, ta, ma -; CHECK-NEXT: vfsqrt.v v16, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll @@ -443,26 +443,24 @@ define @vmax_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a2, a3, .LBB34_2 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: sub a3, a2, a1 +; CHECK-NEXT: sltu a4, a2, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB34_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB34_2: -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu -; CHECK-NEXT: sub a1, a2, a3 -; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t -; CHECK-NEXT: bltu a2, a1, .LBB34_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a5, a1 -; CHECK-NEXT: .LBB34_4: -; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t +; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -475,21 +473,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vmax.vx v16, v16, a0 ; CHECK-NEXT: bltu a1, a2, .LBB35_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB35_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: sub a2, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: bltu a1, a2, .LBB35_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB35_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: vmax.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1057,23 +1052,21 @@ ; CHECK-LABEL: vmax_vx_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: srli a5, a2, 2 +; CHECK-NEXT: srli a3, a2, 2 ; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB80_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB80_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB80_4 -; CHECK-NEXT: # 
%bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB80_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB80_4: +; CHECK-NEXT: .LBB80_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t @@ -1089,21 +1082,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; CHECK-NEXT: vmax.vx v16, v16, a0 ; CHECK-NEXT: bltu a1, a2, .LBB81_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB81_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: sub a2, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 -; CHECK-NEXT: bltu a1, a2, .LBB81_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB81_4: -; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma -; CHECK-NEXT: vmax.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1124,23 +1114,21 @@ ; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a5, a1, 2 -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; CHECK-NEXT: srli a2, a1, 2 +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB82_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB82_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB82_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB82_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB82_4: +; CHECK-NEXT: .LBB82_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t Index: llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll @@ -442,26 +442,24 @@ define @vmaxu_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a2, a3, .LBB34_2 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: sub a3, a2, a1 +; CHECK-NEXT: sltu a4, a2, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB34_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB34_2: -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu -; CHECK-NEXT: sub a1, a2, a3 -; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t -; CHECK-NEXT: bltu a2, a1, 
.LBB34_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a5, a1 -; CHECK-NEXT: .LBB34_4: -; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t +; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -474,21 +472,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vmaxu.vx v16, v16, a0 ; CHECK-NEXT: bltu a1, a2, .LBB35_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB35_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: sub a2, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: bltu a1, a2, .LBB35_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB35_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: vmaxu.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1056,23 +1051,21 @@ ; CHECK-LABEL: vmaxu_vx_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: srli a5, a2, 2 +; CHECK-NEXT: srli a3, a2, 2 ; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB80_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB80_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB80_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB80_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB80_4: +; CHECK-NEXT: .LBB80_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t @@ -1088,21 +1081,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; CHECK-NEXT: vmaxu.vx v16, v16, a0 ; CHECK-NEXT: bltu a1, a2, .LBB81_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB81_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: sub a2, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 -; CHECK-NEXT: bltu a1, a2, .LBB81_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB81_4: -; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma -; CHECK-NEXT: vmaxu.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1123,23 +1113,21 @@ ; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a1, 
vlenb -; CHECK-NEXT: srli a5, a1, 2 -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; CHECK-NEXT: srli a2, a1, 2 +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB82_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB82_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB82_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB82_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB82_4: +; CHECK-NEXT: .LBB82_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t Index: llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll @@ -443,26 +443,24 @@ define @vmin_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a2, a3, .LBB34_2 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: sub a3, a2, a1 +; CHECK-NEXT: sltu a4, a2, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB34_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB34_2: -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu -; CHECK-NEXT: sub a1, a2, a3 -; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t -; CHECK-NEXT: bltu a2, a1, .LBB34_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a5, a1 -; CHECK-NEXT: .LBB34_4: -; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t +; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -475,21 +473,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vmin.vx v16, v16, a0 ; CHECK-NEXT: bltu a1, a2, .LBB35_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB35_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: sub a2, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: bltu a1, a2, .LBB35_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB35_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: vmin.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1057,23 +1052,21 
@@ ; CHECK-LABEL: vmin_vx_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: srli a5, a2, 2 +; CHECK-NEXT: srli a3, a2, 2 ; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB80_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB80_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB80_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB80_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB80_4: +; CHECK-NEXT: .LBB80_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t @@ -1089,21 +1082,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; CHECK-NEXT: vmin.vx v16, v16, a0 ; CHECK-NEXT: bltu a1, a2, .LBB81_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB81_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: sub a2, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 -; CHECK-NEXT: bltu a1, a2, .LBB81_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB81_4: -; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma -; CHECK-NEXT: vmin.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1124,23 +1114,21 @@ ; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a5, a1, 2 -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; CHECK-NEXT: srli a2, a1, 2 +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB82_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB82_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB82_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB82_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB82_4: +; CHECK-NEXT: .LBB82_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t Index: llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll @@ -442,26 +442,24 @@ define @vminu_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a2, a3, .LBB34_2 +; 
CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: sub a3, a2, a1 +; CHECK-NEXT: sltu a4, a2, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t +; CHECK-NEXT: bltu a2, a1, .LBB34_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB34_2: -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu -; CHECK-NEXT: sub a1, a2, a3 -; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t -; CHECK-NEXT: bltu a2, a1, .LBB34_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a5, a1 -; CHECK-NEXT: .LBB34_4: -; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t +; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -474,21 +472,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vminu.vx v16, v16, a0 ; CHECK-NEXT: bltu a1, a2, .LBB35_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB35_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma -; CHECK-NEXT: sub a2, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: bltu a1, a2, .LBB35_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB35_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: vminu.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1056,23 +1051,21 @@ ; CHECK-LABEL: vminu_vx_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: srli a5, a2, 2 +; CHECK-NEXT: srli a3, a2, 2 ; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB80_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB80_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB80_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB80_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB80_4: +; CHECK-NEXT: .LBB80_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t @@ -1088,21 +1081,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; CHECK-NEXT: vminu.vx v16, v16, a0 ; CHECK-NEXT: bltu a1, a2, .LBB81_2 ; 
CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB81_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: sub a2, a1, a2 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 -; CHECK-NEXT: bltu a1, a2, .LBB81_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB81_4: -; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma -; CHECK-NEXT: vminu.vx v16, v16, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1123,23 +1113,21 @@ ; CHECK-LABEL: vminu_vx_nxv32i32_evl_nx8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a5, a1, 2 -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma +; CHECK-NEXT: srli a2, a1, 2 +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB82_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB82_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu ; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t -; CHECK-NEXT: bltu a1, a2, .LBB82_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB82_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB82_4: +; CHECK-NEXT: .LBB82_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t Index: llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -257,25 +257,23 @@ ; RV32-LABEL: vpgather_baseidx_nxv32i8: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v12, v0 -; RV32-NEXT: li a3, 0 -; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: srli a5, a2, 2 -; RV32-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; RV32-NEXT: slli a2, a2, 1 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a2, a3, 1 ; RV32-NEXT: sub a4, a1, a2 -; RV32-NEXT: vslidedown.vx v0, v0, a5 -; RV32-NEXT: bltu a1, a4, .LBB12_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a3, a4 -; RV32-NEXT: .LBB12_2: -; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma +; RV32-NEXT: sltu a5, a1, a4 +; RV32-NEXT: addi a5, a5, -1 +; RV32-NEXT: and a4, a5, a4 +; RV32-NEXT: srli a3, a3, 2 +; RV32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a3 +; RV32-NEXT: vsetvli a3, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v24, v10 -; RV32-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV32-NEXT: vsetvli zero, a4, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v18, (a0), v24, v0.t -; RV32-NEXT: bltu a1, a2, .LBB12_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a1, a2, .LBB12_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB12_4: +; RV32-NEXT: .LBB12_2: ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu @@ -286,66 +284,61 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv32i8: ; RV64: # %bb.0: -; RV64-NEXT: csrr a3, vlenb -; RV64-NEXT: slli a5, a3, 1 -; RV64-NEXT: sub a6, a1, a5 -; RV64-NEXT: vmv1r.v v12, v0 -; RV64-NEXT: li a4, 0 -; RV64-NEXT: li a2, 0 -; RV64-NEXT: bltu a1, a6, .LBB12_2 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: slli a4, a2, 1 +; 
RV64-NEXT: sub a3, a1, a4 +; RV64-NEXT: sltu a5, a1, a3 +; RV64-NEXT: addi a5, a5, -1 +; RV64-NEXT: and a3, a5, a3 +; RV64-NEXT: vmv1r.v v17, v0 +; RV64-NEXT: mv a5, a3 +; RV64-NEXT: bltu a3, a2, .LBB12_2 ; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a6 +; RV64-NEXT: mv a5, a2 ; RV64-NEXT: .LBB12_2: -; RV64-NEXT: sub a6, a2, a3 -; RV64-NEXT: mv a7, a4 -; RV64-NEXT: bltu a2, a6, .LBB12_4 +; RV64-NEXT: srli a6, a2, 2 +; RV64-NEXT: vsetvli a7, zero, e8, mf2, ta, ma +; RV64-NEXT: vslidedown.vx v16, v17, a6 +; RV64-NEXT: vsetvli a6, zero, e64, m8, ta, ma +; RV64-NEXT: vsext.vf8 v24, v10 +; RV64-NEXT: vsetvli zero, a5, e8, m1, ta, mu +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: vluxei64.v v14, (a0), v24, v0.t +; RV64-NEXT: bltu a1, a4, .LBB12_4 ; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a7, a6 +; RV64-NEXT: mv a1, a4 ; RV64-NEXT: .LBB12_4: -; RV64-NEXT: srli a6, a3, 2 -; RV64-NEXT: vsetvli t0, zero, e8, mf2, ta, ma -; RV64-NEXT: vslidedown.vx v13, v12, a6 -; RV64-NEXT: srli a6, a3, 3 -; RV64-NEXT: vsetvli t0, zero, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vx v0, v13, a6 -; RV64-NEXT: vsetvli t0, zero, e64, m8, ta, ma -; RV64-NEXT: vsext.vf8 v24, v11 -; RV64-NEXT: vsetvli zero, a7, e8, m1, ta, mu -; RV64-NEXT: vluxei64.v v19, (a0), v24, v0.t -; RV64-NEXT: bltu a1, a5, .LBB12_6 +; RV64-NEXT: sub a4, a1, a2 +; RV64-NEXT: sltu a5, a1, a4 +; RV64-NEXT: addi a5, a5, -1 +; RV64-NEXT: and a5, a5, a4 +; RV64-NEXT: srli a4, a2, 3 +; RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v17, a4 +; RV64-NEXT: vsetvli a6, zero, e64, m8, ta, ma +; RV64-NEXT: vsext.vf8 v24, v9 +; RV64-NEXT: vsetvli zero, a5, e8, m1, ta, mu +; RV64-NEXT: vluxei64.v v13, (a0), v24, v0.t +; RV64-NEXT: bltu a1, a2, .LBB12_6 ; RV64-NEXT: # %bb.5: -; RV64-NEXT: mv a1, a5 +; RV64-NEXT: mv a1, a2 ; RV64-NEXT: .LBB12_6: -; RV64-NEXT: sub a5, a1, a3 -; RV64-NEXT: bltu a1, a5, .LBB12_8 -; RV64-NEXT: # %bb.7: -; RV64-NEXT: mv a4, a5 -; RV64-NEXT: .LBB12_8: -; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; RV64-NEXT: vslidedown.vx v0, v12, a6 ; RV64-NEXT: vsetvli a5, zero, e64, m8, ta, ma -; RV64-NEXT: vsext.vf8 v24, v9 -; RV64-NEXT: vsetvli zero, a4, e8, m1, ta, mu -; RV64-NEXT: vluxei64.v v17, (a0), v24, v0.t -; RV64-NEXT: bltu a1, a3, .LBB12_10 -; RV64-NEXT: # %bb.9: -; RV64-NEXT: mv a1, a3 -; RV64-NEXT: .LBB12_10: -; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t -; RV64-NEXT: bltu a2, a3, .LBB12_12 -; RV64-NEXT: # %bb.11: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB12_12: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV64-NEXT: vsext.vf8 v24, v10 -; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu -; RV64-NEXT: vmv1r.v v0, v13 -; RV64-NEXT: vluxei64.v v18, (a0), v24, v0.t -; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: vmv1r.v v0, v17 +; RV64-NEXT: vluxei64.v v12, (a0), v24, v0.t +; RV64-NEXT: sub a1, a3, a2 +; RV64-NEXT: sltu a2, a3, a1 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a1, a2, a1 +; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v16, a4 +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV64-NEXT: vsext.vf8 v16, v11 +; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t +; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs %v = call @llvm.vp.gather.nxv32i8.nxv32p0i8( %ptrs, %m, i32 %evl) @@ -2295,22 +2288,20 @@ ; RV32-LABEL: vpgather_nxv16f64: ; 
RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v24, v0 -; RV32-NEXT: li a2, 0 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: srli a4, a1, 3 -; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; RV32-NEXT: sub a3, a0, a1 -; RV32-NEXT: vslidedown.vx v0, v0, a4 -; RV32-NEXT: bltu a0, a3, .LBB102_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a2, a3 -; RV32-NEXT: .LBB102_2: +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 +; RV32-NEXT: srli a3, a1, 3 +; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a3 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v12, v0.t -; RV32-NEXT: bltu a0, a1, .LBB102_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a0, a1, .LBB102_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB102_4: +; RV32-NEXT: .LBB102_2: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v24 ; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t @@ -2320,22 +2311,20 @@ ; RV64-LABEL: vpgather_nxv16f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 -; RV64-NEXT: li a2, 0 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: srli a4, a1, 3 -; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma -; RV64-NEXT: sub a3, a0, a1 -; RV64-NEXT: vslidedown.vx v0, v0, a4 -; RV64-NEXT: bltu a0, a3, .LBB102_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a3 -; RV64-NEXT: .LBB102_2: +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 +; RV64-NEXT: srli a3, a1, 3 +; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a3 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t -; RV64-NEXT: bltu a0, a1, .LBB102_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a0, a1, .LBB102_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB102_4: +; RV64-NEXT: .LBB102_2: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t @@ -2348,25 +2337,23 @@ ; RV32-LABEL: vpgather_baseidx_nxv16i16_nxv16f64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v12, v0 -; RV32-NEXT: li a3, 0 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v24, v16, 3 ; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: srli a5, a2, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; RV32-NEXT: sub a4, a1, a2 -; RV32-NEXT: vslidedown.vx v0, v0, a5 -; RV32-NEXT: bltu a1, a4, .LBB103_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a3, a4 -; RV32-NEXT: .LBB103_2: +; RV32-NEXT: sub a3, a1, a2 +; RV32-NEXT: sltu a4, a1, a3 +; RV32-NEXT: addi a4, a4, -1 +; RV32-NEXT: and a3, a4, a3 +; RV32-NEXT: srli a4, a2, 3 +; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a4 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t -; RV32-NEXT: bltu a1, a2, .LBB103_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a1, a2, .LBB103_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB103_4: +; RV32-NEXT: .LBB103_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v12 ; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t @@ -2375,28 +2362,25 @@ ; RV64-LABEL: vpgather_baseidx_nxv16i16_nxv16f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 -; RV64-NEXT: li a3, 0 ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma -; RV64-NEXT: vsext.vf4 v24, v8 +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v24, v16, 3 ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: 
vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: srli a5, a2, 3 -; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; RV64-NEXT: sub a4, a1, a2 -; RV64-NEXT: vslidedown.vx v0, v0, a5 -; RV64-NEXT: bltu a1, a4, .LBB103_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a3, a4 -; RV64-NEXT: .LBB103_2: -; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma -; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: sub a3, a1, a2 +; RV64-NEXT: sltu a4, a1, a3 +; RV64-NEXT: addi a4, a4, -1 +; RV64-NEXT: and a3, a4, a3 +; RV64-NEXT: srli a4, a2, 3 +; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a4 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t -; RV64-NEXT: bltu a1, a2, .LBB103_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB103_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a1, a2 -; RV64-NEXT: .LBB103_4: +; RV64-NEXT: .LBB103_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t @@ -2410,25 +2394,23 @@ ; RV32-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v12, v0 -; RV32-NEXT: li a3, 0 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v24, v16, 3 ; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: srli a5, a2, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; RV32-NEXT: sub a4, a1, a2 -; RV32-NEXT: vslidedown.vx v0, v0, a5 -; RV32-NEXT: bltu a1, a4, .LBB104_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a3, a4 -; RV32-NEXT: .LBB104_2: +; RV32-NEXT: sub a3, a1, a2 +; RV32-NEXT: sltu a4, a1, a3 +; RV32-NEXT: addi a4, a4, -1 +; RV32-NEXT: and a3, a4, a3 +; RV32-NEXT: srli a4, a2, 3 +; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a4 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t -; RV32-NEXT: bltu a1, a2, .LBB104_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a1, a2, .LBB104_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB104_4: +; RV32-NEXT: .LBB104_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v12 ; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t @@ -2437,28 +2419,25 @@ ; RV64-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 -; RV64-NEXT: li a3, 0 ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: vsext.vf4 v24, v8 +; RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: srli a5, a2, 3 -; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; RV64-NEXT: sub a4, a1, a2 -; RV64-NEXT: vslidedown.vx v0, v0, a5 -; RV64-NEXT: bltu a1, a4, .LBB104_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a3, a4 -; RV64-NEXT: .LBB104_2: -; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma -; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: sub a3, a1, a2 +; RV64-NEXT: sltu a4, a1, a3 +; RV64-NEXT: addi a4, a4, -1 +; RV64-NEXT: and a3, a4, a3 +; RV64-NEXT: srli a4, a2, 3 +; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a4 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t -; RV64-NEXT: bltu a1, a2, .LBB104_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB104_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a1, a2 -; RV64-NEXT: .LBB104_4: +; RV64-NEXT: .LBB104_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t @@ -2473,25 +2452,23 @@ 
; RV32-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v12, v0 -; RV32-NEXT: li a3, 0 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v24, v16, 3 ; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: srli a5, a2, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; RV32-NEXT: sub a4, a1, a2 -; RV32-NEXT: vslidedown.vx v0, v0, a5 -; RV32-NEXT: bltu a1, a4, .LBB105_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a3, a4 -; RV32-NEXT: .LBB105_2: +; RV32-NEXT: sub a3, a1, a2 +; RV32-NEXT: sltu a4, a1, a3 +; RV32-NEXT: addi a4, a4, -1 +; RV32-NEXT: and a3, a4, a3 +; RV32-NEXT: srli a4, a2, 3 +; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a4 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t -; RV32-NEXT: bltu a1, a2, .LBB105_4 -; RV32-NEXT: # %bb.3: +; RV32-NEXT: bltu a1, a2, .LBB105_2 +; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB105_4: +; RV32-NEXT: .LBB105_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v12 ; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t @@ -2500,28 +2477,25 @@ ; RV64-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 -; RV64-NEXT: li a3, 0 ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v10 ; RV64-NEXT: vzext.vf4 v24, v8 +; RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: srli a5, a2, 3 -; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; RV64-NEXT: sub a4, a1, a2 -; RV64-NEXT: vslidedown.vx v0, v0, a5 -; RV64-NEXT: bltu a1, a4, .LBB105_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a3, a4 -; RV64-NEXT: .LBB105_2: -; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma -; RV64-NEXT: vsll.vi v24, v24, 3 +; RV64-NEXT: sub a3, a1, a2 +; RV64-NEXT: sltu a4, a1, a3 +; RV64-NEXT: addi a4, a4, -1 +; RV64-NEXT: and a3, a4, a3 +; RV64-NEXT: srli a4, a2, 3 +; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a4 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t -; RV64-NEXT: bltu a1, a2, .LBB105_4 -; RV64-NEXT: # %bb.3: +; RV64-NEXT: bltu a1, a2, .LBB105_2 +; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a1, a2 -; RV64-NEXT: .LBB105_4: +; RV64-NEXT: .LBB105_2: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t Index: llvm/test/CodeGen/RISCV/rvv/vpload.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vpload.ll +++ llvm/test/CodeGen/RISCV/rvv/vpload.ll @@ -454,24 +454,22 @@ ; CHECK-LABEL: vpload_nxv16f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: srli a5, a2, 3 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB37_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB37_2: +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: srli a4, a2, 3 +; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a4 ; CHECK-NEXT: slli a4, a2, 3 ; CHECK-NEXT: add a4, a0, a4 ; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v16, (a4), v0.t -; CHECK-NEXT: bltu a1, a2, .LBB37_4 -; CHECK-NEXT: # %bb.3: +; 
CHECK-NEXT: bltu a1, a2, .LBB37_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB37_4: +; CHECK-NEXT: .LBB37_2: ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vle64.v v8, (a0), v0.t @@ -502,40 +500,36 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a4, a5 ; CHECK-NEXT: .LBB38_2: -; CHECK-NEXT: sub a7, a4, a3 -; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: bltu a4, a7, .LBB38_4 +; CHECK-NEXT: sub a6, a4, a3 +; CHECK-NEXT: sltu a7, a4, a6 +; CHECK-NEXT: addi a7, a7, -1 +; CHECK-NEXT: and a6, a7, a6 +; CHECK-NEXT: srli a7, a3, 3 +; CHECK-NEXT: vsetvli t0, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v8, a7 +; CHECK-NEXT: slli a7, a3, 3 +; CHECK-NEXT: add a7, a0, a7 +; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v16, (a7), v0.t +; CHECK-NEXT: sub a5, a2, a5 +; CHECK-NEXT: sltu a2, a2, a5 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a2, a2, a5 +; CHECK-NEXT: bltu a2, a3, .LBB38_4 ; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a6, a7 +; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB38_4: -; CHECK-NEXT: li a7, 0 -; CHECK-NEXT: srli t0, a3, 3 -; CHECK-NEXT: vsetvli t1, zero, e8, mf4, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v8, t0 -; CHECK-NEXT: slli t0, a3, 3 -; CHECK-NEXT: add t0, a0, t0 -; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, mu -; CHECK-NEXT: vle64.v v16, (t0), v0.t -; CHECK-NEXT: srli a6, a3, 2 -; CHECK-NEXT: sub t0, a2, a5 +; CHECK-NEXT: srli a5, a3, 2 +; CHECK-NEXT: vsetvli a6, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v8, a5 ; CHECK-NEXT: slli a5, a3, 4 -; CHECK-NEXT: bltu a2, t0, .LBB38_6 +; CHECK-NEXT: add a5, a0, a5 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v24, (a5), v0.t +; CHECK-NEXT: bltu a4, a3, .LBB38_6 ; CHECK-NEXT: # %bb.5: -; CHECK-NEXT: mv a7, t0 -; CHECK-NEXT: .LBB38_6: -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v8, a6 -; CHECK-NEXT: add a2, a0, a5 -; CHECK-NEXT: bltu a7, a3, .LBB38_8 -; CHECK-NEXT: # %bb.7: -; CHECK-NEXT: mv a7, a3 -; CHECK-NEXT: .LBB38_8: -; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, mu -; CHECK-NEXT: vle64.v v24, (a2), v0.t -; CHECK-NEXT: bltu a4, a3, .LBB38_10 -; CHECK-NEXT: # %bb.9: ; CHECK-NEXT: mv a4, a3 -; CHECK-NEXT: .LBB38_10: +; CHECK-NEXT: .LBB38_6: ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vle64.v v8, (a0), v0.t Index: llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll +++ llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll @@ -368,110 +368,44 @@ declare @llvm.vp.merge.nxv128i8(, , , i32) define @vpmerge_vv_nxv128i8( %va, %vb, %m, i32 zeroext %evl) { -; RV32-LABEL: vpmerge_vv_nxv128i8: -; RV32: # %bb.0: -; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a4, 24 -; RV32-NEXT: mul a1, a1, a4 -; RV32-NEXT: sub sp, sp, a1 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 3 -; RV32-NEXT: add a4, a0, a1 -; RV32-NEXT: vl8r.v v24, (a4) -; RV32-NEXT: csrr a4, vlenb -; RV32-NEXT: slli a4, a4, 3 -; RV32-NEXT: add a4, sp, a4 -; RV32-NEXT: addi a4, a4, 16 -; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill -; RV32-NEXT: vsetvli a4, zero, e8, m8, ta, ma -; RV32-NEXT: vlm.v v2, (a2) -; RV32-NEXT: sub a4, a3, a1 -; RV32-NEXT: vmv1r.v v1, v0 -; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: slli a2, a2, 4 -; RV32-NEXT: add a2, sp, a2 -; RV32-NEXT: addi a2, a2, 16 -; RV32-NEXT: 
vs8r.v v16, (a2) # Unknown-size Folded Spill -; RV32-NEXT: addi a2, sp, 16 -; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bltu a3, a4, .LBB28_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a2, a4 -; RV32-NEXT: .LBB28_2: -; RV32-NEXT: vl8r.v v8, (a0) -; RV32-NEXT: vsetvli zero, a2, e8, m8, tu, ma -; RV32-NEXT: vmv1r.v v0, v2 -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 4 -; RV32-NEXT: add a0, sp, a0 -; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 3 -; RV32-NEXT: add a0, sp, a0 -; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; RV32-NEXT: vmerge.vvm v16, v16, v24, v0 -; RV32-NEXT: bltu a3, a1, .LBB28_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a3, a1 -; RV32-NEXT: .LBB28_4: -; RV32-NEXT: vsetvli zero, a3, e8, m8, tu, ma -; RV32-NEXT: vmv1r.v v0, v1 -; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; RV32-NEXT: vmerge.vvm v8, v8, v24, v0 -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: li a1, 24 -; RV32-NEXT: mul a0, a0, a1 -; RV32-NEXT: add sp, sp, a0 -; RV32-NEXT: addi sp, sp, 16 -; RV32-NEXT: ret -; -; RV64-LABEL: vpmerge_vv_nxv128i8: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -16 -; RV64-NEXT: .cfi_def_cfa_offset 16 -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 -; RV64-NEXT: sub sp, sp, a1 -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 3 -; RV64-NEXT: add a4, a0, a1 -; RV64-NEXT: vl8r.v v24, (a4) -; RV64-NEXT: vsetvli a4, zero, e8, m8, ta, ma -; RV64-NEXT: vlm.v v2, (a2) -; RV64-NEXT: sub a4, a3, a1 -; RV64-NEXT: vmv1r.v v1, v0 -; RV64-NEXT: addi a2, sp, 16 -; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; RV64-NEXT: li a2, 0 -; RV64-NEXT: bltu a3, a4, .LBB28_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a2, a4 -; RV64-NEXT: .LBB28_2: -; RV64-NEXT: vl8r.v v8, (a0) -; RV64-NEXT: vsetvli zero, a2, e8, m8, tu, ma -; RV64-NEXT: vmv1r.v v0, v2 -; RV64-NEXT: vmerge.vvm v24, v24, v16, v0 -; RV64-NEXT: bltu a3, a1, .LBB28_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a3, a1 -; RV64-NEXT: .LBB28_4: -; RV64-NEXT: vsetvli zero, a3, e8, m8, tu, ma -; RV64-NEXT: vmv1r.v v0, v1 -; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload -; RV64-NEXT: vmerge.vvm v8, v8, v16, v0 -; RV64-NEXT: vmv8r.v v16, v24 -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 3 -; RV64-NEXT: add sp, sp, a0 -; RV64-NEXT: addi sp, sp, 16 -; RV64-NEXT: ret +; CHECK-LABEL: vpmerge_vv_nxv128i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv8r.v v24, v16 +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a4, a0, a1 +; CHECK-NEXT: vl8r.v v16, (a4) +; CHECK-NEXT: vl8r.v v8, (a0) +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-NEXT: sub a0, a3, a1 +; CHECK-NEXT: vlm.v v0, (a2) +; CHECK-NEXT: sltu a2, a3, a0 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a0, a2, a0 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma +; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 +; CHECK-NEXT: bltu a3, a1, .LBB28_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: .LBB28_2: +; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma +; CHECK-NEXT: vmv1r.v 
v0, v1 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret %v = call @llvm.vp.merge.nxv128i8( %m, %va, %vb, i32 %evl) ret %v } @@ -479,26 +413,24 @@ define @vpmerge_vx_nxv128i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a2, a3, .LBB29_2 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: sub a3, a2, a1 +; CHECK-NEXT: sltu a4, a2, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma +; CHECK-NEXT: vmerge.vxm v16, v16, a0, v0 +; CHECK-NEXT: bltu a2, a1, .LBB29_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB29_2: -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a4, e8, m8, tu, ma -; CHECK-NEXT: sub a1, a2, a3 -; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 -; CHECK-NEXT: bltu a2, a1, .LBB29_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a5, a1 -; CHECK-NEXT: .LBB29_4: -; CHECK-NEXT: vsetvli zero, a5, e8, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vmerge.vxm v16, v16, a0, v0 +; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 %va = shufflevector %elt.head, poison, zeroinitializer @@ -509,26 +441,24 @@ define @vpmerge_vi_nxv128i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: bltu a1, a2, .LBB30_2 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub a2, a1, a0 +; CHECK-NEXT: sltu a3, a1, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma +; CHECK-NEXT: vmerge.vim v16, v16, 2, v0 +; CHECK-NEXT: bltu a1, a0, .LBB30_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: .LBB30_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, ma -; CHECK-NEXT: vlm.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma -; CHECK-NEXT: sub a0, a1, a2 -; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 -; CHECK-NEXT: bltu a1, a0, .LBB30_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a0 -; CHECK-NEXT: .LBB30_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 -; CHECK-NEXT: vmerge.vim v16, v16, 2, v0 +; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 %va = shufflevector %elt.head, poison, zeroinitializer Index: llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -2039,18 +2039,16 @@ ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a2, a0 ; RV32-NEXT: .LBB95_2: -; RV32-NEXT: 
li a3, 0 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v24, v0.t -; RV32-NEXT: srli a2, a0, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: vslidedown.vx v0, v0, a2 -; RV32-NEXT: bltu a1, a0, .LBB95_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a3, a0 -; RV32-NEXT: .LBB95_4: -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: sub a2, a1, a0 +; RV32-NEXT: sltu a1, a1, a2 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: and a1, a1, a2 +; RV32-NEXT: srli a0, a0, 3 +; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a0 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t ; RV32-NEXT: ret ; @@ -2061,33 +2059,31 @@ ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: sub sp, sp, a1 -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vl8re64.v v16, (a0) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a3, a1, 3 -; RV64-NEXT: add a0, a0, a3 -; RV64-NEXT: mv a3, a2 +; RV64-NEXT: add a3, a0, a3 +; RV64-NEXT: vl8re64.v v24, (a3) +; RV64-NEXT: addi a3, sp, 16 +; RV64-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; RV64-NEXT: vl8re64.v v24, (a0) +; RV64-NEXT: mv a0, a2 ; RV64-NEXT: bltu a2, a1, .LBB95_2 ; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a3, a1 +; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB95_2: -; RV64-NEXT: li a4, 0 -; RV64-NEXT: vl8re64.v v24, (a0) -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t -; RV64-NEXT: srli a3, a1, 3 -; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsoxei64.v v8, (zero), v24, v0.t ; RV64-NEXT: sub a0, a2, a1 -; RV64-NEXT: vslidedown.vx v0, v0, a3 -; RV64-NEXT: bltu a2, a0, .LBB95_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a4, a0 -; RV64-NEXT: .LBB95_4: -; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; RV64-NEXT: sltu a2, a2, a0 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a0, a2, a0 +; RV64-NEXT: srli a1, a1, 3 +; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload -; RV64-NEXT: vsoxei64.v v8, (zero), v24, v0.t +; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 @@ -2110,18 +2106,16 @@ ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a3, a1 ; RV32-NEXT: .LBB96_2: -; RV32-NEXT: li a4, 0 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t -; RV32-NEXT: srli a3, a1, 3 -; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; RV32-NEXT: sub a1, a2, a1 -; RV32-NEXT: vslidedown.vx v0, v0, a3 -; RV32-NEXT: bltu a2, a1, .LBB96_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a4, a1 -; RV32-NEXT: .LBB96_4: -; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; RV32-NEXT: sub a3, a2, a1 +; RV32-NEXT: sltu a2, a2, a3 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a2, a2, a3 +; RV32-NEXT: srli a1, a1, 3 +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a1 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: ret ; @@ -2130,39 +2124,47 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a3, vlenb -; RV64-NEXT: slli a3, a3, 3 +; RV64-NEXT: slli a3, a3, 4 ; RV64-NEXT: sub sp, sp, a3 -; 
RV64-NEXT: vl4re16.v v4, (a1) -; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl4re16.v v24, (a1) +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV64-NEXT: vsext.vf4 v16, v4 +; RV64-NEXT: vsext.vf4 v16, v26 ; RV64-NEXT: vsll.vi v16, v16, 3 +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: vsext.vf4 v16, v24 +; RV64-NEXT: vsll.vi v24, v16, 3 ; RV64-NEXT: mv a3, a2 -; RV64-NEXT: vsext.vf4 v24, v6 ; RV64-NEXT: bltu a2, a1, .LBB96_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a3, a1 ; RV64-NEXT: .LBB96_2: -; RV64-NEXT: li a4, 0 -; RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: srli a3, a1, 3 -; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; RV64-NEXT: sub a1, a2, a1 -; RV64-NEXT: vslidedown.vx v0, v0, a3 -; RV64-NEXT: bltu a2, a1, .LBB96_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a4, a1 -; RV64-NEXT: .LBB96_4: -; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t +; RV64-NEXT: sub a3, a2, a1 +; RV64-NEXT: sltu a2, a2, a3 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a2, a2, a3 +; RV64-NEXT: srli a1, a1, 3 +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a1 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -2184,18 +2186,16 @@ ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a3, a1 ; RV32-NEXT: .LBB97_2: -; RV32-NEXT: li a4, 0 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t -; RV32-NEXT: srli a3, a1, 3 -; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; RV32-NEXT: sub a1, a2, a1 -; RV32-NEXT: vslidedown.vx v0, v0, a3 -; RV32-NEXT: bltu a2, a1, .LBB97_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a4, a1 -; RV32-NEXT: .LBB97_4: -; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; RV32-NEXT: sub a3, a2, a1 +; RV32-NEXT: sltu a2, a2, a3 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a2, a2, a3 +; RV32-NEXT: srli a1, a1, 3 +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a1 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: ret ; @@ -2204,39 +2204,47 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a3, vlenb -; RV64-NEXT: slli a3, a3, 3 +; RV64-NEXT: slli a3, a3, 4 ; RV64-NEXT: sub sp, sp, a3 -; RV64-NEXT: vl4re16.v v4, (a1) -; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl4re16.v v24, (a1) +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV64-NEXT: vsext.vf4 v16, v4 +; RV64-NEXT: vsext.vf4 v16, v26 ; RV64-NEXT: 
vsll.vi v16, v16, 3 +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: vsext.vf4 v16, v24 +; RV64-NEXT: vsll.vi v24, v16, 3 ; RV64-NEXT: mv a3, a2 -; RV64-NEXT: vsext.vf4 v24, v6 ; RV64-NEXT: bltu a2, a1, .LBB97_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a3, a1 ; RV64-NEXT: .LBB97_2: -; RV64-NEXT: li a4, 0 -; RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: srli a3, a1, 3 -; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; RV64-NEXT: sub a1, a2, a1 -; RV64-NEXT: vslidedown.vx v0, v0, a3 -; RV64-NEXT: bltu a2, a1, .LBB97_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a4, a1 -; RV64-NEXT: .LBB97_4: -; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t +; RV64-NEXT: sub a3, a2, a1 +; RV64-NEXT: sltu a2, a2, a3 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a2, a2, a3 +; RV64-NEXT: srli a1, a1, 3 +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a1 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -2259,18 +2267,16 @@ ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a3, a1 ; RV32-NEXT: .LBB98_2: -; RV32-NEXT: li a4, 0 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t -; RV32-NEXT: srli a3, a1, 3 -; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; RV32-NEXT: sub a1, a2, a1 -; RV32-NEXT: vslidedown.vx v0, v0, a3 -; RV32-NEXT: bltu a2, a1, .LBB98_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a4, a1 -; RV32-NEXT: .LBB98_4: -; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; RV32-NEXT: sub a3, a2, a1 +; RV32-NEXT: sltu a2, a2, a3 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a2, a2, a3 +; RV32-NEXT: srli a1, a1, 3 +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a1 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: ret ; @@ -2279,39 +2285,47 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a3, vlenb -; RV64-NEXT: slli a3, a3, 3 +; RV64-NEXT: slli a3, a3, 4 ; RV64-NEXT: sub sp, sp, a3 -; RV64-NEXT: vl4re16.v v4, (a1) -; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl4re16.v v24, (a1) +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV64-NEXT: vzext.vf4 v16, v4 +; RV64-NEXT: vzext.vf4 v16, v26 ; RV64-NEXT: vsll.vi v16, v16, 3 +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: vzext.vf4 v16, v24 +; RV64-NEXT: vsll.vi v24, v16, 3 ; RV64-NEXT: mv a3, a2 -; RV64-NEXT: vzext.vf4 v24, v6 ; RV64-NEXT: bltu a2, a1, .LBB98_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a3, a1 ; RV64-NEXT: .LBB98_2: -; RV64-NEXT: li a4, 0 -; RV64-NEXT: 
vsll.vi v24, v24, 3 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: srli a3, a1, 3 -; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma -; RV64-NEXT: sub a1, a2, a1 -; RV64-NEXT: vslidedown.vx v0, v0, a3 -; RV64-NEXT: bltu a2, a1, .LBB98_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a4, a1 -; RV64-NEXT: .LBB98_4: -; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t +; RV64-NEXT: sub a3, a2, a1 +; RV64-NEXT: sltu a2, a2, a3 +; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: and a2, a2, a3 +; RV64-NEXT: srli a1, a1, 3 +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a1 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/vpstore.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vpstore.ll +++ llvm/test/CodeGen/RISCV/rvv/vpstore.ll @@ -375,20 +375,18 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: .LBB30_2: -; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t -; CHECK-NEXT: srli a5, a2, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a3, .LBB30_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a3 -; CHECK-NEXT: .LBB30_4: -; CHECK-NEXT: slli a1, a2, 3 -; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma +; CHECK-NEXT: sltu a1, a1, a3 +; CHECK-NEXT: addi a1, a1, -1 +; CHECK-NEXT: and a1, a1, a3 +; CHECK-NEXT: srli a3, a2, 3 +; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a3 +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v16, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv16f64.p0nxv16f64( %val, * %ptr, %m, i32 %evl) @@ -402,62 +400,58 @@ define void @vpstore_nxv17f64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv17f64: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: sub sp, sp, a3 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: slli a4, a3, 1 ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: addi a5, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill ; CHECK-NEXT: mv a5, a2 ; CHECK-NEXT: bltu a2, a4, .LBB31_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a5, a4 ; CHECK-NEXT: .LBB31_2: -; CHECK-NEXT: mv a7, a5 +; CHECK-NEXT: mv a6, a5 ; CHECK-NEXT: bltu a5, a3, .LBB31_4 ; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a7, a3 +; CHECK-NEXT: mv a6, a3 ; CHECK-NEXT: .LBB31_4: -; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vl8re64.v v16, (a0) -; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma -; CHECK-NEXT: sub a0, a5, a3 +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a7, 
vlenb +; CHECK-NEXT: slli a7, a7, 3 +; CHECK-NEXT: sub sp, sp, a7 +; CHECK-NEXT: vl8re64.v v0, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vse64.v v8, (a1), v0.t -; CHECK-NEXT: bltu a5, a0, .LBB31_6 -; CHECK-NEXT: # %bb.5: -; CHECK-NEXT: mv a6, a0 -; CHECK-NEXT: .LBB31_6: -; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: sub a0, a5, a3 +; CHECK-NEXT: sltu a5, a5, a0 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a0, a5, a0 ; CHECK-NEXT: srli a5, a3, 3 -; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma +; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v24, a5 ; CHECK-NEXT: slli a5, a3, 3 ; CHECK-NEXT: add a5, a1, a5 -; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma -; CHECK-NEXT: addi a6, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a6) # Unknown-size Folded Reload -; CHECK-NEXT: vse64.v v8, (a5), v0.t -; CHECK-NEXT: srli a5, a3, 2 -; CHECK-NEXT: sub a6, a2, a4 -; CHECK-NEXT: slli a4, a3, 4 -; CHECK-NEXT: bltu a2, a6, .LBB31_8 -; CHECK-NEXT: # %bb.7: -; CHECK-NEXT: mv a0, a6 -; CHECK-NEXT: .LBB31_8: -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v24, a5 -; CHECK-NEXT: add a1, a1, a4 -; CHECK-NEXT: bltu a0, a3, .LBB31_10 -; CHECK-NEXT: # %bb.9: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sub a0, a2, a4 +; CHECK-NEXT: sltu a2, a2, a0 +; CHECK-NEXT: addi a2, a2, -1 +; CHECK-NEXT: and a0, a2, a0 +; CHECK-NEXT: vse64.v v16, (a5), v0.t +; CHECK-NEXT: bltu a0, a3, .LBB31_6 +; CHECK-NEXT: # %bb.5: ; CHECK-NEXT: mv a0, a3 -; CHECK-NEXT: .LBB31_10: +; CHECK-NEXT: .LBB31_6: +; CHECK-NEXT: srli a2, a3, 2 +; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v24, a2 +; CHECK-NEXT: slli a2, a3, 4 +; CHECK-NEXT: add a1, a1, a2 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vse64.v v16, (a1), v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vse64.v v8, (a1), v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 Index: llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll @@ -93,30 +93,27 @@ define half @vpreduce_fadd_nxv64f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: srli a1, a2, 1 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v24, v0, a2 +; CHECK-NEXT: slli a2, a1, 2 +; CHECK-NEXT: sub a1, a0, a2 +; CHECK-NEXT: sltu a3, a0, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma -; CHECK-NEXT: slli a2, a2, 2 ; CHECK-NEXT: vfmv.s.f v25, fa0 -; CHECK-NEXT: mv a3, a0 ; CHECK-NEXT: bltu a0, a2, .LBB6_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: .LBB6_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v24, v0, a1 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v25, v8, v25, v0.t ; CHECK-NEXT: vfmv.f.s ft0, v25 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma -; CHECK-NEXT: sub a1, a0, 
a2 ; CHECK-NEXT: vfmv.s.f v8, ft0 -; CHECK-NEXT: bltu a0, a1, .LBB6_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a1 -; CHECK-NEXT: .LBB6_4: -; CHECK-NEXT: vsetvli zero, a4, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredusum.vs v8, v16, v8, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -128,30 +125,27 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: srli a1, a2, 1 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v24, v0, a2 +; CHECK-NEXT: slli a2, a1, 2 +; CHECK-NEXT: sub a1, a0, a2 +; CHECK-NEXT: sltu a3, a0, a1 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a1 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma -; CHECK-NEXT: slli a2, a2, 2 ; CHECK-NEXT: vfmv.s.f v25, fa0 -; CHECK-NEXT: mv a3, a0 ; CHECK-NEXT: bltu a0, a2, .LBB7_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: .LBB7_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v24, v0, a1 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v25, v8, v25, v0.t ; CHECK-NEXT: vfmv.f.s ft0, v25 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma -; CHECK-NEXT: sub a1, a0, a2 ; CHECK-NEXT: vfmv.s.f v8, ft0 -; CHECK-NEXT: bltu a0, a1, .LBB7_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a1 -; CHECK-NEXT: .LBB7_4: -; CHECK-NEXT: vsetvli zero, a4, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredosum.vs v8, v16, v8, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v8 Index: llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -1153,30 +1153,27 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv32i32: ; RV32: # %bb.0: -; RV32-NEXT: csrr a3, vlenb -; RV32-NEXT: srli a2, a3, 2 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: srli a3, a2, 2 +; RV32-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; RV32-NEXT: vslidedown.vx v24, v0, a3 +; RV32-NEXT: slli a3, a2, 1 +; RV32-NEXT: sub a2, a1, a3 +; RV32-NEXT: sltu a4, a1, a2 +; RV32-NEXT: addi a4, a4, -1 +; RV32-NEXT: and a2, a4, a2 ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma -; RV32-NEXT: slli a3, a3, 1 ; RV32-NEXT: vmv.s.x v25, a0 -; RV32-NEXT: mv a0, a1 ; RV32-NEXT: bltu a1, a3, .LBB67_2 ; RV32-NEXT: # %bb.1: -; RV32-NEXT: mv a0, a3 +; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB67_2: -; RV32-NEXT: li a4, 0 -; RV32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma -; RV32-NEXT: vslidedown.vx v24, v0, a2 -; RV32-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; RV32-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; RV32-NEXT: vredmaxu.vs v25, v8, v25, v0.t -; RV32-NEXT: vmv.x.s a2, v25 +; RV32-NEXT: vmv.x.s a0, v25 ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma -; RV32-NEXT: sub a0, a1, a3 -; RV32-NEXT: vmv.s.x v8, a2 -; RV32-NEXT: bltu a1, a0, .LBB67_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: mv a4, a0 -; RV32-NEXT: .LBB67_4: -; RV32-NEXT: vsetvli zero, a4, e32, m8, tu, ma +; RV32-NEXT: vmv.s.x v8, a0 +; RV32-NEXT: vsetvli zero, a2, e32, m8, tu, ma ; RV32-NEXT: vmv1r.v v0, v24 ; RV32-NEXT: vredmaxu.vs 
v8, v16, v8, v0.t ; RV32-NEXT: vmv.x.s a0, v8 @@ -1186,30 +1183,27 @@ ; RV64: # %bb.0: ; RV64-NEXT: csrr a3, vlenb ; RV64-NEXT: srli a2, a3, 2 -; RV64-NEXT: slli a4, a0, 32 -; RV64-NEXT: slli a0, a3, 1 -; RV64-NEXT: srli a3, a4, 32 -; RV64-NEXT: mv a4, a1 -; RV64-NEXT: bltu a1, a0, .LBB67_2 +; RV64-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; RV64-NEXT: vslidedown.vx v24, v0, a2 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a2, a0, 32 +; RV64-NEXT: slli a3, a3, 1 +; RV64-NEXT: sub a0, a1, a3 +; RV64-NEXT: sltu a4, a1, a0 +; RV64-NEXT: addi a4, a4, -1 +; RV64-NEXT: and a0, a4, a0 +; RV64-NEXT: bltu a1, a3, .LBB67_2 ; RV64-NEXT: # %bb.1: -; RV64-NEXT: mv a4, a0 +; RV64-NEXT: mv a1, a3 ; RV64-NEXT: .LBB67_2: -; RV64-NEXT: li a5, 0 -; RV64-NEXT: vsetvli a6, zero, e8, mf2, ta, ma -; RV64-NEXT: vslidedown.vx v24, v0, a2 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma -; RV64-NEXT: vmv.s.x v25, a3 -; RV64-NEXT: vsetvli zero, a4, e32, m8, tu, ma +; RV64-NEXT: vmv.s.x v25, a2 +; RV64-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; RV64-NEXT: vredmaxu.vs v25, v8, v25, v0.t -; RV64-NEXT: vmv.x.s a2, v25 +; RV64-NEXT: vmv.x.s a1, v25 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: vmv.s.x v8, a2 -; RV64-NEXT: bltu a1, a0, .LBB67_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: mv a5, a0 -; RV64-NEXT: .LBB67_4: -; RV64-NEXT: vsetvli zero, a5, e32, m8, tu, ma +; RV64-NEXT: vmv.s.x v8, a1 +; RV64-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vredmaxu.vs v8, v16, v8, v0.t ; RV64-NEXT: vmv.x.s a0, v8 Index: llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll @@ -389,30 +389,27 @@ define signext i1 @vpreduce_or_nxv128i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv128i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: mv a3, a1 +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vcpop.m a3, v8, v0.t +; CHECK-NEXT: snez a3, a3 ; CHECK-NEXT: bltu a1, a2, .LBB22_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB22_2: -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vcpop.m a3, v11, v0.t -; CHECK-NEXT: snez a3, a3 -; CHECK-NEXT: sub a2, a1, a2 -; CHECK-NEXT: or a0, a3, a0 -; CHECK-NEXT: bltu a1, a2, .LBB22_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: .LBB22_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vcpop.m a1, v8, v0.t +; CHECK-NEXT: vcpop.m a1, v11, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: or a0, a3, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll @@ -357,32 +357,30 @@ ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; 
CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a4, a1, 3 -; CHECK-NEXT: add a4, a0, a4 -; CHECK-NEXT: vl8re32.v v8, (a4) -; CHECK-NEXT: srli a5, a1, 2 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: csrr a3, vlenb +; CHECK-NEXT: slli a1, a3, 3 +; CHECK-NEXT: add a1, a0, a1 +; CHECK-NEXT: vl8re32.v v8, (a1) +; CHECK-NEXT: slli a1, a3, 1 ; CHECK-NEXT: sub a4, a2, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a2, a4, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: sltu a5, a2, a4 +; CHECK-NEXT: addi a5, a5, -1 +; CHECK-NEXT: and a4, a5, a4 +; CHECK-NEXT: srli a3, a3, 2 +; CHECK-NEXT: vl8re32.v v0, (a0) ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v24, a3 +; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0 -; CHECK-NEXT: bltu a2, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a1, .LBB27_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB27_4: +; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 @@ -415,32 +413,30 @@ ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a2, a1, 3 ; CHECK-NEXT: add a2, a0, a2 ; CHECK-NEXT: vl8re32.v v8, (a2) -; CHECK-NEXT: srli a5, a1, 2 -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: sub a4, a1, a2 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a1, a4, .LBB28_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB28_2: -; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: sub a3, a1, a2 +; CHECK-NEXT: sltu a4, a1, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: vl8re32.v v0, (a0) ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v24, a4 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0 -; CHECK-NEXT: bltu a1, a2, .LBB28_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a1, a2, .LBB28_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB28_4: +; CHECK-NEXT: .LBB28_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 @@ -703,31 +699,29 @@ ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a4, a1, 3 -; CHECK-NEXT: add a4, a0, a4 -; CHECK-NEXT: vl8re64.v v8, (a4) -; CHECK-NEXT: srli 
a5, a1, 3 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma -; CHECK-NEXT: sub a4, a2, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a5 -; CHECK-NEXT: bltu a2, a4, .LBB48_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, a4 -; CHECK-NEXT: .LBB48_2: -; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: slli a3, a1, 3 +; CHECK-NEXT: add a3, a0, a3 +; CHECK-NEXT: vl8re64.v v8, (a3) +; CHECK-NEXT: sub a3, a2, a1 +; CHECK-NEXT: sltu a4, a2, a3 +; CHECK-NEXT: addi a4, a4, -1 +; CHECK-NEXT: and a3, a4, a3 +; CHECK-NEXT: srli a4, a1, 3 +; CHECK-NEXT: vl8re64.v v0, (a0) ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v24, a4 ; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0 -; CHECK-NEXT: bltu a2, a1, .LBB48_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a2, a1, .LBB48_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: .LBB48_4: +; CHECK-NEXT: .LBB48_2: ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 Index: llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll @@ -152,23 +152,21 @@ ; CHECK-LABEL: vsext_nxv32i8_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB12_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB12_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v16, v10, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB12_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB12_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB12_4: +; CHECK-NEXT: .LBB12_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vsext.vf4 v24, v8, v0.t @@ -183,22 +181,19 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vsext.vf4 v16, v10 ; CHECK-NEXT: bltu a0, a1, .LBB13_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB13_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v24, v8 -; CHECK-NEXT: bltu a0, a1, .LBB13_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB13_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vsext.vf4 v16, v10 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret %v = call @llvm.vp.sext.nxv32i32.nxv32i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, 
zeroinitializer), i32 %vl) ret %v Index: llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll @@ -314,23 +314,21 @@ ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB25_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB25_4: +; CHECK-NEXT: .LBB25_2: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 @@ -351,23 +349,21 @@ ; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 -; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a4, a1, 2 +; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a4 -; CHECK-NEXT: bltu a0, a3, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: .LBB26_2: +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB26_4 -; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: bltu a0, a1, .LBB26_2 +; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB26_4: +; CHECK-NEXT: .LBB26_2: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t @@ -381,21 +377,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: mv a2, a0 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vfcvt.f.x.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB27_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: sub a1, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 -; CHECK-NEXT: bltu a0, a1, .LBB27_4 -; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vfcvt.f.x.v v16, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.sitofp.nxv32f32.nxv32i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v Index: llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll +++ 
llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
 
 declare @llvm.vp.trunc.nxv2i7.nxv2i16(, , i32)
 
@@ -158,24 +158,22 @@
 ; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: li a2, 0
 ; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a4, a1, 3
+; CHECK-NEXT: srli a2, a1, 3
 ; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v0, v0, a4
-; CHECK-NEXT: bltu a0, a3, .LBB12_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a3
-; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB12_4
-; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: bltu a0, a1, .LBB12_2
+; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB12_4:
+; CHECK-NEXT: .LBB12_2:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -217,25 +215,23 @@
 ; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: li a2, 0
 ; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: srli a2, a1, 2
 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
 ; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v0, v0, a4
-; CHECK-NEXT: bltu a0, a3, .LBB15_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a3
-; CHECK-NEXT: .LBB15_2:
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB15_4
-; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: bltu a0, a1, .LBB15_2
+; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB15_4:
+; CHECK-NEXT: .LBB15_2:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -253,25 +249,23 @@
 ; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: li a2, 0
 ; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: srli a2, a1, 2
 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
 ; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v0, v0, a4
-; CHECK-NEXT: bltu a0, a3, .LBB16_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a3
-; CHECK-NEXT: .LBB16_2:
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
 ; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB16_4
-; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: bltu a0, a1, .LBB16_2
+; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB16_4:
+; CHECK-NEXT: .LBB16_2:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -291,90 +285,86 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a1, a1, a3
 ; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: vmv1r.v v1, v0
 ; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: slli a1, a1, 3
 ; CHECK-NEXT: add a1, sp, a1
 ; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a4, a1, 1
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: mv a5, a2
-; CHECK-NEXT: bltu a2, a4, .LBB17_2
+; CHECK-NEXT: srli a3, a1, 2
+; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v25, v0, a3
+; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; CHECK-NEXT: slli a3, a1, 3
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vl8re64.v v8, (a3)
+; CHECK-NEXT: slli a3, a1, 1
+; CHECK-NEXT: sub a4, a2, a3
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: sub a5, a4, a1
+; CHECK-NEXT: sltu a6, a4, a5
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a6, a6, a5
+; CHECK-NEXT: srli a5, a1, 3
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vslidedown.vx v0, v25, a5
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
+; CHECK-NEXT: bltu a4, a1, .LBB17_2
 ; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: mv a4, a1
 ; CHECK-NEXT: .LBB17_2:
-; CHECK-NEXT: li a6, 0
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: sub a7, a5, a1
-; CHECK-NEXT: vslidedown.vx v0, v24, a3
-; CHECK-NEXT: bltu a5, a7, .LBB17_4
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v26, v1, a5
+; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
+; CHECK-NEXT: bltu a2, a3, .LBB17_4
 ; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: mv a6, a7
+; CHECK-NEXT: mv a2, a3
 ; CHECK-NEXT: .LBB17_4:
-; CHECK-NEXT: srli a7, a1, 2
-; CHECK-NEXT: slli t0, a1, 3
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v12, v16, 0, v0.t
-; CHECK-NEXT: bltu a5, a1, .LBB17_6
-; CHECK-NEXT: # %bb.5:
-; CHECK-NEXT: mv a5, a1
-; CHECK-NEXT: .LBB17_6:
-; CHECK-NEXT: li a6, 0
-; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v1, v24, a7
-; CHECK-NEXT: add a7, a0, t0
-; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
-; CHECK-NEXT: sub a4, a2, a4
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 3
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vl8re8.v v16, (a5) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
-; CHECK-NEXT: bltu a2, a4, .LBB17_8
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mv a6, a4
-; CHECK-NEXT: .LBB17_8:
-; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vl8re64.v v16, (a7)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: li a2, 0
-; CHECK-NEXT: sub a4, a6, a1
-; CHECK-NEXT: vslidedown.vx v0, v1, a3
-; CHECK-NEXT: bltu a6, a4, .LBB17_10
-; CHECK-NEXT: # %bb.9:
-; CHECK-NEXT: mv a2, a4
-; CHECK-NEXT: .LBB17_10:
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
+; CHECK-NEXT: sub a0, a2, a1
+; CHECK-NEXT: sltu a3, a2, a0
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a0, a3, a0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v26
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v20, v24, 0, v0.t
-; CHECK-NEXT: bltu a6, a1, .LBB17_12
-; CHECK-NEXT: # %bb.11:
-; CHECK-NEXT: mv a6, a1
-; CHECK-NEXT: .LBB17_12:
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v12, v24, 0, v0.t
+; CHECK-NEXT: bltu a2, a1, .LBB17_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: .LBB17_6:
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v1
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -314,23 +314,21 @@
 ; CHECK-NEXT: vmv1r.v v24, v0
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: li a2, 0
 ; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: srli a2, a1, 2
 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
 ; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v0, v0, a4
-; CHECK-NEXT: bltu a0, a3, .LBB25_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a3
-; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vfncvt.f.xu.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB25_4
-; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: bltu a0, a1, .LBB25_2
+; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB25_4:
+; CHECK-NEXT: .LBB25_2:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: addi a0, sp, 16
@@ -351,23 +349,21 @@
 ; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: li a2, 0
 ; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: srli a2, a1, 2
 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
 ; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v0, v0, a4
-; CHECK-NEXT: bltu a0, a3, .LBB26_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a3
-; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB26_4
-; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: bltu a0, a1, .LBB26_2
+; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB26_4:
+; CHECK-NEXT: .LBB26_2:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
@@ -381,21 +377,18 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v16, v16
 ; CHECK-NEXT: bltu a0, a1, .LBB27_2
 ; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: li a3, 0
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; CHECK-NEXT: sub a1, a0, a1
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
-; CHECK-NEXT: bltu a0, a1, .LBB27_4
-; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: mv a3, a1
-; CHECK-NEXT: .LBB27_4:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: vfcvt.f.xu.v v16, v16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.uitofp.nxv32f32.nxv32i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl)
 ret %v
Index: llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -152,23 +152,21 @@
 ; CHECK-LABEL: vzext_nxv32i8_nxv32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: li a2, 0
 ; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: srli a2, a1, 2
 ; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
 ; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v0, v0, a4
-; CHECK-NEXT: bltu a0, a3, .LBB12_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a3
-; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vzext.vf4 v16, v10, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB12_4
-; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: bltu a0, a1, .LBB12_2
+; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB12_4:
+; CHECK-NEXT: .LBB12_2:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vzext.vf4 v24, v8, v0.t
@@ -183,22 +181,19 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vzext.vf4 v16, v10
 ; CHECK-NEXT: bltu a0, a1, .LBB13_2
 ; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: .LBB13_2:
-; CHECK-NEXT: li a3, 0
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; CHECK-NEXT: sub a1, a0, a1
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vzext.vf4 v24, v8
-; CHECK-NEXT: bltu a0, a1, .LBB13_4
-; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: mv a3, a1
-; CHECK-NEXT: .LBB13_4:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v10
-; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: vmv.v.v v8, v24
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.zext.nxv32i32.nxv32i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl)
 ret %v
Index: llvm/test/CodeGen/RISCV/usub_sat.ll
===================================================================
--- llvm/test/CodeGen/RISCV/usub_sat.ll
+++ llvm/test/CodeGen/RISCV/usub_sat.ll
@@ -13,24 +13,18 @@
 define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I-LABEL: func:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: mv a2, a0
 ; RV32I-NEXT: sub a1, a0, a1
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: bltu a2, a1, .LBB0_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB0_2:
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: mv a2, a0
 ; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a2, a1, .LBB0_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB0_2:
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func:
@@ -57,29 +51,23 @@
 ; RV32I-NEXT: sub a2, a0, a2
 ; RV32I-NEXT: beq a3, a1, .LBB1_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: sltu a4, a1, a3
+; RV32I-NEXT: sltu a0, a1, a3
 ; RV32I-NEXT: j .LBB1_3
 ; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: sltu a4, a0, a2
+; RV32I-NEXT: sltu a0, a0, a2
 ; RV32I-NEXT: .LBB1_3:
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: bnez a4, .LBB1_5
-; RV32I-NEXT: # %bb.4:
-; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: mv a1, a3
-; RV32I-NEXT: .LBB1_5:
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func2:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: mv a2, a0
 ; RV64I-NEXT: sub a1, a0, a1
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a2, a1, .LBB1_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB1_2:
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func2:
@@ -90,18 +78,15 @@
 ; RV32IZbb-NEXT: sub a2, a0, a2
 ; RV32IZbb-NEXT: beq a3, a1, .LBB1_2
 ; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: sltu a4, a1, a3
+; RV32IZbb-NEXT: sltu a0, a1, a3
 ; RV32IZbb-NEXT: j .LBB1_3
 ; RV32IZbb-NEXT: .LBB1_2:
-; RV32IZbb-NEXT: sltu a4, a0, a2
+; RV32IZbb-NEXT: sltu a0, a0, a2
 ; RV32IZbb-NEXT: .LBB1_3:
-; RV32IZbb-NEXT: li a0, 0
-; RV32IZbb-NEXT: li a1, 0
-; RV32IZbb-NEXT: bnez a4, .LBB1_5
-; RV32IZbb-NEXT: # %bb.4:
-; RV32IZbb-NEXT: mv a0, a2
-; RV32IZbb-NEXT: mv a1, a3
-; RV32IZbb-NEXT: .LBB1_5:
+; RV32IZbb-NEXT: snez a0, a0
+; RV32IZbb-NEXT: addi a1, a0, -1
+; RV32IZbb-NEXT: and a0, a1, a2
+; RV32IZbb-NEXT: and a1, a1, a3
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func2:
@@ -116,24 +101,18 @@
 define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
 ; RV32I-LABEL: func16:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: mv a2, a0
 ; RV32I-NEXT: sub a1, a0, a1
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: bltu a2, a1, .LBB2_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB2_2:
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func16:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: mv a2, a0
 ; RV64I-NEXT: sub a1, a0, a1
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a2, a1, .LBB2_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB2_2:
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func16:
@@ -154,24 +133,18 @@
 define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
 ; RV32I-LABEL: func8:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: mv a2, a0
 ; RV32I-NEXT: sub a1, a0, a1
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: bltu a2, a1, .LBB3_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB3_2:
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func8:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: mv a2, a0
 ; RV64I-NEXT: sub a1, a0, a1
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a2, a1, .LBB3_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB3_2:
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func8:
@@ -192,24 +165,18 @@
 define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
 ; RV32I-LABEL: func3:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: mv a2, a0
 ; RV32I-NEXT: sub a1, a0, a1
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: bltu a2, a1, .LBB4_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB4_2:
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func3:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: mv a2, a0
 ; RV64I-NEXT: sub a1, a0, a1
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a2, a1, .LBB4_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB4_2:
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func3:
Index: llvm/test/CodeGen/RISCV/usub_sat_plus.ll
===================================================================
--- llvm/test/CodeGen/RISCV/usub_sat_plus.ll
+++ llvm/test/CodeGen/RISCV/usub_sat_plus.ll
@@ -13,26 +13,21 @@
 define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-LABEL: func32:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: mv a3, a0
-; RV32I-NEXT: mul a0, a1, a2
-; RV32I-NEXT: sub a1, a3, a0
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: bltu a3, a1, .LBB0_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB0_2:
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: sub a1, a0, a1
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func32:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: mulw a1, a1, a2
 ; RV64I-NEXT: subw a1, a0, a1
-; RV64I-NEXT: sext.w a2, a0
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a2, a1, .LBB0_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB0_2:
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func32:
@@ -63,29 +58,23 @@
 ; RV32I-NEXT: sub a3, a0, a4
 ; RV32I-NEXT: beq a2, a1, .LBB1_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: sltu a4, a1, a2
+; RV32I-NEXT: sltu a0, a1, a2
 ; RV32I-NEXT: j .LBB1_3
 ; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: sltu a4, a0, a3
+; RV32I-NEXT: sltu a0, a0, a3
 ; RV32I-NEXT: .LBB1_3:
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: bnez a4, .LBB1_5
-; RV32I-NEXT: # %bb.4:
-; RV32I-NEXT: mv a0, a3
-; RV32I-NEXT: mv a1, a2
-; RV32I-NEXT: .LBB1_5:
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a3
+; RV32I-NEXT: and a1, a1, a2
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func64:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: sub a2, a0, a2
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a1, a2, .LBB1_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: .LBB1_2:
+; RV64I-NEXT: sub a1, a0, a2
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func64:
@@ -96,18 +85,15 @@
 ; RV32IZbb-NEXT: sub a3, a0, a4
 ; RV32IZbb-NEXT: beq a2, a1, .LBB1_2
 ; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: sltu a4, a1, a2
+; RV32IZbb-NEXT: sltu a0, a1, a2
 ; RV32IZbb-NEXT: j .LBB1_3
 ; RV32IZbb-NEXT: .LBB1_2:
-; RV32IZbb-NEXT: sltu a4, a0, a3
+; RV32IZbb-NEXT: sltu a0, a0, a3
 ; RV32IZbb-NEXT: .LBB1_3:
-; RV32IZbb-NEXT: li a0, 0
-; RV32IZbb-NEXT: li a1, 0
-; RV32IZbb-NEXT: bnez a4, .LBB1_5
-; RV32IZbb-NEXT: # %bb.4:
-; RV32IZbb-NEXT: mv a0, a3
-; RV32IZbb-NEXT: mv a1, a2
-; RV32IZbb-NEXT: .LBB1_5:
+; RV32IZbb-NEXT: snez a0, a0
+; RV32IZbb-NEXT: addi a1, a0, -1
+; RV32IZbb-NEXT: and a0, a1, a3
+; RV32IZbb-NEXT: and a1, a1, a2
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func64:
@@ -125,30 +111,26 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: lui a3, 16
 ; RV32I-NEXT: addi a3, a3, -1
-; RV32I-NEXT: and a4, a0, a3
-; RV32I-NEXT: mul a0, a1, a2
 ; RV32I-NEXT: and a0, a0, a3
-; RV32I-NEXT: sub a1, a4, a0
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: bltu a4, a1, .LBB2_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB2_2:
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: sub a1, a0, a1
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func16:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: lui a3, 16
 ; RV64I-NEXT: addiw a3, a3, -1
-; RV64I-NEXT: and a4, a0, a3
-; RV64I-NEXT: mul a0, a1, a2
 ; RV64I-NEXT: and a0, a0, a3
-; RV64I-NEXT: sub a1, a4, a0
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a4, a1, .LBB2_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB2_2:
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: and a1, a1, a3
+; RV64I-NEXT: sub a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func16:
@@ -176,28 +158,24 @@
 define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32I-LABEL: func8:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: andi a3, a0, 255
-; RV32I-NEXT: mul a0, a1, a2
 ; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: sub a1, a3, a0
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: bltu a3, a1, .LBB3_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB3_2:
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: sub a1, a0, a1
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func8:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: andi a3, a0, 255
-; RV64I-NEXT: mulw a0, a1, a2
 ; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: sub a1, a3, a0
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a3, a1, .LBB3_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB3_2:
+; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: sub a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func8:
@@ -225,28 +203,24 @@
 define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32I-LABEL: func4:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: andi a3, a0, 15
-; RV32I-NEXT: mul a0, a1, a2
 ; RV32I-NEXT: andi a0, a0, 15
-; RV32I-NEXT: sub a1, a3, a0
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: bltu a3, a1, .LBB4_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB4_2:
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: andi a1, a1, 15
+; RV32I-NEXT: sub a1, a0, a1
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func4:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: andi a3, a0, 15
-; RV64I-NEXT: mulw a0, a1, a2
 ; RV64I-NEXT: andi a0, a0, 15
-; RV64I-NEXT: sub a1, a3, a0
-; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: bltu a3, a1, .LBB4_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: .LBB4_2:
+; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: andi a1, a1, 15
+; RV64I-NEXT: sub a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func4: