Index: llvm/lib/Target/RISCV/RISCVInstrInfo.cpp =================================================================== --- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -2540,10 +2540,12 @@ MachineBasicBlock &MBB = *MI.getParent(); MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) .add(MI.getOperand(0)) + .addReg(MI.getOperand(0).getReg(), RegState::Undef) .add(MI.getOperand(1)) .add(MI.getOperand(2)) .add(MI.getOperand(3)) - .add(MI.getOperand(4)); + .add(MI.getOperand(4)) + .add(MI.getOperand(5)); MIB.copyImplicitOps(MI); if (LV) { Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td =================================================================== --- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -1293,6 +1293,22 @@ let HasVecPolicyOp = 1; } +// Like VPseudoBinaryNoMask, but output can be V0. +class VPseudoBinaryMOutNoMask : + Pseudo<(outs RetClass:$rd), + (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Constraint; + let HasVLOp = 1; + let HasSEWOp = 1; +} + // Like VPseudoBinaryMask, but output can be V0. class VPseudoBinaryMOutMask { let VLMul = MInfo.value in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); - def suffix : VPseudoBinaryNoMask; - def suffix # "_TU" : VPseudoBinaryNoMaskTU; + def suffix : VPseudoBinaryNoMaskTU; def suffix # "_MASK" : VPseudoBinaryMaskPolicy, - RISCVMaskedPseudo; + RISCVMaskedPseudo; } } @@ -1995,8 +2010,8 @@ LMULInfo MInfo, string Constraint = ""> { let VLMul = MInfo.value in { - def "_" # MInfo.MX : VPseudoBinaryNoMask; + def "_" # MInfo.MX : VPseudoBinaryMOutNoMask; let ForceTailAgnostic = true in def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask, @@ -2013,13 +2028,13 @@ int sew = 0> { let VLMul = lmul.value in { defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX); - def suffix # "_" # emul.MX : VPseudoBinaryNoMask; - def suffix # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU; + def suffix # "_" # emul.MX : VPseudoBinaryNoMaskTU; def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy, - RISCVMaskedPseudo; + RISCVMaskedPseudo; } } @@ -3983,24 +3998,6 @@ (op2_type op2_kind:$rs2), GPR:$vl, sew)>; -class VPatBinaryNoMask : - Pat<(result_type (!cast(intrinsic_name) - (result_type (undef)), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - VLOpFrag)), - (!cast(inst) - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - GPR:$vl, sew)>; - class VPatBinaryNoMaskTU(inst#"_TU") + (!cast(inst) (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), @@ -4495,8 +4492,6 @@ VReg result_reg_class, VReg op1_reg_class, DAGOperand op2_kind> { - def : VPatBinaryNoMask; def : VPatBinaryNoMaskTU; def : VPatBinaryMaskTA.Predicates in { - def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector (undef)), - (vti.Vector vti.RegClass:$rs2), - (vti.Vector vti.RegClass:$rs1), - VLOpFrag)), - (!cast("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1, - vti.RegClass:$rs2, - GPR:$vl, - vti.Log2SEW)>; def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge), (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), VLOpFrag)), - (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_TU") + (!cast("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2, @@ -5660,10 +5647,11 @@ (vti.Vector vti.RegClass:$rs1), 
(vti.Scalar simm5_plus1:$rs2), VLOpFrag)), - (!cast("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1, + (!cast("PseudoVADD_VI_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs1, (NegImm simm5_plus1:$rs2), GPR:$vl, - vti.Log2SEW)>; + vti.Log2SEW, TU_MU)>; def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge), (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), @@ -6322,10 +6310,9 @@ def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef), (vti.Vector vti.RegClass:$rs1), (XLenVT 1), VLOpFrag)), - (!cast("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1, - vti.RegClass:$rs1, - GPR:$vl, - vti.Log2SEW)>; + (!cast("PseudoVADD_VV_"#vti.LMul.MX) + (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, + vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge), (vti.Vector vti.RegClass:$rs1), (XLenVT 1), Index: llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td =================================================================== --- llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -83,8 +83,8 @@ (op_type op_reg_class:$rs2))), (!cast( !if(isSEWAware, - instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew)#"_TU", - instruction_name#"_VV_"# vlmul.MX#"_TU")) + instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#"_VV_"# vlmul.MX)) (result_type (IMPLICIT_DEF)), op_reg_class:$rs1, op_reg_class:$rs2, @@ -107,8 +107,8 @@ (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))))), (!cast( !if(isSEWAware, - instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew)#"_TU", - instruction_name#_#suffix#_# vlmul.MX#"_TU")) + instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#_#suffix#_# vlmul.MX)) (result_type (IMPLICIT_DEF)), vop_reg_class:$rs1, xop_kind:$rs2, @@ -158,8 +158,8 @@ (vop_type (SplatFPOp xop_kind:$rs2)))), (!cast( !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_TU", - instruction_name#"_"#vlmul.MX#"_TU")) + instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#"_"#vlmul.MX)) (result_type (IMPLICIT_DEF)), vop_reg_class:$rs1, (xop_type xop_kind:$rs2), @@ -188,8 +188,8 @@ (fvti.Vector fvti.RegClass:$rs1))), (!cast( !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_TU", - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_TU")) + instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, + instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) (fvti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, (fvti.Scalar fvti.ScalarRegClass:$rs2), @@ -410,11 +410,13 @@ def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))), (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))), (!cast(instruction_name#"_VV_"#vti.LMul.MX) - vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, + vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))), (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))), (!cast(instruction_name#"_VX_"#vti.LMul.MX) - vti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, + GPR:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; } } } @@ -434,7 +436,8 @@ def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))), (!cast(instruction_name#"_WX_"#vti.LMul.MX) - 
wti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1, + vti.AVL, vti.Log2SEW, TU_MU)>; } } } @@ -490,7 +493,8 @@ (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue)))), (!cast(instruction_name#"_VV_"#vti.LMul.MX) - vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, + vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), @@ -498,13 +502,15 @@ (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), (vti.Mask true_mask), (XLenVT srcvalue)))), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) - vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, + vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) - vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, + vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; } } } @@ -527,11 +533,13 @@ (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), (vti.Mask true_mask), (XLenVT srcvalue)))), (!cast(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX) - wti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, + vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))), (!cast(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX) - wti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, + vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; } } } @@ -716,15 +724,21 @@ // Handle VRSUB specially since it's the only integer binary op with reversed // pattern operands foreach vti = AllIntegerVectors in { + // The AddedComplexity here is covering up a missing matcher for + // widening vwsub.vx which can recognize an extend folded into the + // scalar of the splat.
+ let AddedComplexity = 20 in let Predicates = GetVTypePredicates.Predicates in { def : Pat<(sub (vti.Vector (SplatPat (XLenVT GPR:$rs2))), (vti.Vector vti.RegClass:$rs1)), (!cast("PseudoVRSUB_VX_"# vti.LMul.MX) - vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2, + vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)), (vti.Vector vti.RegClass:$rs1)), (!cast("PseudoVRSUB_VI_"# vti.LMul.MX) - vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, + simm5:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; } } @@ -746,15 +760,18 @@ def : Pat<(shl (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))), (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVWADD_VV_"#vti.LMul.MX) - vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, + vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs1))), (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVWADDU_VV_"#vti.LMul.MX) - vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, + vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(shl (wti.Vector (anyext_oneuse (vti.Vector vti.RegClass:$rs1))), (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVWADDU_VV_"#vti.LMul.MX) - vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, + vti.AVL, vti.Log2SEW, TU_MU)>; } } @@ -788,7 +805,8 @@ def : Pat<(shl (vti.Vector vti.RegClass:$rs1), (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))), (!cast("PseudoVADD_VV_"# vti.LMul.MX) - vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, + vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>; } @@ -1049,29 +1067,35 @@ // 13.12. Vector Floating-Point Sign-Injection Instructions def : Pat<(fabs (vti.Vector vti.RegClass:$rs)), (!cast("PseudoVFSGNJX_VV_"# vti.LMul.MX) - vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TU_MU)>; // Handle fneg with VFSGNJN using the same input for both operands. 
def : Pat<(fneg (vti.Vector vti.RegClass:$rs)), (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX) - vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), (vti.Vector vti.RegClass:$rs2))), (!cast("PseudoVFSGNJ_VV_"# vti.LMul.MX) - vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))), (!cast("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) - vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), (vti.Vector (fneg vti.RegClass:$rs2)))), (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX) - vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))), (!cast("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) - vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>; } } Index: llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td =================================================================== --- llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -1200,7 +1200,8 @@ (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_WV_"#vti.LMul.MX) - wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; def : Pat< (vti.Vector (riscv_trunc_vector_vl @@ -1209,7 +1210,8 @@ (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_WX_"#vti.LMul.MX) - wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; } } } @@ -1229,7 +1231,8 @@ (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_WI_"#vti.LMul.MX) - wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; } } @@ -1342,7 +1345,8 @@ srcvalue, (wti.Mask true_mask), VLOpFrag), (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_WX_"#vti.LMul.MX) - wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; } } @@ -1455,12 +1459,14 @@ (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2), srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), (!cast(instruction_name#"_WX_"#vti.LMul.MX) - wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2), srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), (!cast(instruction_name#"_WI_"#vti.LMul.MX) - wti.RegClass:$rs1, 
uimm5:$rs2, GPR:$vl, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; } } } @@ -1612,7 +1618,8 @@ (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)), srcvalue, (vti.Mask true_mask), VLOpFrag), (!cast("PseudoVADD_VV_"# vti.LMul.MX) - vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>; } // 11.7. Vector Narrowing Integer Right Shift Instructions @@ -1913,7 +1920,8 @@ (vti.Mask true_mask), VLOpFrag), (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX) - vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>; + (vti.Vector (IMPLICIT_DEF)), + vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), (SplatFPOp vti.ScalarRegClass:$rs2), @@ -2386,59 +2394,34 @@ (!cast("PseudoVID_V_"#vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; - - def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector undef), - (vti.Vector vti.RegClass:$rs1), - GPR:$rs2, (vti.Mask true_mask), - VLOpFrag)), - (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX) - vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd), (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask true_mask), VLOpFrag)), - (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU") + (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX) vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; - def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef), - (vti.Vector vti.RegClass:$rs1), - GPR:$rs2, (vti.Mask true_mask), - VLOpFrag)), - (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX) - vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd), (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask true_mask), VLOpFrag)), - (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU") + (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX) vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; } } foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { - def : Pat<(vti.Vector (riscv_fslide1up_vl (vti.Vector undef), - (vti.Vector vti.RegClass:$rs1), - vti.Scalar:$rs2, (vti.Mask true_mask), - VLOpFrag)), - (!cast("PseudoVFSLIDE1UP_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) - vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (riscv_fslide1up_vl (vti.Vector vti.RegClass:$rd), (vti.Vector vti.RegClass:$rs1), vti.Scalar:$rs2, (vti.Mask true_mask), VLOpFrag)), - (!cast("PseudoVFSLIDE1UP_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_TU") + (!cast("PseudoVFSLIDE1UP_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) vti.RegClass:$rd, vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; - def : Pat<(vti.Vector (riscv_fslide1down_vl (vti.Vector undef), - (vti.Vector vti.RegClass:$rs1), - vti.Scalar:$rs2, (vti.Mask true_mask), - VLOpFrag)), - (!cast("PseudoVFSLIDE1DOWN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) - vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (riscv_fslide1down_vl (vti.Vector vti.RegClass:$rd), (vti.Vector vti.RegClass:$rs1), vti.Scalar:$rs2, (vti.Mask true_mask), VLOpFrag)), - (!cast("PseudoVFSLIDE1DOWN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_TU") + (!cast("PseudoVFSLIDE1DOWN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) vti.RegClass:$rd, 
vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; } } Index: llvm/test/CodeGen/RISCV/double_reduct.ll =================================================================== --- llvm/test/CodeGen/RISCV/double_reduct.ll +++ llvm/test/CodeGen/RISCV/double_reduct.ll @@ -44,11 +44,11 @@ define float @fmin_f32(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: fmin_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 523264 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vfmin.vv v8, v8, v9 -; CHECK-NEXT: vfredmin.vs v8, v8, v10 +; CHECK-NEXT: lui a0, 523264 +; CHECK-NEXT: vmv.s.x v9, a0 +; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r1 = call fast float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a) @@ -60,11 +60,11 @@ define float @fmax_f32(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: fmax_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 1047552 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vfmax.vv v8, v8, v9 -; CHECK-NEXT: vfredmax.vs v8, v8, v10 +; CHECK-NEXT: lui a0, 1047552 +; CHECK-NEXT: vmv.s.x v9, a0 +; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r1 = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a) @@ -78,9 +78,9 @@ ; CHECK-LABEL: add_i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vadd.vv v8, v8, v9 -; CHECK-NEXT: vredsum.vs v8, v8, v10 +; CHECK-NEXT: vmv.s.x v9, zero +; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r1 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %a) @@ -92,12 +92,11 @@ define i16 @add_ext_i16(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: add_ext_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; CHECK-NEXT: vwaddu.vv v12, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vredsum.vs v8, v12, v10 +; CHECK-NEXT: vwaddu.vv v10, v8, v9 +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: vredsum.vs v8, v10, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %ae = zext <16 x i8> %a to <16 x i16> @@ -190,9 +189,9 @@ ; CHECK-LABEL: or_i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: vredor.vs v8, v8, v10 +; CHECK-NEXT: vmv.s.x v9, zero +; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r1 = call i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32> %a) @@ -205,9 +204,9 @@ ; CHECK-LABEL: xor_i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vxor.vv v8, v8, v9 -; CHECK-NEXT: vredxor.vs v8, v8, v10 +; CHECK-NEXT: vmv.s.x v9, zero +; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r1 = call i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32> %a) @@ -235,9 +234,9 @@ ; CHECK-LABEL: umax_i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vmaxu.vv v8, v8, v9 -; CHECK-NEXT: vredmaxu.vs v8, v8, v10 +; CHECK-NEXT: vmv.s.x v9, zero +; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r1 = call i32 @llvm.vector.reduce.umax.i32.v4i32(<4 x i32> %a) @@ -277,11 +276,11 @@ define i32 
@smax_i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: smax_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vmax.vv v8, v8, v9 -; CHECK-NEXT: vredmax.vs v8, v8, v10 +; CHECK-NEXT: lui a0, 524288 +; CHECK-NEXT: vmv.s.x v9, a0 +; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r1 = call i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32> %a) Index: llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll =================================================================== --- llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll +++ llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll @@ -64,10 +64,10 @@ ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vfwsub.wv v16, v8, v24 +; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfwsub.wv v8, v16, v24 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vssubu.vv v4, v4, v8, v0.t ; CHECK-NEXT: vsetvli zero, s0, e32, m8, tu, mu Index: llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll @@ -42,12 +42,12 @@ ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vid.v v11 ; RV32-NEXT: vrgather.vv v10, v8, v11 +; RV32-NEXT: vadd.vi v8, v11, -1 ; RV32-NEXT: lui a0, 11 ; RV32-NEXT: addi a0, a0, -1366 ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.v.x v0, a0 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; RV32-NEXT: vadd.vi v8, v11, -1 ; RV32-NEXT: vrgather.vv v10, v9, v8, v0.t ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret @@ -57,12 +57,12 @@ ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vid.v v11 ; RV64-NEXT: vrgather.vv v10, v8, v11 +; RV64-NEXT: vadd.vi v8, v11, -1 ; RV64-NEXT: lui a0, 11 ; RV64-NEXT: addiw a0, a0, -1366 ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.v.x v0, a0 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; RV64-NEXT: vadd.vi v8, v11, -1 ; RV64-NEXT: vrgather.vv v10, v9, v8, v0.t ; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll @@ -1520,37 +1520,37 @@ ; RV32: # %bb.0: ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; RV32-NEXT: vsrl.vx v9, v8, a1 -; RV32-NEXT: li a2, 40 -; RV32-NEXT: vsrl.vx v10, v8, a2 -; RV32-NEXT: lui a3, 16 -; RV32-NEXT: addi a3, a3, -256 -; RV32-NEXT: vand.vx v10, v10, a3 -; RV32-NEXT: vor.vv v9, v10, v9 +; RV32-NEXT: vsll.vx v9, v8, a1 +; RV32-NEXT: lui a2, 16 +; RV32-NEXT: addi a2, a2, -256 +; RV32-NEXT: vand.vx v10, v8, a2 +; RV32-NEXT: li a3, 40 +; RV32-NEXT: vsll.vx v10, v10, a3 +; RV32-NEXT: vor.vv v9, v9, v10 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v10, v8, a4 +; RV32-NEXT: vsll.vi v10, v10, 24 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.v.i v10, 0 +; RV32-NEXT: vmv.v.i v11, 0 ; 
RV32-NEXT: vmv.v.i v0, 5 -; RV32-NEXT: lui a4, 1044480 -; RV32-NEXT: vmerge.vxm v10, v10, a4, v0 +; RV32-NEXT: lui a5, 1044480 +; RV32-NEXT: vmerge.vxm v11, v11, a5, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; RV32-NEXT: vsrl.vi v11, v8, 8 -; RV32-NEXT: vand.vv v11, v11, v10 -; RV32-NEXT: vsrl.vi v12, v8, 24 -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v12, v12, a4 -; RV32-NEXT: vor.vv v11, v11, v12 -; RV32-NEXT: vor.vv v9, v11, v9 -; RV32-NEXT: vsll.vx v11, v8, a1 -; RV32-NEXT: vand.vx v12, v8, a3 -; RV32-NEXT: vsll.vx v12, v12, a2 -; RV32-NEXT: vor.vv v11, v11, v12 -; RV32-NEXT: vand.vv v10, v8, v10 -; RV32-NEXT: vsll.vi v10, v10, 8 +; RV32-NEXT: vand.vv v12, v8, v11 +; RV32-NEXT: vsll.vi v12, v12, 8 +; RV32-NEXT: vor.vv v10, v10, v12 +; RV32-NEXT: vor.vv v9, v9, v10 +; RV32-NEXT: vsrl.vx v10, v8, a1 +; RV32-NEXT: vsrl.vx v12, v8, a3 +; RV32-NEXT: vand.vx v12, v12, a2 +; RV32-NEXT: vor.vv v10, v12, v10 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vand.vv v11, v12, v11 +; RV32-NEXT: vsrl.vi v8, v8, 24 ; RV32-NEXT: vand.vx v8, v8, a4 -; RV32-NEXT: vsll.vi v8, v8, 24 -; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vor.vv v8, v11, v8 -; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vor.vv v8, v9, v8 ; RV32-NEXT: vsrl.vi v9, v8, 4 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: addi a1, a1, -241 @@ -1797,30 +1797,30 @@ ; RV32-NEXT: addi a3, a3, -256 ; RV32-NEXT: vand.vx v12, v12, a3 ; RV32-NEXT: vor.vv v10, v12, v10 -; RV32-NEXT: vsrl.vi v12, v8, 8 -; RV32-NEXT: li a4, 85 +; RV32-NEXT: vsrl.vi v12, v8, 24 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v12, v12, a4 +; RV32-NEXT: vsrl.vi v14, v8, 8 +; RV32-NEXT: li a5, 85 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; RV32-NEXT: vmv.v.x v0, a4 +; RV32-NEXT: vmv.v.x v0, a5 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV32-NEXT: vmv.v.i v14, 0 -; RV32-NEXT: lui a4, 1044480 -; RV32-NEXT: vmerge.vxm v14, v14, a4, v0 +; RV32-NEXT: vmv.v.i v16, 0 +; RV32-NEXT: lui a5, 1044480 +; RV32-NEXT: vmerge.vxm v16, v16, a5, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; RV32-NEXT: vand.vv v12, v12, v14 -; RV32-NEXT: vsrl.vi v16, v8, 24 -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v16, v16, a4 -; RV32-NEXT: vor.vv v12, v12, v16 +; RV32-NEXT: vand.vv v14, v14, v16 +; RV32-NEXT: vor.vv v12, v14, v12 ; RV32-NEXT: vor.vv v10, v12, v10 ; RV32-NEXT: vsll.vx v12, v8, a1 -; RV32-NEXT: vand.vx v16, v8, a3 -; RV32-NEXT: vsll.vx v16, v16, a2 -; RV32-NEXT: vor.vv v12, v12, v16 -; RV32-NEXT: vand.vx v16, v8, a4 -; RV32-NEXT: vsll.vi v16, v16, 24 -; RV32-NEXT: vand.vv v8, v8, v14 +; RV32-NEXT: vand.vx v14, v8, a3 +; RV32-NEXT: vsll.vx v14, v14, a2 +; RV32-NEXT: vor.vv v12, v12, v14 +; RV32-NEXT: vand.vx v14, v8, a4 +; RV32-NEXT: vsll.vi v14, v14, 24 +; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vsll.vi v8, v8, 8 -; RV32-NEXT: vor.vv v8, v16, v8 +; RV32-NEXT: vor.vv v8, v14, v8 ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vsrl.vi v10, v8, 4 @@ -2070,32 +2070,32 @@ ; RV32-NEXT: addi a3, a3, -256 ; RV32-NEXT: vand.vx v16, v16, a3 ; RV32-NEXT: vor.vv v12, v16, v12 +; RV32-NEXT: vsrl.vi v16, v8, 24 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v16, v16, a4 ; RV32-NEXT: vsrl.vi v20, v8, 8 -; RV32-NEXT: lui a4, 5 -; RV32-NEXT: addi a4, a4, 1365 +; RV32-NEXT: lui a5, 5 +; RV32-NEXT: addi a5, a5, 1365 ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma -; RV32-NEXT: vmv.v.x v0, a4 +; RV32-NEXT: vmv.v.x v0, a5 ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma -; RV32-NEXT: vmv.v.i v16, 0 -; RV32-NEXT: 
lui a4, 1044480 -; RV32-NEXT: vmerge.vxm v16, v16, a4, v0 +; RV32-NEXT: vmv.v.i v24, 0 +; RV32-NEXT: lui a5, 1044480 +; RV32-NEXT: vmerge.vxm v24, v24, a5, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; RV32-NEXT: vand.vv v20, v20, v16 -; RV32-NEXT: vsrl.vi v24, v8, 24 -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v24, v24, a4 -; RV32-NEXT: vor.vv v20, v20, v24 -; RV32-NEXT: vor.vv v12, v20, v12 -; RV32-NEXT: vsll.vx v20, v8, a1 -; RV32-NEXT: vand.vx v24, v8, a3 -; RV32-NEXT: vsll.vx v24, v24, a2 -; RV32-NEXT: vor.vv v20, v20, v24 -; RV32-NEXT: vand.vx v24, v8, a4 -; RV32-NEXT: vsll.vi v24, v24, 24 -; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vand.vv v20, v20, v24 +; RV32-NEXT: vor.vv v16, v20, v16 +; RV32-NEXT: vor.vv v12, v16, v12 +; RV32-NEXT: vsll.vx v16, v8, a1 +; RV32-NEXT: vand.vx v20, v8, a3 +; RV32-NEXT: vsll.vx v20, v20, a2 +; RV32-NEXT: vor.vv v16, v16, v20 +; RV32-NEXT: vand.vx v20, v8, a4 +; RV32-NEXT: vsll.vi v20, v20, 24 +; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vsll.vi v8, v8, 8 -; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: vor.vv v8, v20, v8 +; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: vsrl.vi v12, v8, 4 ; RV32-NEXT: lui a1, 61681 @@ -2432,53 +2432,53 @@ ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsll.vx v16, v8, a1 -; RV32-NEXT: lui a2, 16 -; RV32-NEXT: addi a2, a2, -256 -; RV32-NEXT: vand.vx v24, v8, a2 -; RV32-NEXT: li a3, 40 -; RV32-NEXT: vsll.vx v24, v24, a3 -; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: li a2, 40 +; RV32-NEXT: vsrl.vx v24, v8, a2 +; RV32-NEXT: lui a3, 16 +; RV32-NEXT: addi a3, a3, -256 +; RV32-NEXT: vand.vx v24, v24, a3 +; RV32-NEXT: vor.vv v16, v24, v16 ; RV32-NEXT: addi a4, sp, 16 ; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v16, v8, a4 -; RV32-NEXT: vsll.vi v24, v16, 24 -; RV32-NEXT: li a5, 32 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v8, 8 +; RV32-NEXT: li a4, 32 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v16, 0 -; RV32-NEXT: lui a6, 349525 -; RV32-NEXT: addi a6, a6, 1365 +; RV32-NEXT: lui a5, 349525 +; RV32-NEXT: addi a5, a5, 1365 ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma -; RV32-NEXT: lui a7, 1044480 -; RV32-NEXT: vmv.v.x v0, a6 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma -; RV32-NEXT: vmerge.vxm v16, v16, a7, v0 +; RV32-NEXT: lui a6, 1044480 +; RV32-NEXT: vmv.v.x v0, a5 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma +; RV32-NEXT: vmerge.vxm v16, v16, a6, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vand.vv v0, v8, v16 -; RV32-NEXT: vsll.vi v0, v0, 8 +; RV32-NEXT: vand.vv v24, v24, v16 +; RV32-NEXT: lui a6, 4080 +; RV32-NEXT: vsrl.vi v0, v8, 24 +; RV32-NEXT: vand.vx v0, v0, a6 ; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: addi a7, sp, 16 ; RV32-NEXT: vl8r.v v0, (a7) # Unknown-size Folded Reload -; RV32-NEXT: vor.vv v24, v0, v24 +; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: vs8r.v v24, (a7) # Unknown-size Folded Spill -; RV32-NEXT: vsrl.vx v0, v8, a3 -; RV32-NEXT: vand.vx v0, v0, a2 -; RV32-NEXT: vsrl.vx v24, v8, a1 -; RV32-NEXT: vor.vv v24, v0, v24 -; RV32-NEXT: vsrl.vi v0, v8, 8 -; RV32-NEXT: vand.vv v16, v0, v16 -; RV32-NEXT: vsrl.vi v8, v8, 24 -; RV32-NEXT: vand.vx v8, v8, a4 -; RV32-NEXT: vor.vv v8, v16, v8 -; RV32-NEXT: vor.vv v8, 
v8, v24 +; RV32-NEXT: vand.vx v0, v8, a3 +; RV32-NEXT: vsll.vx v0, v0, a2 +; RV32-NEXT: vsll.vx v24, v8, a1 +; RV32-NEXT: vor.vv v24, v24, v0 +; RV32-NEXT: vand.vv v16, v8, v16 +; RV32-NEXT: vand.vx v8, v8, a6 +; RV32-NEXT: vsll.vi v8, v8, 24 +; RV32-NEXT: vsll.vi v16, v16, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload -; RV32-NEXT: vor.vv v8, v16, v8 +; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 4 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: addi a1, a1, -241 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v16, v24 @@ -2488,7 +2488,7 @@ ; RV32-NEXT: vsrl.vi v16, v8, 2 ; RV32-NEXT: lui a1, 209715 ; RV32-NEXT: addi a1, a1, 819 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v16, v24 @@ -2496,8 +2496,8 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 1 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v24, a6 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a5 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vand.vv v8, v8, v24 @@ -2812,53 +2812,53 @@ ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsll.vx v16, v8, a1 -; RV32-NEXT: lui a2, 16 -; RV32-NEXT: addi a2, a2, -256 -; RV32-NEXT: vand.vx v24, v8, a2 -; RV32-NEXT: li a3, 40 -; RV32-NEXT: vsll.vx v24, v24, a3 -; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: li a2, 40 +; RV32-NEXT: vsrl.vx v24, v8, a2 +; RV32-NEXT: lui a3, 16 +; RV32-NEXT: addi a3, a3, -256 +; RV32-NEXT: vand.vx v24, v24, a3 +; RV32-NEXT: vor.vv v16, v24, v16 ; RV32-NEXT: addi a4, sp, 16 ; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v16, v8, a4 -; RV32-NEXT: vsll.vi v24, v16, 24 -; RV32-NEXT: li a5, 32 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v8, 8 +; RV32-NEXT: li a4, 32 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v16, 0 -; RV32-NEXT: lui a6, 349525 -; RV32-NEXT: addi a6, a6, 1365 +; RV32-NEXT: lui a5, 349525 +; RV32-NEXT: addi a5, a5, 1365 ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma -; RV32-NEXT: lui a7, 1044480 -; RV32-NEXT: vmv.v.x v0, a6 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma -; RV32-NEXT: vmerge.vxm v16, v16, a7, v0 +; RV32-NEXT: lui a6, 1044480 +; RV32-NEXT: vmv.v.x v0, a5 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma +; RV32-NEXT: vmerge.vxm v16, v16, a6, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vand.vv v0, v8, v16 -; RV32-NEXT: vsll.vi v0, v0, 8 +; RV32-NEXT: vand.vv v24, v24, v16 +; RV32-NEXT: lui a6, 4080 +; RV32-NEXT: vsrl.vi v0, v8, 24 +; RV32-NEXT: vand.vx v0, v0, a6 ; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: addi a7, sp, 16 ; RV32-NEXT: vl8r.v v0, (a7) # Unknown-size Folded Reload -; RV32-NEXT: vor.vv v24, v0, v24 +; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: vs8r.v v24, (a7) # Unknown-size Folded Spill -; RV32-NEXT: vsrl.vx v0, v8, a3 -; 
RV32-NEXT: vand.vx v0, v0, a2 -; RV32-NEXT: vsrl.vx v24, v8, a1 -; RV32-NEXT: vor.vv v24, v0, v24 -; RV32-NEXT: vsrl.vi v0, v8, 8 -; RV32-NEXT: vand.vv v16, v0, v16 -; RV32-NEXT: vsrl.vi v8, v8, 24 -; RV32-NEXT: vand.vx v8, v8, a4 -; RV32-NEXT: vor.vv v8, v16, v8 -; RV32-NEXT: vor.vv v8, v8, v24 +; RV32-NEXT: vand.vx v0, v8, a3 +; RV32-NEXT: vsll.vx v0, v0, a2 +; RV32-NEXT: vsll.vx v24, v8, a1 +; RV32-NEXT: vor.vv v24, v24, v0 +; RV32-NEXT: vand.vv v16, v8, v16 +; RV32-NEXT: vand.vx v8, v8, a6 +; RV32-NEXT: vsll.vi v8, v8, 24 +; RV32-NEXT: vsll.vi v16, v16, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload -; RV32-NEXT: vor.vv v8, v16, v8 +; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 4 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: addi a1, a1, -241 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v16, v24 @@ -2868,7 +2868,7 @@ ; RV32-NEXT: vsrl.vi v16, v8, 2 ; RV32-NEXT: lui a1, 209715 ; RV32-NEXT: addi a1, a1, 819 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v16, v24 @@ -2876,8 +2876,8 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 1 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v24, a6 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a5 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vand.vv v8, v8, v24 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll @@ -165,38 +165,38 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: li a1, 56 +; RV32-NEXT: vsrl.vx v9, v8, a1 +; RV32-NEXT: li a2, 40 +; RV32-NEXT: vsrl.vx v10, v8, a2 +; RV32-NEXT: lui a3, 16 +; RV32-NEXT: addi a3, a3, -256 +; RV32-NEXT: vand.vx v10, v10, a3 +; RV32-NEXT: vor.vv v9, v10, v9 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.v.i v9, 0 +; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: vmv.v.i v0, 5 -; RV32-NEXT: lui a1, 1044480 -; RV32-NEXT: vmerge.vxm v9, v9, a1, v0 +; RV32-NEXT: lui a4, 1044480 +; RV32-NEXT: vmerge.vxm v10, v10, a4, v0 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; RV32-NEXT: vsrl.vi v10, v8, 8 -; RV32-NEXT: vand.vv v10, v10, v9 -; RV32-NEXT: vsrl.vi v11, v8, 24 -; RV32-NEXT: lui a1, 4080 -; RV32-NEXT: vand.vx v11, v11, a1 -; RV32-NEXT: vor.vv v10, v10, v11 -; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsrl.vx v11, v8, a2 -; RV32-NEXT: li a3, 40 -; RV32-NEXT: vsrl.vx v12, v8, a3 -; RV32-NEXT: lui a4, 16 -; RV32-NEXT: addi a4, a4, -256 +; RV32-NEXT: vsrl.vi v11, v8, 8 +; RV32-NEXT: vand.vv v11, v11, v10 +; RV32-NEXT: vsrl.vi v12, v8, 24 +; RV32-NEXT: lui a4, 4080 ; RV32-NEXT: vand.vx v12, v12, a4 -; RV32-NEXT: vor.vv v11, v12, v11 -; RV32-NEXT: vor.vv v10, v10, v11 -; RV32-NEXT: vand.vv v9, v8, v9 -; RV32-NEXT: vsll.vi v9, v9, 8 -; RV32-NEXT: vand.vx v11, v8, a1 -; RV32-NEXT: vsll.vi v11, v11, 24 +; RV32-NEXT: vor.vv v11, v11, v12 ; RV32-NEXT: vor.vv v9, v11, v9 -; RV32-NEXT: vsll.vx v11, v8, 
a2 +; RV32-NEXT: vsll.vx v11, v8, a1 +; RV32-NEXT: vand.vx v12, v8, a3 +; RV32-NEXT: vsll.vx v12, v12, a2 +; RV32-NEXT: vor.vv v11, v11, v12 +; RV32-NEXT: vand.vv v10, v8, v10 +; RV32-NEXT: vsll.vi v10, v10, 8 ; RV32-NEXT: vand.vx v8, v8, a4 -; RV32-NEXT: vsll.vx v8, v8, a3 +; RV32-NEXT: vsll.vi v8, v8, 24 +; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vor.vv v8, v11, v8 ; RV32-NEXT: vor.vv v8, v8, v9 -; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vsrl.vi v9, v8, 4 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: addi a1, a1, -241 @@ -234,34 +234,34 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: li a1, 56 -; RV64-NEXT: vsrl.vx v9, v8, a1 -; RV64-NEXT: li a2, 40 -; RV64-NEXT: vsrl.vx v10, v8, a2 -; RV64-NEXT: lui a3, 16 -; RV64-NEXT: addiw a3, a3, -256 -; RV64-NEXT: vand.vx v10, v10, a3 -; RV64-NEXT: vor.vv v9, v10, v9 -; RV64-NEXT: vsrl.vi v10, v8, 24 -; RV64-NEXT: lui a4, 4080 -; RV64-NEXT: vand.vx v10, v10, a4 -; RV64-NEXT: vsrl.vi v11, v8, 8 -; RV64-NEXT: li a5, 255 -; RV64-NEXT: slli a5, a5, 24 -; RV64-NEXT: vand.vx v11, v11, a5 -; RV64-NEXT: vor.vv v10, v11, v10 +; RV64-NEXT: li a1, 255 +; RV64-NEXT: slli a1, a1, 24 +; RV64-NEXT: vand.vx v9, v8, a1 +; RV64-NEXT: vsll.vi v9, v9, 8 +; RV64-NEXT: lui a2, 4080 +; RV64-NEXT: vand.vx v10, v8, a2 +; RV64-NEXT: vsll.vi v10, v10, 24 ; RV64-NEXT: vor.vv v9, v10, v9 -; RV64-NEXT: vand.vx v10, v8, a5 -; RV64-NEXT: vsll.vi v10, v10, 8 +; RV64-NEXT: li a3, 56 +; RV64-NEXT: vsll.vx v10, v8, a3 +; RV64-NEXT: lui a4, 16 +; RV64-NEXT: addiw a4, a4, -256 ; RV64-NEXT: vand.vx v11, v8, a4 -; RV64-NEXT: vsll.vi v11, v11, 24 +; RV64-NEXT: li a5, 40 +; RV64-NEXT: vsll.vx v11, v11, a5 +; RV64-NEXT: vor.vv v10, v10, v11 +; RV64-NEXT: vor.vv v9, v10, v9 +; RV64-NEXT: vsrl.vx v10, v8, a3 +; RV64-NEXT: vsrl.vx v11, v8, a5 +; RV64-NEXT: vand.vx v11, v11, a4 ; RV64-NEXT: vor.vv v10, v11, v10 -; RV64-NEXT: vsll.vx v11, v8, a1 -; RV64-NEXT: vand.vx v8, v8, a3 -; RV64-NEXT: vsll.vx v8, v8, a2 -; RV64-NEXT: vor.vv v8, v11, v8 +; RV64-NEXT: vsrl.vi v11, v8, 24 +; RV64-NEXT: vand.vx v11, v11, a2 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vor.vv v8, v8, v11 ; RV64-NEXT: vor.vv v8, v8, v10 -; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: vsrl.vi v9, v8, 4 ; RV64-NEXT: lui a1, 61681 ; RV64-NEXT: addiw a1, a1, -241 @@ -704,20 +704,20 @@ ; LMULMAX2-RV32-NEXT: addi a3, a3, -256 ; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a3 ; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10 -; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 24 -; LMULMAX2-RV32-NEXT: lui a4, 4080 -; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4 -; LMULMAX2-RV32-NEXT: li a5, 85 +; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 8 +; LMULMAX2-RV32-NEXT: li a4, 85 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; LMULMAX2-RV32-NEXT: vmv.v.x v0, a5 +; LMULMAX2-RV32-NEXT: vmv.v.x v0, a4 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0 -; LMULMAX2-RV32-NEXT: lui a5, 1044480 -; LMULMAX2-RV32-NEXT: vmerge.vxm v14, v14, a5, v0 +; LMULMAX2-RV32-NEXT: lui a4, 1044480 +; LMULMAX2-RV32-NEXT: vmerge.vxm v14, v14, a4, v0 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 8 -; LMULMAX2-RV32-NEXT: vand.vv v16, v16, v14 -; LMULMAX2-RV32-NEXT: vor.vv v12, v16, v12 +; LMULMAX2-RV32-NEXT: vand.vv v12, v12, v14 +; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 24 +; LMULMAX2-RV32-NEXT: lui a4, 4080 +; LMULMAX2-RV32-NEXT: vand.vx v16, v16, a4 +; LMULMAX2-RV32-NEXT: 
vor.vv v12, v12, v16 ; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10 ; LMULMAX2-RV32-NEXT: vsll.vx v12, v8, a1 ; LMULMAX2-RV32-NEXT: vand.vx v16, v8, a3 @@ -767,34 +767,34 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: li a1, 56 -; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: li a2, 40 -; LMULMAX2-RV64-NEXT: vsrl.vx v12, v8, a2 -; LMULMAX2-RV64-NEXT: lui a3, 16 -; LMULMAX2-RV64-NEXT: addiw a3, a3, -256 -; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a3 -; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10 -; LMULMAX2-RV64-NEXT: vsrl.vi v12, v8, 24 -; LMULMAX2-RV64-NEXT: lui a4, 4080 -; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a4 -; LMULMAX2-RV64-NEXT: vsrl.vi v14, v8, 8 -; LMULMAX2-RV64-NEXT: li a5, 255 -; LMULMAX2-RV64-NEXT: slli a5, a5, 24 -; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a5 -; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12 +; LMULMAX2-RV64-NEXT: li a1, 255 +; LMULMAX2-RV64-NEXT: slli a1, a1, 24 +; LMULMAX2-RV64-NEXT: vand.vx v10, v8, a1 +; LMULMAX2-RV64-NEXT: vsll.vi v10, v10, 8 +; LMULMAX2-RV64-NEXT: lui a2, 4080 +; LMULMAX2-RV64-NEXT: vand.vx v12, v8, a2 +; LMULMAX2-RV64-NEXT: vsll.vi v12, v12, 24 ; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10 -; LMULMAX2-RV64-NEXT: vand.vx v12, v8, a5 -; LMULMAX2-RV64-NEXT: vsll.vi v12, v12, 8 +; LMULMAX2-RV64-NEXT: li a3, 56 +; LMULMAX2-RV64-NEXT: vsll.vx v12, v8, a3 +; LMULMAX2-RV64-NEXT: lui a4, 16 +; LMULMAX2-RV64-NEXT: addiw a4, a4, -256 ; LMULMAX2-RV64-NEXT: vand.vx v14, v8, a4 -; LMULMAX2-RV64-NEXT: vsll.vi v14, v14, 24 +; LMULMAX2-RV64-NEXT: li a5, 40 +; LMULMAX2-RV64-NEXT: vsll.vx v14, v14, a5 +; LMULMAX2-RV64-NEXT: vor.vv v12, v12, v14 +; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10 +; LMULMAX2-RV64-NEXT: vsrl.vx v12, v8, a3 +; LMULMAX2-RV64-NEXT: vsrl.vx v14, v8, a5 +; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a4 ; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12 -; LMULMAX2-RV64-NEXT: vsll.vx v14, v8, a1 -; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a3 -; LMULMAX2-RV64-NEXT: vsll.vx v8, v8, a2 -; LMULMAX2-RV64-NEXT: vor.vv v8, v14, v8 +; LMULMAX2-RV64-NEXT: vsrl.vi v14, v8, 24 +; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a2 +; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 +; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 +; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v14 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v12 -; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 +; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 4 ; LMULMAX2-RV64-NEXT: lui a1, 61681 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -241 @@ -828,40 +828,40 @@ ; LMULMAX1-RV32-LABEL: bitreverse_v4i64: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v10, (a1) +; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) +; LMULMAX1-RV32-NEXT: li a2, 56 +; LMULMAX1-RV32-NEXT: vsrl.vx v9, v10, a2 +; LMULMAX1-RV32-NEXT: li a3, 40 +; LMULMAX1-RV32-NEXT: vsrl.vx v11, v10, a3 +; LMULMAX1-RV32-NEXT: lui a4, 16 +; LMULMAX1-RV32-NEXT: addi a4, a4, -256 +; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a4 +; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v9 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v9, 0 ; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5 -; LMULMAX1-RV32-NEXT: lui a2, 1044480 -; LMULMAX1-RV32-NEXT: vmerge.vxm v9, v9, a2, v0 +; LMULMAX1-RV32-NEXT: lui a5, 1044480 +; LMULMAX1-RV32-NEXT: vmerge.vxm v9, v9, a5, v0 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vsrl.vi 
v11, v10, 8 -; LMULMAX1-RV32-NEXT: vand.vv v11, v11, v9 -; LMULMAX1-RV32-NEXT: vsrl.vi v12, v10, 24 -; LMULMAX1-RV32-NEXT: lui a2, 4080 -; LMULMAX1-RV32-NEXT: vand.vx v12, v12, a2 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v12 -; LMULMAX1-RV32-NEXT: li a3, 56 -; LMULMAX1-RV32-NEXT: vsrl.vx v12, v10, a3 -; LMULMAX1-RV32-NEXT: li a4, 40 -; LMULMAX1-RV32-NEXT: vsrl.vx v13, v10, a4 -; LMULMAX1-RV32-NEXT: lui a5, 16 -; LMULMAX1-RV32-NEXT: addi a5, a5, -256 +; LMULMAX1-RV32-NEXT: vsrl.vi v12, v10, 8 +; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v9 +; LMULMAX1-RV32-NEXT: vsrl.vi v13, v10, 24 +; LMULMAX1-RV32-NEXT: lui a5, 4080 ; LMULMAX1-RV32-NEXT: vand.vx v13, v13, a5 -; LMULMAX1-RV32-NEXT: vor.vv v12, v13, v12 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v12 -; LMULMAX1-RV32-NEXT: vand.vv v12, v10, v9 -; LMULMAX1-RV32-NEXT: vsll.vi v12, v12, 8 -; LMULMAX1-RV32-NEXT: vand.vx v13, v10, a2 -; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 24 -; LMULMAX1-RV32-NEXT: vor.vv v12, v13, v12 -; LMULMAX1-RV32-NEXT: vsll.vx v13, v10, a3 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; LMULMAX1-RV32-NEXT: vor.vv v11, v12, v11 +; LMULMAX1-RV32-NEXT: vsll.vx v12, v10, a2 +; LMULMAX1-RV32-NEXT: vand.vx v13, v10, a4 +; LMULMAX1-RV32-NEXT: vsll.vx v13, v13, a3 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; LMULMAX1-RV32-NEXT: vand.vv v13, v10, v9 +; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 8 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a5 -; LMULMAX1-RV32-NEXT: vsll.vx v10, v10, a4 -; LMULMAX1-RV32-NEXT: vor.vv v10, v13, v10 -; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v12 +; LMULMAX1-RV32-NEXT: vsll.vi v10, v10, 24 +; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v13 +; LMULMAX1-RV32-NEXT: vor.vv v10, v12, v10 ; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v11 ; LMULMAX1-RV32-NEXT: vsrl.vi v11, v10, 4 ; LMULMAX1-RV32-NEXT: lui a6, 61681 @@ -893,26 +893,26 @@ ; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v14 ; LMULMAX1-RV32-NEXT: vadd.vv v10, v10, v10 ; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV32-NEXT: vsrl.vi v11, v8, 8 -; LMULMAX1-RV32-NEXT: vand.vv v11, v11, v9 -; LMULMAX1-RV32-NEXT: vsrl.vi v15, v8, 24 -; LMULMAX1-RV32-NEXT: vand.vx v15, v15, a2 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v15 +; LMULMAX1-RV32-NEXT: vsrl.vx v11, v8, a2 ; LMULMAX1-RV32-NEXT: vsrl.vx v15, v8, a3 -; LMULMAX1-RV32-NEXT: vsrl.vx v16, v8, a4 +; LMULMAX1-RV32-NEXT: vand.vx v15, v15, a4 +; LMULMAX1-RV32-NEXT: vor.vv v11, v15, v11 +; LMULMAX1-RV32-NEXT: vsrl.vi v15, v8, 8 +; LMULMAX1-RV32-NEXT: vand.vv v15, v15, v9 +; LMULMAX1-RV32-NEXT: vsrl.vi v16, v8, 24 ; LMULMAX1-RV32-NEXT: vand.vx v16, v16, a5 -; LMULMAX1-RV32-NEXT: vor.vv v15, v16, v15 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v15 +; LMULMAX1-RV32-NEXT: vor.vv v15, v15, v16 +; LMULMAX1-RV32-NEXT: vor.vv v11, v15, v11 +; LMULMAX1-RV32-NEXT: vsll.vx v15, v8, a2 +; LMULMAX1-RV32-NEXT: vand.vx v16, v8, a4 +; LMULMAX1-RV32-NEXT: vsll.vx v16, v16, a3 +; LMULMAX1-RV32-NEXT: vor.vv v15, v15, v16 ; LMULMAX1-RV32-NEXT: vand.vv v9, v8, v9 ; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 8 -; LMULMAX1-RV32-NEXT: vand.vx v15, v8, a2 -; LMULMAX1-RV32-NEXT: vsll.vi v15, v15, 24 -; LMULMAX1-RV32-NEXT: vor.vv v9, v15, v9 -; LMULMAX1-RV32-NEXT: vsll.vx v15, v8, a3 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a5 -; LMULMAX1-RV32-NEXT: vsll.vx v8, v8, a4 -; LMULMAX1-RV32-NEXT: vor.vv v8, v15, v8 +; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 24 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 +; LMULMAX1-RV32-NEXT: vor.vv v8, v15, v8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 4 ; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v12 @@ -939,34 +939,34 @@ ; 
LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a1) ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV64-NEXT: li a2, 56 -; LMULMAX1-RV64-NEXT: vsrl.vx v10, v9, a2 -; LMULMAX1-RV64-NEXT: li a3, 40 -; LMULMAX1-RV64-NEXT: vsrl.vx v11, v9, a3 -; LMULMAX1-RV64-NEXT: lui a4, 16 -; LMULMAX1-RV64-NEXT: addiw a4, a4, -256 -; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4 -; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV64-NEXT: vsrl.vi v11, v9, 24 -; LMULMAX1-RV64-NEXT: lui a5, 4080 -; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a5 -; LMULMAX1-RV64-NEXT: vsrl.vi v12, v9, 8 -; LMULMAX1-RV64-NEXT: li a6, 255 -; LMULMAX1-RV64-NEXT: slli a6, a6, 24 -; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a6 -; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 +; LMULMAX1-RV64-NEXT: li a2, 255 +; LMULMAX1-RV64-NEXT: slli a2, a2, 24 +; LMULMAX1-RV64-NEXT: vand.vx v10, v9, a2 +; LMULMAX1-RV64-NEXT: vsll.vi v10, v10, 8 +; LMULMAX1-RV64-NEXT: lui a3, 4080 +; LMULMAX1-RV64-NEXT: vand.vx v11, v9, a3 +; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 24 ; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV64-NEXT: vand.vx v11, v9, a6 -; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8 +; LMULMAX1-RV64-NEXT: li a4, 56 +; LMULMAX1-RV64-NEXT: vsll.vx v11, v9, a4 +; LMULMAX1-RV64-NEXT: lui a5, 16 +; LMULMAX1-RV64-NEXT: addiw a5, a5, -256 ; LMULMAX1-RV64-NEXT: vand.vx v12, v9, a5 -; LMULMAX1-RV64-NEXT: vsll.vi v12, v12, 24 +; LMULMAX1-RV64-NEXT: li a6, 40 +; LMULMAX1-RV64-NEXT: vsll.vx v12, v12, a6 +; LMULMAX1-RV64-NEXT: vor.vv v11, v11, v12 +; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 +; LMULMAX1-RV64-NEXT: vsrl.vx v11, v9, a4 +; LMULMAX1-RV64-NEXT: vsrl.vx v12, v9, a6 +; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a5 ; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 -; LMULMAX1-RV64-NEXT: vsll.vx v12, v9, a2 -; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a4 -; LMULMAX1-RV64-NEXT: vsll.vx v9, v9, a3 -; LMULMAX1-RV64-NEXT: vor.vv v9, v12, v9 +; LMULMAX1-RV64-NEXT: vsrl.vi v12, v9, 24 +; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a3 +; LMULMAX1-RV64-NEXT: vsrl.vi v9, v9, 8 +; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a2 +; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v12 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v11 -; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 +; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 4 ; LMULMAX1-RV64-NEXT: lui a7, 61681 ; LMULMAX1-RV64-NEXT: addiw a7, a7, -241 @@ -994,27 +994,27 @@ ; LMULMAX1-RV64-NEXT: vand.vx v9, v9, t1 ; LMULMAX1-RV64-NEXT: vadd.vv v9, v9, v9 ; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9 -; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a2 -; LMULMAX1-RV64-NEXT: vsrl.vx v11, v8, a3 -; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4 -; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV64-NEXT: vsrl.vi v11, v8, 24 -; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a5 -; LMULMAX1-RV64-NEXT: vsrl.vi v12, v8, 8 -; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a6 -; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 +; LMULMAX1-RV64-NEXT: vand.vx v10, v8, a2 +; LMULMAX1-RV64-NEXT: vsll.vi v10, v10, 8 +; LMULMAX1-RV64-NEXT: vand.vx v11, v8, a3 +; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 24 ; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV64-NEXT: vand.vx v11, v8, a6 -; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8 +; LMULMAX1-RV64-NEXT: vsll.vx v11, v8, a4 ; LMULMAX1-RV64-NEXT: vand.vx v12, v8, a5 -; LMULMAX1-RV64-NEXT: vsll.vi v12, v12, 24 +; LMULMAX1-RV64-NEXT: vsll.vx v12, v12, a6 +; LMULMAX1-RV64-NEXT: vor.vv v11, v11, v12 +; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 +; LMULMAX1-RV64-NEXT: vsrl.vx v11, v8, a4 +; LMULMAX1-RV64-NEXT: vsrl.vx v12, v8, a6 +; 
LMULMAX1-RV64-NEXT: vand.vx v12, v12, a5 ; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 -; LMULMAX1-RV64-NEXT: vsll.vx v12, v8, a2 -; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4 -; LMULMAX1-RV64-NEXT: vsll.vx v8, v8, a3 -; LMULMAX1-RV64-NEXT: vor.vv v8, v12, v8 +; LMULMAX1-RV64-NEXT: vsrl.vi v12, v8, 24 +; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a3 +; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 +; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a2 +; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v12 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11 -; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 +; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 4 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a7 ; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a7 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll @@ -503,37 +503,37 @@ ; RV32: # %bb.0: ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; RV32-NEXT: vsrl.vx v9, v8, a1 -; RV32-NEXT: li a2, 40 -; RV32-NEXT: vsrl.vx v10, v8, a2 -; RV32-NEXT: lui a3, 16 -; RV32-NEXT: addi a3, a3, -256 -; RV32-NEXT: vand.vx v10, v10, a3 -; RV32-NEXT: vor.vv v9, v10, v9 +; RV32-NEXT: vsll.vx v9, v8, a1 +; RV32-NEXT: lui a2, 16 +; RV32-NEXT: addi a2, a2, -256 +; RV32-NEXT: vand.vx v10, v8, a2 +; RV32-NEXT: li a3, 40 +; RV32-NEXT: vsll.vx v10, v10, a3 +; RV32-NEXT: vor.vv v9, v9, v10 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v10, v8, a4 +; RV32-NEXT: vsll.vi v10, v10, 24 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.v.i v10, 0 +; RV32-NEXT: vmv.v.i v11, 0 ; RV32-NEXT: vmv.v.i v0, 5 -; RV32-NEXT: lui a4, 1044480 -; RV32-NEXT: vmerge.vxm v10, v10, a4, v0 +; RV32-NEXT: lui a5, 1044480 +; RV32-NEXT: vmerge.vxm v11, v11, a5, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; RV32-NEXT: vsrl.vi v11, v8, 8 -; RV32-NEXT: vand.vv v11, v11, v10 -; RV32-NEXT: vsrl.vi v12, v8, 24 -; RV32-NEXT: lui a0, 4080 -; RV32-NEXT: vand.vx v12, v12, a0 -; RV32-NEXT: vor.vv v11, v11, v12 -; RV32-NEXT: vor.vv v9, v11, v9 -; RV32-NEXT: vsll.vx v11, v8, a1 -; RV32-NEXT: vand.vx v12, v8, a3 -; RV32-NEXT: vsll.vx v12, v12, a2 -; RV32-NEXT: vor.vv v11, v11, v12 -; RV32-NEXT: vand.vv v10, v8, v10 -; RV32-NEXT: vsll.vi v10, v10, 8 -; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: vsll.vi v8, v8, 24 -; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vand.vv v12, v8, v11 +; RV32-NEXT: vsll.vi v12, v12, 8 +; RV32-NEXT: vor.vv v10, v10, v12 +; RV32-NEXT: vor.vv v9, v9, v10 +; RV32-NEXT: vsrl.vx v10, v8, a1 +; RV32-NEXT: vsrl.vx v12, v8, a3 +; RV32-NEXT: vand.vx v12, v12, a2 +; RV32-NEXT: vor.vv v10, v12, v10 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vand.vv v11, v12, v11 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: vand.vx v8, v8, a4 ; RV32-NEXT: vor.vv v8, v11, v8 -; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vor.vv v8, v9, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bswap_v2i64_unmasked: @@ -666,30 +666,30 @@ ; RV32-NEXT: addi a3, a3, -256 ; RV32-NEXT: vand.vx v12, v12, a3 ; RV32-NEXT: vor.vv v10, v12, v10 -; RV32-NEXT: vsrl.vi v12, v8, 8 -; RV32-NEXT: li a4, 85 +; RV32-NEXT: vsrl.vi v12, v8, 24 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v12, v12, a4 +; RV32-NEXT: vsrl.vi v14, v8, 8 +; RV32-NEXT: li a5, 85 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; RV32-NEXT: vmv.v.x v0, a4 +; RV32-NEXT: vmv.v.x v0, a5 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV32-NEXT: 
vmv.v.i v14, 0 -; RV32-NEXT: lui a4, 1044480 -; RV32-NEXT: vmerge.vxm v14, v14, a4, v0 +; RV32-NEXT: vmv.v.i v16, 0 +; RV32-NEXT: lui a5, 1044480 +; RV32-NEXT: vmerge.vxm v16, v16, a5, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; RV32-NEXT: vand.vv v12, v12, v14 -; RV32-NEXT: vsrl.vi v16, v8, 24 -; RV32-NEXT: lui a0, 4080 -; RV32-NEXT: vand.vx v16, v16, a0 -; RV32-NEXT: vor.vv v12, v12, v16 +; RV32-NEXT: vand.vv v14, v14, v16 +; RV32-NEXT: vor.vv v12, v14, v12 ; RV32-NEXT: vor.vv v10, v12, v10 ; RV32-NEXT: vsll.vx v12, v8, a1 -; RV32-NEXT: vand.vx v16, v8, a3 -; RV32-NEXT: vsll.vx v16, v16, a2 -; RV32-NEXT: vor.vv v12, v12, v16 -; RV32-NEXT: vand.vx v16, v8, a0 -; RV32-NEXT: vsll.vi v16, v16, 24 -; RV32-NEXT: vand.vv v8, v8, v14 +; RV32-NEXT: vand.vx v14, v8, a3 +; RV32-NEXT: vsll.vx v14, v14, a2 +; RV32-NEXT: vor.vv v12, v12, v14 +; RV32-NEXT: vand.vx v14, v8, a4 +; RV32-NEXT: vsll.vi v14, v14, 24 +; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vsll.vi v8, v8, 8 -; RV32-NEXT: vor.vv v8, v16, v8 +; RV32-NEXT: vor.vv v8, v14, v8 ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: ret @@ -825,32 +825,32 @@ ; RV32-NEXT: addi a3, a3, -256 ; RV32-NEXT: vand.vx v16, v16, a3 ; RV32-NEXT: vor.vv v12, v16, v12 +; RV32-NEXT: vsrl.vi v16, v8, 24 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v16, v16, a4 ; RV32-NEXT: vsrl.vi v20, v8, 8 -; RV32-NEXT: lui a4, 5 -; RV32-NEXT: addi a4, a4, 1365 +; RV32-NEXT: lui a5, 5 +; RV32-NEXT: addi a5, a5, 1365 ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma -; RV32-NEXT: vmv.v.x v0, a4 +; RV32-NEXT: vmv.v.x v0, a5 ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma -; RV32-NEXT: vmv.v.i v16, 0 -; RV32-NEXT: lui a4, 1044480 -; RV32-NEXT: vmerge.vxm v16, v16, a4, v0 +; RV32-NEXT: vmv.v.i v24, 0 +; RV32-NEXT: lui a5, 1044480 +; RV32-NEXT: vmerge.vxm v24, v24, a5, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; RV32-NEXT: vand.vv v20, v20, v16 -; RV32-NEXT: vsrl.vi v24, v8, 24 -; RV32-NEXT: lui a0, 4080 -; RV32-NEXT: vand.vx v24, v24, a0 -; RV32-NEXT: vor.vv v20, v20, v24 -; RV32-NEXT: vor.vv v12, v20, v12 -; RV32-NEXT: vsll.vx v20, v8, a1 -; RV32-NEXT: vand.vx v24, v8, a3 -; RV32-NEXT: vsll.vx v24, v24, a2 -; RV32-NEXT: vor.vv v20, v20, v24 -; RV32-NEXT: vand.vx v24, v8, a0 -; RV32-NEXT: vsll.vi v24, v24, 24 -; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vand.vv v20, v20, v24 +; RV32-NEXT: vor.vv v16, v20, v16 +; RV32-NEXT: vor.vv v12, v16, v12 +; RV32-NEXT: vsll.vx v16, v8, a1 +; RV32-NEXT: vand.vx v20, v8, a3 +; RV32-NEXT: vsll.vx v20, v20, a2 +; RV32-NEXT: vor.vv v16, v16, v20 +; RV32-NEXT: vand.vx v20, v8, a4 +; RV32-NEXT: vsll.vi v20, v20, 24 +; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vsll.vi v8, v8, 8 -; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: vor.vv v8, v20, v8 +; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: ret ; @@ -1075,48 +1075,49 @@ ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsll.vx v16, v8, a1 -; RV32-NEXT: lui a2, 16 -; RV32-NEXT: addi a2, a2, -256 -; RV32-NEXT: vand.vx v24, v8, a2 -; RV32-NEXT: li a3, 40 -; RV32-NEXT: vsll.vx v24, v24, a3 -; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: li a2, 40 +; RV32-NEXT: vsrl.vx v24, v8, a2 +; RV32-NEXT: lui a3, 16 +; RV32-NEXT: addi a3, a3, -256 +; RV32-NEXT: vand.vx v24, v24, a3 +; RV32-NEXT: vor.vv v16, v24, v16 ; RV32-NEXT: addi 
a4, sp, 16 ; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v16, v8, a4 -; RV32-NEXT: vsll.vi v24, v16, 24 -; RV32-NEXT: li a5, 32 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v8, 8 +; RV32-NEXT: li a4, 32 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v16, 0 -; RV32-NEXT: lui a6, 349525 -; RV32-NEXT: addi a6, a6, 1365 +; RV32-NEXT: lui a5, 349525 +; RV32-NEXT: addi a5, a5, 1365 ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma -; RV32-NEXT: lui a7, 1044480 -; RV32-NEXT: vmv.v.x v0, a6 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma -; RV32-NEXT: vmerge.vxm v16, v16, a7, v0 +; RV32-NEXT: lui a6, 1044480 +; RV32-NEXT: vmv.v.x v0, a5 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma +; RV32-NEXT: vmerge.vxm v16, v16, a6, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vand.vv v0, v8, v16 -; RV32-NEXT: vsll.vi v0, v0, 8 +; RV32-NEXT: vand.vv v24, v24, v16 +; RV32-NEXT: lui a0, 4080 +; RV32-NEXT: vsrl.vi v0, v8, 24 +; RV32-NEXT: vand.vx v0, v0, a0 +; RV32-NEXT: vor.vv v24, v24, v0 +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v24, v24, v0 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vand.vx v0, v8, a3 +; RV32-NEXT: vsll.vx v0, v0, a2 +; RV32-NEXT: vsll.vx v24, v8, a1 +; RV32-NEXT: vor.vv v24, v24, v0 +; RV32-NEXT: vand.vv v16, v8, v16 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vsll.vi v8, v8, 24 +; RV32-NEXT: vsll.vi v16, v16, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload -; RV32-NEXT: vor.vv v24, v0, v24 -; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; RV32-NEXT: vsrl.vx v0, v8, a3 -; RV32-NEXT: vand.vx v0, v0, a2 -; RV32-NEXT: vsrl.vx v24, v8, a1 -; RV32-NEXT: vor.vv v24, v0, v24 -; RV32-NEXT: vsrl.vi v0, v8, 8 -; RV32-NEXT: vand.vv v16, v0, v16 -; RV32-NEXT: vsrl.vi v8, v8, 24 -; RV32-NEXT: vand.vx v8, v8, a4 -; RV32-NEXT: vor.vv v8, v16, v8 -; RV32-NEXT: vor.vv v8, v8, v24 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload -; RV32-NEXT: vor.vv v8, v16, v8 +; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 @@ -1344,48 +1345,49 @@ ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsll.vx v16, v8, a1 -; RV32-NEXT: lui a2, 16 -; RV32-NEXT: addi a2, a2, -256 -; RV32-NEXT: vand.vx v24, v8, a2 -; RV32-NEXT: li a3, 40 -; RV32-NEXT: vsll.vx v24, v24, a3 -; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: li a2, 40 +; RV32-NEXT: vsrl.vx v24, v8, a2 +; RV32-NEXT: lui a3, 16 +; RV32-NEXT: addi a3, a3, -256 +; RV32-NEXT: vand.vx v24, v24, a3 +; RV32-NEXT: vor.vv v16, v24, v16 ; RV32-NEXT: addi a4, sp, 16 ; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v16, v8, a4 -; RV32-NEXT: vsll.vi v24, v16, 24 -; RV32-NEXT: li a5, 32 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v8, 8 +; RV32-NEXT: li a4, 32 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v16, 0 -; RV32-NEXT: lui a6, 349525 -; RV32-NEXT: addi a6, a6, 1365 +; RV32-NEXT: lui a5, 349525 +; RV32-NEXT: addi a5, a5, 1365 ; RV32-NEXT: vsetivli 
zero, 1, e32, mf2, ta, ma -; RV32-NEXT: lui a7, 1044480 -; RV32-NEXT: vmv.v.x v0, a6 -; RV32-NEXT: vsetvli zero, a5, e32, m8, ta, ma -; RV32-NEXT: vmerge.vxm v16, v16, a7, v0 +; RV32-NEXT: lui a6, 1044480 +; RV32-NEXT: vmv.v.x v0, a5 +; RV32-NEXT: vsetvli zero, a4, e32, m8, ta, ma +; RV32-NEXT: vmerge.vxm v16, v16, a6, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vand.vv v0, v8, v16 -; RV32-NEXT: vsll.vi v0, v0, 8 +; RV32-NEXT: vand.vv v24, v24, v16 +; RV32-NEXT: lui a0, 4080 +; RV32-NEXT: vsrl.vi v0, v8, 24 +; RV32-NEXT: vand.vx v0, v0, a0 ; RV32-NEXT: vor.vv v24, v24, v0 +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vor.vv v24, v24, v0 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vand.vx v0, v8, a3 +; RV32-NEXT: vsll.vx v0, v0, a2 +; RV32-NEXT: vsll.vx v24, v8, a1 +; RV32-NEXT: vor.vv v24, v24, v0 +; RV32-NEXT: vand.vv v16, v8, v16 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vsll.vi v8, v8, 24 +; RV32-NEXT: vsll.vi v16, v16, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload -; RV32-NEXT: vor.vv v24, v0, v24 -; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; RV32-NEXT: vsrl.vx v0, v8, a3 -; RV32-NEXT: vand.vx v0, v0, a2 -; RV32-NEXT: vsrl.vx v24, v8, a1 -; RV32-NEXT: vor.vv v24, v0, v24 -; RV32-NEXT: vsrl.vi v0, v8, 8 -; RV32-NEXT: vand.vv v16, v0, v16 -; RV32-NEXT: vsrl.vi v8, v8, 24 -; RV32-NEXT: vand.vx v8, v8, a4 -; RV32-NEXT: vor.vv v8, v16, v8 -; RV32-NEXT: vor.vv v8, v8, v24 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload -; RV32-NEXT: vor.vv v8, v16, v8 +; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll @@ -71,38 +71,38 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: li a1, 56 +; RV32-NEXT: vsrl.vx v9, v8, a1 +; RV32-NEXT: li a2, 40 +; RV32-NEXT: vsrl.vx v10, v8, a2 +; RV32-NEXT: lui a3, 16 +; RV32-NEXT: addi a3, a3, -256 +; RV32-NEXT: vand.vx v10, v10, a3 +; RV32-NEXT: vor.vv v9, v10, v9 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.v.i v9, 0 +; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: vmv.v.i v0, 5 -; RV32-NEXT: lui a1, 1044480 -; RV32-NEXT: vmerge.vxm v9, v9, a1, v0 +; RV32-NEXT: lui a4, 1044480 +; RV32-NEXT: vmerge.vxm v10, v10, a4, v0 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; RV32-NEXT: vsrl.vi v10, v8, 8 -; RV32-NEXT: vand.vv v10, v10, v9 -; RV32-NEXT: vsrl.vi v11, v8, 24 -; RV32-NEXT: lui a1, 4080 -; RV32-NEXT: vand.vx v11, v11, a1 -; RV32-NEXT: vor.vv v10, v10, v11 -; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsrl.vx v11, v8, a2 -; RV32-NEXT: li a3, 40 -; RV32-NEXT: vsrl.vx v12, v8, a3 -; RV32-NEXT: lui a4, 16 -; RV32-NEXT: addi a4, a4, -256 +; RV32-NEXT: vsrl.vi v11, v8, 8 +; RV32-NEXT: vand.vv v11, v11, v10 +; RV32-NEXT: vsrl.vi v12, v8, 24 +; RV32-NEXT: lui a4, 4080 ; RV32-NEXT: vand.vx v12, v12, a4 -; RV32-NEXT: vor.vv v11, v12, v11 -; RV32-NEXT: vor.vv v10, v10, v11 -; RV32-NEXT: vand.vv v9, v8, v9 -; RV32-NEXT: vsll.vi v9, v9, 8 -; RV32-NEXT: vand.vx v11, v8, a1 -; RV32-NEXT: vsll.vi v11, v11, 24 +; RV32-NEXT: vor.vv v11, v11, v12 ; RV32-NEXT: vor.vv v9, v11, 
v9 -; RV32-NEXT: vsll.vx v11, v8, a2 +; RV32-NEXT: vsll.vx v11, v8, a1 +; RV32-NEXT: vand.vx v12, v8, a3 +; RV32-NEXT: vsll.vx v12, v12, a2 +; RV32-NEXT: vor.vv v11, v11, v12 +; RV32-NEXT: vand.vv v10, v8, v10 +; RV32-NEXT: vsll.vi v10, v10, 8 ; RV32-NEXT: vand.vx v8, v8, a4 -; RV32-NEXT: vsll.vx v8, v8, a3 +; RV32-NEXT: vsll.vi v8, v8, 24 +; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vor.vv v8, v11, v8 ; RV32-NEXT: vor.vv v8, v8, v9 -; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret ; @@ -110,34 +110,34 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: li a1, 56 -; RV64-NEXT: vsrl.vx v9, v8, a1 -; RV64-NEXT: li a2, 40 -; RV64-NEXT: vsrl.vx v10, v8, a2 -; RV64-NEXT: lui a3, 16 -; RV64-NEXT: addiw a3, a3, -256 -; RV64-NEXT: vand.vx v10, v10, a3 -; RV64-NEXT: vor.vv v9, v10, v9 -; RV64-NEXT: vsrl.vi v10, v8, 24 -; RV64-NEXT: lui a4, 4080 -; RV64-NEXT: vand.vx v10, v10, a4 -; RV64-NEXT: vsrl.vi v11, v8, 8 -; RV64-NEXT: li a5, 255 -; RV64-NEXT: slli a5, a5, 24 -; RV64-NEXT: vand.vx v11, v11, a5 -; RV64-NEXT: vor.vv v10, v11, v10 +; RV64-NEXT: li a1, 255 +; RV64-NEXT: slli a1, a1, 24 +; RV64-NEXT: vand.vx v9, v8, a1 +; RV64-NEXT: vsll.vi v9, v9, 8 +; RV64-NEXT: lui a2, 4080 +; RV64-NEXT: vand.vx v10, v8, a2 +; RV64-NEXT: vsll.vi v10, v10, 24 ; RV64-NEXT: vor.vv v9, v10, v9 -; RV64-NEXT: vand.vx v10, v8, a5 -; RV64-NEXT: vsll.vi v10, v10, 8 +; RV64-NEXT: li a3, 56 +; RV64-NEXT: vsll.vx v10, v8, a3 +; RV64-NEXT: lui a4, 16 +; RV64-NEXT: addiw a4, a4, -256 ; RV64-NEXT: vand.vx v11, v8, a4 -; RV64-NEXT: vsll.vi v11, v11, 24 +; RV64-NEXT: li a5, 40 +; RV64-NEXT: vsll.vx v11, v11, a5 +; RV64-NEXT: vor.vv v10, v10, v11 +; RV64-NEXT: vor.vv v9, v10, v9 +; RV64-NEXT: vsrl.vx v10, v8, a3 +; RV64-NEXT: vsrl.vx v11, v8, a5 +; RV64-NEXT: vand.vx v11, v11, a4 ; RV64-NEXT: vor.vv v10, v11, v10 -; RV64-NEXT: vsll.vx v11, v8, a1 -; RV64-NEXT: vand.vx v8, v8, a3 -; RV64-NEXT: vsll.vx v8, v8, a2 -; RV64-NEXT: vor.vv v8, v11, v8 +; RV64-NEXT: vsrl.vi v11, v8, 24 +; RV64-NEXT: vand.vx v11, v11, a2 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vor.vv v8, v8, v11 ; RV64-NEXT: vor.vv v8, v8, v10 -; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vor.vv v8, v9, v8 ; RV64-NEXT: vse64.v v8, (a0) ; RV64-NEXT: ret %a = load <2 x i64>, ptr %x @@ -325,20 +325,20 @@ ; LMULMAX2-RV32-NEXT: addi a3, a3, -256 ; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a3 ; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10 -; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 24 -; LMULMAX2-RV32-NEXT: lui a4, 4080 -; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4 -; LMULMAX2-RV32-NEXT: li a5, 85 +; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 8 +; LMULMAX2-RV32-NEXT: li a4, 85 ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; LMULMAX2-RV32-NEXT: vmv.v.x v0, a5 +; LMULMAX2-RV32-NEXT: vmv.v.x v0, a4 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0 -; LMULMAX2-RV32-NEXT: lui a5, 1044480 -; LMULMAX2-RV32-NEXT: vmerge.vxm v14, v14, a5, v0 +; LMULMAX2-RV32-NEXT: lui a4, 1044480 +; LMULMAX2-RV32-NEXT: vmerge.vxm v14, v14, a4, v0 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 8 -; LMULMAX2-RV32-NEXT: vand.vv v16, v16, v14 -; LMULMAX2-RV32-NEXT: vor.vv v12, v16, v12 +; LMULMAX2-RV32-NEXT: vand.vv v12, v12, v14 +; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 24 +; LMULMAX2-RV32-NEXT: lui a4, 4080 +; LMULMAX2-RV32-NEXT: vand.vx v16, v16, a4 +; LMULMAX2-RV32-NEXT: vor.vv v12, v12, v16 ; 
LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10 ; LMULMAX2-RV32-NEXT: vsll.vx v12, v8, a1 ; LMULMAX2-RV32-NEXT: vand.vx v16, v8, a3 @@ -358,98 +358,98 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: li a1, 56 -; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: li a2, 40 -; LMULMAX2-RV64-NEXT: vsrl.vx v12, v8, a2 -; LMULMAX2-RV64-NEXT: lui a3, 16 -; LMULMAX2-RV64-NEXT: addiw a3, a3, -256 -; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a3 -; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10 -; LMULMAX2-RV64-NEXT: vsrl.vi v12, v8, 24 -; LMULMAX2-RV64-NEXT: lui a4, 4080 -; LMULMAX2-RV64-NEXT: vand.vx v12, v12, a4 -; LMULMAX2-RV64-NEXT: vsrl.vi v14, v8, 8 -; LMULMAX2-RV64-NEXT: li a5, 255 -; LMULMAX2-RV64-NEXT: slli a5, a5, 24 -; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a5 -; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12 +; LMULMAX2-RV64-NEXT: li a1, 255 +; LMULMAX2-RV64-NEXT: slli a1, a1, 24 +; LMULMAX2-RV64-NEXT: vand.vx v10, v8, a1 +; LMULMAX2-RV64-NEXT: vsll.vi v10, v10, 8 +; LMULMAX2-RV64-NEXT: lui a2, 4080 +; LMULMAX2-RV64-NEXT: vand.vx v12, v8, a2 +; LMULMAX2-RV64-NEXT: vsll.vi v12, v12, 24 ; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10 -; LMULMAX2-RV64-NEXT: vand.vx v12, v8, a5 -; LMULMAX2-RV64-NEXT: vsll.vi v12, v12, 8 +; LMULMAX2-RV64-NEXT: li a3, 56 +; LMULMAX2-RV64-NEXT: vsll.vx v12, v8, a3 +; LMULMAX2-RV64-NEXT: lui a4, 16 +; LMULMAX2-RV64-NEXT: addiw a4, a4, -256 ; LMULMAX2-RV64-NEXT: vand.vx v14, v8, a4 -; LMULMAX2-RV64-NEXT: vsll.vi v14, v14, 24 +; LMULMAX2-RV64-NEXT: li a5, 40 +; LMULMAX2-RV64-NEXT: vsll.vx v14, v14, a5 +; LMULMAX2-RV64-NEXT: vor.vv v12, v12, v14 +; LMULMAX2-RV64-NEXT: vor.vv v10, v12, v10 +; LMULMAX2-RV64-NEXT: vsrl.vx v12, v8, a3 +; LMULMAX2-RV64-NEXT: vsrl.vx v14, v8, a5 +; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a4 ; LMULMAX2-RV64-NEXT: vor.vv v12, v14, v12 -; LMULMAX2-RV64-NEXT: vsll.vx v14, v8, a1 -; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a3 -; LMULMAX2-RV64-NEXT: vsll.vx v8, v8, a2 -; LMULMAX2-RV64-NEXT: vor.vv v8, v14, v8 +; LMULMAX2-RV64-NEXT: vsrl.vi v14, v8, 24 +; LMULMAX2-RV64-NEXT: vand.vx v14, v14, a2 +; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 +; LMULMAX2-RV64-NEXT: vand.vx v8, v8, a1 +; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v14 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v12 -; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 +; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bswap_v4i64: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 -; LMULMAX1-RV32-NEXT: vle64.v v9, (a1) +; LMULMAX1-RV32-NEXT: vle64.v v8, (a1) +; LMULMAX1-RV32-NEXT: vle64.v v9, (a0) +; LMULMAX1-RV32-NEXT: li a2, 56 +; LMULMAX1-RV32-NEXT: vsrl.vx v10, v8, a2 +; LMULMAX1-RV32-NEXT: li a3, 40 +; LMULMAX1-RV32-NEXT: vsrl.vx v11, v8, a3 +; LMULMAX1-RV32-NEXT: lui a4, 16 +; LMULMAX1-RV32-NEXT: addi a4, a4, -256 +; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a4 +; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX1-RV32-NEXT: vmv.v.i v10, 0 +; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0 ; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5 -; LMULMAX1-RV32-NEXT: lui a2, 1044480 -; LMULMAX1-RV32-NEXT: vmerge.vxm v10, v10, a2, v0 +; LMULMAX1-RV32-NEXT: lui a5, 1044480 +; LMULMAX1-RV32-NEXT: vmerge.vxm v11, v11, a5, v0 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 8 -; 
LMULMAX1-RV32-NEXT: vand.vv v11, v11, v10 -; LMULMAX1-RV32-NEXT: vsrl.vi v12, v9, 24 -; LMULMAX1-RV32-NEXT: lui a2, 4080 -; LMULMAX1-RV32-NEXT: vand.vx v12, v12, a2 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v12 -; LMULMAX1-RV32-NEXT: li a3, 56 -; LMULMAX1-RV32-NEXT: vsrl.vx v12, v9, a3 -; LMULMAX1-RV32-NEXT: li a4, 40 -; LMULMAX1-RV32-NEXT: vsrl.vx v13, v9, a4 -; LMULMAX1-RV32-NEXT: lui a5, 16 -; LMULMAX1-RV32-NEXT: addi a5, a5, -256 -; LMULMAX1-RV32-NEXT: vand.vx v13, v13, a5 -; LMULMAX1-RV32-NEXT: vor.vv v12, v13, v12 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v12 -; LMULMAX1-RV32-NEXT: vand.vv v12, v9, v10 -; LMULMAX1-RV32-NEXT: vsll.vi v12, v12, 8 -; LMULMAX1-RV32-NEXT: vand.vx v13, v9, a2 -; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 24 -; LMULMAX1-RV32-NEXT: vor.vv v12, v13, v12 -; LMULMAX1-RV32-NEXT: vsll.vx v13, v9, a3 -; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a5 -; LMULMAX1-RV32-NEXT: vsll.vx v9, v9, a4 -; LMULMAX1-RV32-NEXT: vor.vv v9, v13, v9 -; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v12 -; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v11 -; LMULMAX1-RV32-NEXT: vsrl.vi v11, v8, 8 -; LMULMAX1-RV32-NEXT: vand.vv v11, v11, v10 -; LMULMAX1-RV32-NEXT: vsrl.vi v12, v8, 24 -; LMULMAX1-RV32-NEXT: vand.vx v12, v12, a2 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v12 -; LMULMAX1-RV32-NEXT: vsrl.vx v12, v8, a3 -; LMULMAX1-RV32-NEXT: vsrl.vx v13, v8, a4 +; LMULMAX1-RV32-NEXT: vsrl.vi v12, v8, 8 +; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v11 +; LMULMAX1-RV32-NEXT: vsrl.vi v13, v8, 24 +; LMULMAX1-RV32-NEXT: lui a5, 4080 ; LMULMAX1-RV32-NEXT: vand.vx v13, v13, a5 -; LMULMAX1-RV32-NEXT: vor.vv v12, v13, v12 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v12 -; LMULMAX1-RV32-NEXT: vand.vv v10, v8, v10 -; LMULMAX1-RV32-NEXT: vsll.vi v10, v10, 8 -; LMULMAX1-RV32-NEXT: vand.vx v12, v8, a2 -; LMULMAX1-RV32-NEXT: vsll.vi v12, v12, 24 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 ; LMULMAX1-RV32-NEXT: vor.vv v10, v12, v10 -; LMULMAX1-RV32-NEXT: vsll.vx v12, v8, a3 +; LMULMAX1-RV32-NEXT: vsll.vx v12, v8, a2 +; LMULMAX1-RV32-NEXT: vand.vx v13, v8, a4 +; LMULMAX1-RV32-NEXT: vsll.vx v13, v13, a3 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; LMULMAX1-RV32-NEXT: vand.vv v13, v8, v11 +; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 8 ; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a5 -; LMULMAX1-RV32-NEXT: vsll.vx v8, v8, a4 +; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 24 +; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v13 ; LMULMAX1-RV32-NEXT: vor.vv v8, v12, v8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11 -; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) -; LMULMAX1-RV32-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV32-NEXT: vsrl.vx v10, v9, a2 +; LMULMAX1-RV32-NEXT: vsrl.vx v12, v9, a3 +; LMULMAX1-RV32-NEXT: vand.vx v12, v12, a4 +; LMULMAX1-RV32-NEXT: vor.vv v10, v12, v10 +; LMULMAX1-RV32-NEXT: vsrl.vi v12, v9, 8 +; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v11 +; LMULMAX1-RV32-NEXT: vsrl.vi v13, v9, 24 +; LMULMAX1-RV32-NEXT: vand.vx v13, v13, a5 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; LMULMAX1-RV32-NEXT: vor.vv v10, v12, v10 +; LMULMAX1-RV32-NEXT: vsll.vx v12, v9, a2 +; LMULMAX1-RV32-NEXT: vand.vx v13, v9, a4 +; LMULMAX1-RV32-NEXT: vsll.vx v13, v13, a3 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; LMULMAX1-RV32-NEXT: vand.vv v11, v9, v11 +; LMULMAX1-RV32-NEXT: vsll.vi v11, v11, 8 +; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a5 +; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 24 +; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v11 +; LMULMAX1-RV32-NEXT: vor.vv v9, v12, v9 +; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10 +; LMULMAX1-RV32-NEXT: vse64.v v9, (a0) +; LMULMAX1-RV32-NEXT: vse64.v 
v8, (a1) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bswap_v4i64: @@ -458,55 +458,55 @@ ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) -; LMULMAX1-RV64-NEXT: li a2, 56 -; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a2 -; LMULMAX1-RV64-NEXT: li a3, 40 -; LMULMAX1-RV64-NEXT: vsrl.vx v11, v8, a3 -; LMULMAX1-RV64-NEXT: lui a4, 16 -; LMULMAX1-RV64-NEXT: addiw a4, a4, -256 -; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4 +; LMULMAX1-RV64-NEXT: li a2, 255 +; LMULMAX1-RV64-NEXT: slli a2, a2, 24 +; LMULMAX1-RV64-NEXT: vand.vx v10, v8, a2 +; LMULMAX1-RV64-NEXT: vsll.vi v10, v10, 8 +; LMULMAX1-RV64-NEXT: lui a3, 4080 +; LMULMAX1-RV64-NEXT: vand.vx v11, v8, a3 +; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 24 ; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV64-NEXT: vsrl.vi v11, v8, 24 -; LMULMAX1-RV64-NEXT: lui a5, 4080 -; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a5 -; LMULMAX1-RV64-NEXT: vsrl.vi v12, v8, 8 -; LMULMAX1-RV64-NEXT: li a6, 255 -; LMULMAX1-RV64-NEXT: slli a6, a6, 24 -; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a6 -; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 -; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV64-NEXT: vand.vx v11, v8, a6 -; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8 +; LMULMAX1-RV64-NEXT: li a4, 56 +; LMULMAX1-RV64-NEXT: vsll.vx v11, v8, a4 +; LMULMAX1-RV64-NEXT: lui a5, 16 +; LMULMAX1-RV64-NEXT: addiw a5, a5, -256 ; LMULMAX1-RV64-NEXT: vand.vx v12, v8, a5 -; LMULMAX1-RV64-NEXT: vsll.vi v12, v12, 24 -; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 -; LMULMAX1-RV64-NEXT: vsll.vx v12, v8, a2 -; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a4 -; LMULMAX1-RV64-NEXT: vsll.vx v8, v8, a3 -; LMULMAX1-RV64-NEXT: vor.vv v8, v12, v8 -; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11 -; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV64-NEXT: vsrl.vx v10, v9, a2 -; LMULMAX1-RV64-NEXT: vsrl.vx v11, v9, a3 -; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a4 +; LMULMAX1-RV64-NEXT: li a6, 40 +; LMULMAX1-RV64-NEXT: vsll.vx v12, v12, a6 +; LMULMAX1-RV64-NEXT: vor.vv v11, v11, v12 ; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV64-NEXT: vsrl.vi v11, v9, 24 -; LMULMAX1-RV64-NEXT: vand.vx v11, v11, a5 -; LMULMAX1-RV64-NEXT: vsrl.vi v12, v9, 8 -; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a6 +; LMULMAX1-RV64-NEXT: vsrl.vx v11, v8, a4 +; LMULMAX1-RV64-NEXT: vsrl.vx v12, v8, a6 +; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a5 ; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 +; LMULMAX1-RV64-NEXT: vsrl.vi v12, v8, 24 +; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a3 +; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 +; LMULMAX1-RV64-NEXT: vand.vx v8, v8, a2 +; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v12 +; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11 +; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8 +; LMULMAX1-RV64-NEXT: vand.vx v10, v9, a2 +; LMULMAX1-RV64-NEXT: vsll.vi v10, v10, 8 +; LMULMAX1-RV64-NEXT: vand.vx v11, v9, a3 +; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 24 ; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV64-NEXT: vand.vx v11, v9, a6 -; LMULMAX1-RV64-NEXT: vsll.vi v11, v11, 8 +; LMULMAX1-RV64-NEXT: vsll.vx v11, v9, a4 ; LMULMAX1-RV64-NEXT: vand.vx v12, v9, a5 -; LMULMAX1-RV64-NEXT: vsll.vi v12, v12, 24 +; LMULMAX1-RV64-NEXT: vsll.vx v12, v12, a6 +; LMULMAX1-RV64-NEXT: vor.vv v11, v11, v12 +; LMULMAX1-RV64-NEXT: vor.vv v10, v11, v10 +; LMULMAX1-RV64-NEXT: vsrl.vx v11, v9, a4 +; LMULMAX1-RV64-NEXT: vsrl.vx v12, v9, a6 +; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a5 ; LMULMAX1-RV64-NEXT: vor.vv v11, v12, v11 -; LMULMAX1-RV64-NEXT: vsll.vx v12, v9, a2 -; LMULMAX1-RV64-NEXT: vand.vx v9, v9, 
a4 -; LMULMAX1-RV64-NEXT: vsll.vx v9, v9, a3 -; LMULMAX1-RV64-NEXT: vor.vv v9, v12, v9 +; LMULMAX1-RV64-NEXT: vsrl.vi v12, v9, 24 +; LMULMAX1-RV64-NEXT: vand.vx v12, v12, a3 +; LMULMAX1-RV64-NEXT: vsrl.vi v9, v9, 8 +; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a2 +; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v12 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v11 -; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 +; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9 ; LMULMAX1-RV64-NEXT: vse64.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v8, (a1) ; LMULMAX1-RV64-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll @@ -1618,14 +1618,15 @@ define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v15i64_unmasked: ; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: lui a1, 349525 ; RV32-NEXT: addi a1, a1, 1365 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v24, v8, 1 -; RV32-NEXT: vand.vv v16, v24, v16 +; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: lui a1, 209715 ; RV32-NEXT: addi a1, a1, 819 @@ -1775,14 +1776,15 @@ define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v16i64_unmasked: ; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: lui a1, 349525 ; RV32-NEXT: addi a1, a1, 1365 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v24, v8, 1 -; RV32-NEXT: vand.vv v16, v24, v16 +; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: lui a1, 209715 ; RV32-NEXT: addi a1, a1, 819 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll @@ -264,13 +264,13 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) +; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 +; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 -; LMULMAX2-RV32-NEXT: vand.vv v9, v10, v9 +; LMULMAX2-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 @@ -340,13 +340,13 @@ ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) +; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 349525 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 +; LMULMAX1-RV32-NEXT: vmv.v.x v10, a1 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 -; 
LMULMAX1-RV32-NEXT: vand.vv v9, v10, v9 +; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 209715 ; LMULMAX1-RV32-NEXT: addi a1, a1, 819 @@ -772,13 +772,13 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) +; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 +; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX2-RV32-NEXT: vsrl.vi v12, v8, 1 -; LMULMAX2-RV32-NEXT: vand.vv v10, v12, v10 +; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 @@ -847,56 +847,56 @@ ; LMULMAX1-RV32-LABEL: ctpop_v4i64: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 -; LMULMAX1-RV32-NEXT: vle64.v v9, (a1) +; LMULMAX1-RV32-NEXT: vle64.v v8, (a1) +; LMULMAX1-RV32-NEXT: vle64.v v9, (a0) +; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: lui a2, 349525 ; LMULMAX1-RV32-NEXT: addi a2, a2, 1365 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX1-RV32-NEXT: vmv.v.x v10, a2 +; LMULMAX1-RV32-NEXT: vmv.v.x v11, a2 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 1 -; LMULMAX1-RV32-NEXT: vand.vv v11, v11, v10 -; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v11 +; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v11 +; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: lui a2, 209715 ; LMULMAX1-RV32-NEXT: addi a2, a2, 819 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX1-RV32-NEXT: vmv.v.x v11, a2 +; LMULMAX1-RV32-NEXT: vmv.v.x v10, a2 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vand.vv v12, v9, v11 -; LMULMAX1-RV32-NEXT: vsrl.vi v9, v9, 2 -; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v11 -; LMULMAX1-RV32-NEXT: vadd.vv v9, v12, v9 -; LMULMAX1-RV32-NEXT: vsrl.vi v12, v9, 4 -; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v12 +; LMULMAX1-RV32-NEXT: vand.vv v12, v8, v10 +; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 +; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 +; LMULMAX1-RV32-NEXT: vadd.vv v8, v12, v8 +; LMULMAX1-RV32-NEXT: vsrl.vi v12, v8, 4 +; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v12 ; LMULMAX1-RV32-NEXT: lui a2, 61681 ; LMULMAX1-RV32-NEXT: addi a2, a2, -241 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v12, a2 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v12 +; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v12 ; LMULMAX1-RV32-NEXT: lui a2, 4112 ; LMULMAX1-RV32-NEXT: addi a2, a2, 257 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v13, a2 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vmul.vv v9, v9, v13 -; LMULMAX1-RV32-NEXT: li a2, 56 -; LMULMAX1-RV32-NEXT: vsrl.vx v9, v9, a2 -; LMULMAX1-RV32-NEXT: vsrl.vi v14, v8, 1 -; LMULMAX1-RV32-NEXT: vand.vv v10, v14, v10 -; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: vand.vv v10, v8, v11 -; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 -; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11 -; LMULMAX1-RV32-NEXT: vadd.vv v8, v10, v8 -; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 4 -; 
LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v12 ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v13 +; LMULMAX1-RV32-NEXT: li a2, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a2 -; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) -; LMULMAX1-RV32-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV32-NEXT: vsrl.vi v14, v9, 1 +; LMULMAX1-RV32-NEXT: vand.vv v11, v14, v11 +; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v11 +; LMULMAX1-RV32-NEXT: vand.vv v11, v9, v10 +; LMULMAX1-RV32-NEXT: vsrl.vi v9, v9, 2 +; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 +; LMULMAX1-RV32-NEXT: vadd.vv v9, v11, v9 +; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 4 +; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v10 +; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v12 +; LMULMAX1-RV32-NEXT: vmul.vv v9, v9, v13 +; LMULMAX1-RV32-NEXT: vsrl.vx v9, v9, a2 +; LMULMAX1-RV32-NEXT: vse64.v v9, (a0) +; LMULMAX1-RV32-NEXT: vse64.v v8, (a1) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ctpop_v4i64: Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll @@ -520,26 +520,25 @@ ; LMULMAX2-RV32F-LABEL: cttz_v2i64: ; LMULMAX2-RV32F: # %bb.0: ; LMULMAX2-RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX2-RV32F-NEXT: vle64.v v9, (a0) +; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX2-RV32F-NEXT: vmv.v.i v10, 0 +; LMULMAX2-RV32F-NEXT: vmv.v.i v9, 0 ; LMULMAX2-RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX2-RV32F-NEXT: vmseq.vv v8, v9, v10 -; LMULMAX2-RV32F-NEXT: vsub.vv v10, v10, v9 -; LMULMAX2-RV32F-NEXT: vand.vv v9, v9, v10 +; LMULMAX2-RV32F-NEXT: vsub.vv v10, v9, v8 +; LMULMAX2-RV32F-NEXT: vand.vv v10, v8, v10 ; LMULMAX2-RV32F-NEXT: vmset.m v0 ; LMULMAX2-RV32F-NEXT: fsrmi a1, 1 ; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v10, v9, v0.t +; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v11, v10, v0.t ; LMULMAX2-RV32F-NEXT: fsrm a1 -; LMULMAX2-RV32F-NEXT: vsrl.vi v9, v10, 23 +; LMULMAX2-RV32F-NEXT: vsrl.vi v10, v11, 23 ; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; LMULMAX2-RV32F-NEXT: vzext.vf2 v10, v9 +; LMULMAX2-RV32F-NEXT: vzext.vf2 v11, v10 ; LMULMAX2-RV32F-NEXT: li a1, 127 -; LMULMAX2-RV32F-NEXT: vsub.vx v9, v10, a1 +; LMULMAX2-RV32F-NEXT: vsub.vx v10, v11, a1 +; LMULMAX2-RV32F-NEXT: vmseq.vv v0, v8, v9 ; LMULMAX2-RV32F-NEXT: li a1, 64 -; LMULMAX2-RV32F-NEXT: vmv.v.v v0, v8 -; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v9, a1, v0 +; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v10, a1, v0 ; LMULMAX2-RV32F-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32F-NEXT: ret ; @@ -567,24 +566,23 @@ ; LMULMAX2-RV32D-LABEL: cttz_v2i64: ; LMULMAX2-RV32D: # %bb.0: ; LMULMAX2-RV32D-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX2-RV32D-NEXT: vle64.v v9, (a0) +; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX2-RV32D-NEXT: vmv.v.i v10, 0 +; LMULMAX2-RV32D-NEXT: vmv.v.i v9, 0 ; LMULMAX2-RV32D-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX2-RV32D-NEXT: vmseq.vv v8, v9, v10 -; LMULMAX2-RV32D-NEXT: vsub.vv v10, v10, v9 -; LMULMAX2-RV32D-NEXT: vand.vv v9, v9, v10 +; LMULMAX2-RV32D-NEXT: vsub.vv v10, v9, v8 +; LMULMAX2-RV32D-NEXT: vand.vv v10, v8, v10 ; LMULMAX2-RV32D-NEXT: vmset.m v0 ; LMULMAX2-RV32D-NEXT: fsrmi a1, 1 -; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v9, v9, v0.t +; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v10, v10, v0.t ; LMULMAX2-RV32D-NEXT: fsrm a1 
; LMULMAX2-RV32D-NEXT: li a1, 52 -; LMULMAX2-RV32D-NEXT: vsrl.vx v9, v9, a1 +; LMULMAX2-RV32D-NEXT: vsrl.vx v10, v10, a1 ; LMULMAX2-RV32D-NEXT: li a1, 1023 -; LMULMAX2-RV32D-NEXT: vsub.vx v9, v9, a1 +; LMULMAX2-RV32D-NEXT: vsub.vx v10, v10, a1 +; LMULMAX2-RV32D-NEXT: vmseq.vv v0, v8, v9 ; LMULMAX2-RV32D-NEXT: li a1, 64 -; LMULMAX2-RV32D-NEXT: vmv.v.v v0, v8 -; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v9, a1, v0 +; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v10, a1, v0 ; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32D-NEXT: ret ; @@ -611,24 +609,23 @@ ; LMULMAX8-RV32-LABEL: cttz_v2i64: ; LMULMAX8-RV32: # %bb.0: ; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX8-RV32-NEXT: vle64.v v9, (a0) +; LMULMAX8-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX8-RV32-NEXT: vmv.v.i v10, 0 +; LMULMAX8-RV32-NEXT: vmv.v.i v9, 0 ; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX8-RV32-NEXT: vmseq.vv v8, v9, v10 -; LMULMAX8-RV32-NEXT: vsub.vv v10, v10, v9 -; LMULMAX8-RV32-NEXT: vand.vv v9, v9, v10 +; LMULMAX8-RV32-NEXT: vsub.vv v10, v9, v8 +; LMULMAX8-RV32-NEXT: vand.vv v10, v8, v10 ; LMULMAX8-RV32-NEXT: vmset.m v0 ; LMULMAX8-RV32-NEXT: fsrmi a1, 1 -; LMULMAX8-RV32-NEXT: vfcvt.f.xu.v v9, v9, v0.t +; LMULMAX8-RV32-NEXT: vfcvt.f.xu.v v10, v10, v0.t ; LMULMAX8-RV32-NEXT: fsrm a1 ; LMULMAX8-RV32-NEXT: li a1, 52 -; LMULMAX8-RV32-NEXT: vsrl.vx v9, v9, a1 +; LMULMAX8-RV32-NEXT: vsrl.vx v10, v10, a1 ; LMULMAX8-RV32-NEXT: li a1, 1023 -; LMULMAX8-RV32-NEXT: vsub.vx v9, v9, a1 +; LMULMAX8-RV32-NEXT: vsub.vx v10, v10, a1 +; LMULMAX8-RV32-NEXT: vmseq.vv v0, v8, v9 ; LMULMAX8-RV32-NEXT: li a1, 64 -; LMULMAX8-RV32-NEXT: vmv.v.v v0, v8 -; LMULMAX8-RV32-NEXT: vmerge.vxm v8, v9, a1, v0 +; LMULMAX8-RV32-NEXT: vmerge.vxm v8, v10, a1, v0 ; LMULMAX8-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV32-NEXT: ret ; @@ -1184,26 +1181,25 @@ ; LMULMAX2-RV32F-LABEL: cttz_v4i64: ; LMULMAX2-RV32F: # %bb.0: ; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX2-RV32F-NEXT: vle64.v v10, (a0) +; LMULMAX2-RV32F-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; LMULMAX2-RV32F-NEXT: vmv.v.i v12, 0 +; LMULMAX2-RV32F-NEXT: vmv.v.i v10, 0 ; LMULMAX2-RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX2-RV32F-NEXT: vmseq.vv v8, v10, v12 -; LMULMAX2-RV32F-NEXT: vsub.vv v12, v12, v10 -; LMULMAX2-RV32F-NEXT: vand.vv v10, v10, v12 +; LMULMAX2-RV32F-NEXT: vsub.vv v12, v10, v8 +; LMULMAX2-RV32F-NEXT: vand.vv v12, v8, v12 ; LMULMAX2-RV32F-NEXT: vmset.m v0 ; LMULMAX2-RV32F-NEXT: fsrmi a1, 1 ; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v9, v10, v0.t +; LMULMAX2-RV32F-NEXT: vfncvt.f.xu.w v14, v12, v0.t ; LMULMAX2-RV32F-NEXT: fsrm a1 -; LMULMAX2-RV32F-NEXT: vsrl.vi v9, v9, 23 +; LMULMAX2-RV32F-NEXT: vsrl.vi v12, v14, 23 ; LMULMAX2-RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; LMULMAX2-RV32F-NEXT: vzext.vf2 v10, v9 +; LMULMAX2-RV32F-NEXT: vzext.vf2 v14, v12 ; LMULMAX2-RV32F-NEXT: li a1, 127 -; LMULMAX2-RV32F-NEXT: vsub.vx v10, v10, a1 +; LMULMAX2-RV32F-NEXT: vsub.vx v12, v14, a1 +; LMULMAX2-RV32F-NEXT: vmseq.vv v0, v8, v10 ; LMULMAX2-RV32F-NEXT: li a1, 64 -; LMULMAX2-RV32F-NEXT: vmv1r.v v0, v8 -; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v10, a1, v0 +; LMULMAX2-RV32F-NEXT: vmerge.vxm v8, v12, a1, v0 ; LMULMAX2-RV32F-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32F-NEXT: ret ; @@ -1231,24 +1227,23 @@ ; LMULMAX2-RV32D-LABEL: cttz_v4i64: ; LMULMAX2-RV32D: # %bb.0: ; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, 
e64, m2, ta, ma -; LMULMAX2-RV32D-NEXT: vle64.v v10, (a0) +; LMULMAX2-RV32D-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; LMULMAX2-RV32D-NEXT: vmv.v.i v12, 0 +; LMULMAX2-RV32D-NEXT: vmv.v.i v10, 0 ; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX2-RV32D-NEXT: vmseq.vv v8, v10, v12 -; LMULMAX2-RV32D-NEXT: vsub.vv v12, v12, v10 -; LMULMAX2-RV32D-NEXT: vand.vv v10, v10, v12 +; LMULMAX2-RV32D-NEXT: vsub.vv v12, v10, v8 +; LMULMAX2-RV32D-NEXT: vand.vv v12, v8, v12 ; LMULMAX2-RV32D-NEXT: vmset.m v0 ; LMULMAX2-RV32D-NEXT: fsrmi a1, 1 -; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v10, v10, v0.t +; LMULMAX2-RV32D-NEXT: vfcvt.f.xu.v v12, v12, v0.t ; LMULMAX2-RV32D-NEXT: fsrm a1 ; LMULMAX2-RV32D-NEXT: li a1, 52 -; LMULMAX2-RV32D-NEXT: vsrl.vx v10, v10, a1 +; LMULMAX2-RV32D-NEXT: vsrl.vx v12, v12, a1 ; LMULMAX2-RV32D-NEXT: li a1, 1023 -; LMULMAX2-RV32D-NEXT: vsub.vx v10, v10, a1 +; LMULMAX2-RV32D-NEXT: vsub.vx v12, v12, a1 +; LMULMAX2-RV32D-NEXT: vmseq.vv v0, v8, v10 ; LMULMAX2-RV32D-NEXT: li a1, 64 -; LMULMAX2-RV32D-NEXT: vmv1r.v v0, v8 -; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v10, a1, v0 +; LMULMAX2-RV32D-NEXT: vmerge.vxm v8, v12, a1, v0 ; LMULMAX2-RV32D-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32D-NEXT: ret ; @@ -1275,24 +1270,23 @@ ; LMULMAX8-RV32-LABEL: cttz_v4i64: ; LMULMAX8-RV32: # %bb.0: ; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX8-RV32-NEXT: vle64.v v10, (a0) +; LMULMAX8-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; LMULMAX8-RV32-NEXT: vmv.v.i v12, 0 +; LMULMAX8-RV32-NEXT: vmv.v.i v10, 0 ; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX8-RV32-NEXT: vmseq.vv v8, v10, v12 -; LMULMAX8-RV32-NEXT: vsub.vv v12, v12, v10 -; LMULMAX8-RV32-NEXT: vand.vv v10, v10, v12 +; LMULMAX8-RV32-NEXT: vsub.vv v12, v10, v8 +; LMULMAX8-RV32-NEXT: vand.vv v12, v8, v12 ; LMULMAX8-RV32-NEXT: vmset.m v0 ; LMULMAX8-RV32-NEXT: fsrmi a1, 1 -; LMULMAX8-RV32-NEXT: vfcvt.f.xu.v v10, v10, v0.t +; LMULMAX8-RV32-NEXT: vfcvt.f.xu.v v12, v12, v0.t ; LMULMAX8-RV32-NEXT: fsrm a1 ; LMULMAX8-RV32-NEXT: li a1, 52 -; LMULMAX8-RV32-NEXT: vsrl.vx v10, v10, a1 +; LMULMAX8-RV32-NEXT: vsrl.vx v12, v12, a1 ; LMULMAX8-RV32-NEXT: li a1, 1023 -; LMULMAX8-RV32-NEXT: vsub.vx v10, v10, a1 +; LMULMAX8-RV32-NEXT: vsub.vx v12, v12, a1 +; LMULMAX8-RV32-NEXT: vmseq.vv v0, v8, v10 ; LMULMAX8-RV32-NEXT: li a1, 64 -; LMULMAX8-RV32-NEXT: vmv1r.v v0, v8 -; LMULMAX8-RV32-NEXT: vmerge.vxm v8, v10, a1, v0 +; LMULMAX8-RV32-NEXT: vmerge.vxm v8, v12, a1, v0 ; LMULMAX8-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV32-NEXT: ret ; Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll @@ -682,20 +682,20 @@ define i32 @extractelt_sdiv_v4i32(<4 x i32> %x) { ; RV32NOM-LABEL: extractelt_sdiv_v4i32: ; RV32NOM: # %bb.0: -; RV32NOM-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32NOM-NEXT: vmv.v.i v9, -1 -; RV32NOM-NEXT: vmv.v.i v10, 0 -; RV32NOM-NEXT: vslideup.vi v10, v9, 3 ; RV32NOM-NEXT: lui a0, %hi(.LCPI38_0) ; RV32NOM-NEXT: addi a0, a0, %lo(.LCPI38_0) +; RV32NOM-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32NOM-NEXT: vle32.v v9, (a0) +; RV32NOM-NEXT: vmv.v.i v10, -1 +; RV32NOM-NEXT: vmv.v.i v11, 0 +; RV32NOM-NEXT: vslideup.vi v11, v10, 3 ; RV32NOM-NEXT: lui a0, %hi(.LCPI38_1) ; RV32NOM-NEXT: addi a0, a0, %lo(.LCPI38_1) -; RV32NOM-NEXT: vle32.v v11, (a0) -; RV32NOM-NEXT: 
vand.vv v10, v8, v10 -; RV32NOM-NEXT: vmulh.vv v8, v8, v9 -; RV32NOM-NEXT: vadd.vv v8, v8, v10 -; RV32NOM-NEXT: vsra.vv v9, v8, v11 +; RV32NOM-NEXT: vle32.v v10, (a0) +; RV32NOM-NEXT: vmulh.vv v9, v8, v9 +; RV32NOM-NEXT: vand.vv v8, v8, v11 +; RV32NOM-NEXT: vadd.vv v8, v9, v8 +; RV32NOM-NEXT: vsra.vv v9, v8, v10 ; RV32NOM-NEXT: vsrl.vi v8, v8, 31 ; RV32NOM-NEXT: vadd.vv v8, v9, v8 ; RV32NOM-NEXT: vslidedown.vi v8, v8, 2 @@ -717,20 +717,20 @@ ; ; RV64-LABEL: extractelt_sdiv_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV64-NEXT: vmv.v.i v9, -1 -; RV64-NEXT: vmv.v.i v10, 0 -; RV64-NEXT: vslideup.vi v10, v9, 3 ; RV64-NEXT: lui a0, %hi(.LCPI38_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI38_0) +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v9, (a0) +; RV64-NEXT: vmv.v.i v10, -1 +; RV64-NEXT: vmv.v.i v11, 0 +; RV64-NEXT: vslideup.vi v11, v10, 3 ; RV64-NEXT: lui a0, %hi(.LCPI38_1) ; RV64-NEXT: addi a0, a0, %lo(.LCPI38_1) -; RV64-NEXT: vle32.v v11, (a0) -; RV64-NEXT: vand.vv v10, v8, v10 -; RV64-NEXT: vmulh.vv v8, v8, v9 -; RV64-NEXT: vadd.vv v8, v8, v10 -; RV64-NEXT: vsra.vv v8, v8, v11 +; RV64-NEXT: vle32.v v10, (a0) +; RV64-NEXT: vmulh.vv v9, v8, v9 +; RV64-NEXT: vand.vv v8, v8, v11 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsra.vv v8, v8, v10 ; RV64-NEXT: vsrl.vi v9, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v9 ; RV64-NEXT: vslidedown.vi v8, v8, 2 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll @@ -9,7 +9,8 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v9 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVFADD_VV_M1_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1 [[COPY1]], [[COPY]], 2, 6 /* e64 */, implicit $frm + ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF + ; CHECK-NEXT: [[PseudoVFADD_VV_M1_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1 [[DEF]], [[COPY1]], [[COPY]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $frm ; CHECK-NEXT: $v8 = COPY [[PseudoVFADD_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v8 %1 = fadd fast <2 x double> %x, %y Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -99,10 +99,10 @@ ; CHECK-LABEL: vrgather_shuffle_xv_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; CHECK-NEXT: vmv.v.i v9, 5 -; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 4 ; CHECK-NEXT: vmv.v.i v0, 12 -; CHECK-NEXT: vrsub.vi v10, v10, 4 +; CHECK-NEXT: vmv.v.i v9, 5 ; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -177,40 +177,39 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) { ; RV32-LABEL: vrgather_shuffle_vv_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV32-NEXT: vmv.v.i v16, 5 ; RV32-NEXT: lui a0, %hi(.LCPI11_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI11_0) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle16.v v20, (a0) -; RV32-NEXT: vmv.v.i v21, 2 -; RV32-NEXT: vslideup.vi v21, v16, 7 -; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-NEXT: vrgatherei16.vv v16, v8, v20 +; RV32-NEXT: 
vsetvli zero, zero, e16, m1, ta, ma +; RV32-NEXT: vmv.v.i v8, 5 +; RV32-NEXT: vmv.v.i v9, 2 +; RV32-NEXT: vslideup.vi v9, v8, 7 ; RV32-NEXT: li a0, 164 -; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32-NEXT: vmv.v.x v0, a0 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vrgatherei16.vv v16, v12, v21, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vrgatherei16.vv v16, v12, v9, v0.t ; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vv_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: li a0, 5 -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV64-NEXT: vmv.s.x v16, a0 -; RV64-NEXT: vmv.v.i v20, 2 ; RV64-NEXT: lui a0, %hi(.LCPI11_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI11_0) -; RV64-NEXT: vle64.v v24, (a0) -; RV64-NEXT: vslideup.vi v20, v16, 7 -; RV64-NEXT: vrgather.vv v16, v8, v24 +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-NEXT: vle64.v v20, (a0) +; RV64-NEXT: vmv4r.v v16, v8 +; RV64-NEXT: vrgather.vv v8, v16, v20 +; RV64-NEXT: li a0, 5 +; RV64-NEXT: vmv.s.x v20, a0 +; RV64-NEXT: vmv.v.i v16, 2 +; RV64-NEXT: vslideup.vi v16, v20, 7 ; RV64-NEXT: li a0, 164 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.v.x v0, a0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vrgather.vv v16, v12, v20, v0.t -; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: vrgather.vv v8, v12, v16, v0.t ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> ret <8 x i64> %s @@ -386,10 +385,9 @@ define <8 x i8> @splat_ve2_we0(<8 x i8> %v, <8 x i8> %w) { ; CHECK-LABEL: splat_ve2_we0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: li a0, 66 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v0, a0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 2 ; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 @@ -420,11 +418,10 @@ define <8 x i8> @splat_ve2_we0_ins_i0we4(<8 x i8> %v, <8 x i8> %w) { ; CHECK-LABEL: splat_ve2_we0_ins_i0we4: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 67 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.v.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 2 +; CHECK-NEXT: li a0, 67 +; CHECK-NEXT: vmv.v.x v0, a0 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 4 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -471,14 +468,14 @@ ; CHECK-LABEL: splat_ve2_we0_ins_i2we4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; CHECK-NEXT: vmv.v.i v10, 4 +; CHECK-NEXT: vrgather.vi v10, v8, 2 +; CHECK-NEXT: vmv.v.i v8, 4 ; CHECK-NEXT: vmv.v.i v11, 0 ; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, ma -; CHECK-NEXT: vslideup.vi v11, v10, 2 +; CHECK-NEXT: vslideup.vi v11, v8, 2 ; CHECK-NEXT: li a0, 70 ; CHECK-NEXT: vmv.v.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vrgather.vi v10, v8, 2 ; CHECK-NEXT: vrgather.vv v10, v9, v11, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -489,38 +486,38 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) { ; RV32-LABEL: splat_ve2_we0_ins_i2ve4_i5we6: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; RV32-NEXT: vmv.v.i v10, 6 -; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 6, e8, mf2, tu, ma -; RV32-NEXT: vslideup.vi v11, v10, 5 ; RV32-NEXT: lui a0, 8256 ; RV32-NEXT: addi a0, a0, 2 ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma -; RV32-NEXT: vmv.v.x v12, a0 -; RV32-NEXT: vsetivli zero, 
8, e8, mf2, ta, mu +; RV32-NEXT: vmv.v.x v11, a0 +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: vrgather.vv v10, v8, v11 +; RV32-NEXT: vmv.v.i v8, 6 +; RV32-NEXT: vmv.v.i v11, 0 +; RV32-NEXT: vsetivli zero, 6, e8, mf2, tu, ma +; RV32-NEXT: vslideup.vi v11, v8, 5 ; RV32-NEXT: li a0, 98 ; RV32-NEXT: vmv.v.x v0, a0 -; RV32-NEXT: vrgather.vv v10, v8, v12 +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vrgather.vv v10, v9, v11, v0.t ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: splat_ve2_we0_ins_i2ve4_i5we6: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; RV64-NEXT: vmv.v.i v10, 6 -; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 6, e8, mf2, tu, ma -; RV64-NEXT: vslideup.vi v11, v10, 5 ; RV64-NEXT: lui a0, 8256 ; RV64-NEXT: addiw a0, a0, 2 ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma -; RV64-NEXT: vmv.v.x v12, a0 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vmv.v.x v11, a0 +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: vrgather.vv v10, v8, v11 +; RV64-NEXT: vmv.v.i v8, 6 +; RV64-NEXT: vmv.v.i v11, 0 +; RV64-NEXT: vsetivli zero, 6, e8, mf2, tu, ma +; RV64-NEXT: vslideup.vi v11, v8, 5 ; RV64-NEXT: li a0, 98 ; RV64-NEXT: vmv.v.x v0, a0 -; RV64-NEXT: vrgather.vv v10, v8, v12 +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vrgather.vv v10, v9, v11, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -780,19 +780,19 @@ ; CHECK-LABEL: sdiv_v6i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma -; CHECK-NEXT: vle16.v v8, (a1) -; CHECK-NEXT: vle16.v v9, (a0) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vle16.v v9, (a1) +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vdiv.vv v10, v8, v9 ; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma -; CHECK-NEXT: vslidedown.vi v10, v8, 4 -; CHECK-NEXT: vslidedown.vi v11, v9, 4 +; CHECK-NEXT: vslidedown.vi v9, v9, 4 +; CHECK-NEXT: vslidedown.vi v8, v8, 4 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma -; CHECK-NEXT: vdiv.vv v10, v11, v10 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; CHECK-NEXT: vdiv.vv v8, v9, v8 +; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; CHECK-NEXT: vslideup.vi v8, v10, 4 +; CHECK-NEXT: vslideup.vi v10, v8, 4 ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma -; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: vse16.v v10, (a0) ; CHECK-NEXT: ret %a = load <6 x i16>, ptr %x %b = load <6 x i16>, ptr %y @@ -869,19 +869,19 @@ ; CHECK-LABEL: srem_v6i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma -; CHECK-NEXT: vle16.v v8, (a1) -; CHECK-NEXT: vle16.v v9, (a0) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vle16.v v9, (a1) +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vrem.vv v10, v8, v9 ; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma -; CHECK-NEXT: vslidedown.vi v10, v8, 4 -; CHECK-NEXT: vslidedown.vi v11, v9, 4 +; CHECK-NEXT: vslidedown.vi v9, v9, 4 +; CHECK-NEXT: vslidedown.vi v8, v8, 4 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma -; CHECK-NEXT: vrem.vv v10, v11, v10 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; CHECK-NEXT: vrem.vv v8, v9, v8 +; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; CHECK-NEXT: vslideup.vi v8, v10, 4 +; CHECK-NEXT: 
vslideup.vi v10, v8, 4 ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma -; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: vse16.v v10, (a0) ; CHECK-NEXT: ret %a = load <6 x i16>, ptr %x %b = load <6 x i16>, ptr %y @@ -958,19 +958,19 @@ ; CHECK-LABEL: udiv_v6i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma -; CHECK-NEXT: vle16.v v8, (a1) -; CHECK-NEXT: vle16.v v9, (a0) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vle16.v v9, (a1) +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vdivu.vv v10, v8, v9 ; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma -; CHECK-NEXT: vslidedown.vi v10, v8, 4 -; CHECK-NEXT: vslidedown.vi v11, v9, 4 +; CHECK-NEXT: vslidedown.vi v9, v9, 4 +; CHECK-NEXT: vslidedown.vi v8, v8, 4 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma -; CHECK-NEXT: vdivu.vv v10, v11, v10 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; CHECK-NEXT: vdivu.vv v8, v9, v8 +; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; CHECK-NEXT: vslideup.vi v8, v10, 4 +; CHECK-NEXT: vslideup.vi v10, v8, 4 ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma -; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: vse16.v v10, (a0) ; CHECK-NEXT: ret %a = load <6 x i16>, ptr %x %b = load <6 x i16>, ptr %y @@ -1047,19 +1047,19 @@ ; CHECK-LABEL: urem_v6i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma -; CHECK-NEXT: vle16.v v8, (a1) -; CHECK-NEXT: vle16.v v9, (a0) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vle16.v v9, (a1) +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vremu.vv v10, v8, v9 ; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma -; CHECK-NEXT: vslidedown.vi v10, v8, 4 -; CHECK-NEXT: vslidedown.vi v11, v9, 4 +; CHECK-NEXT: vslidedown.vi v9, v9, 4 +; CHECK-NEXT: vslidedown.vi v8, v8, 4 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma -; CHECK-NEXT: vremu.vv v10, v11, v10 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; CHECK-NEXT: vremu.vv v8, v9, v8 +; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; CHECK-NEXT: vslideup.vi v8, v10, 4 +; CHECK-NEXT: vslideup.vi v10, v8, 4 ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma -; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: vse16.v v10, (a0) ; CHECK-NEXT: ret %a = load <6 x i16>, ptr %x %b = load <6 x i16>, ptr %y @@ -1205,32 +1205,32 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: lui a1, 1048568 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, ma +; CHECK-NEXT: vmv.v.i v9, 1 ; CHECK-NEXT: vmv.v.i v10, 0 -; CHECK-NEXT: vmv.s.x v10, a1 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vmv.v.i v11, 1 ; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; CHECK-NEXT: vslideup.vi v9, v11, 6 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vslideup.vi v11, v9, 6 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: lui a1, %hi(.LCPI66_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI66_0) ; CHECK-NEXT: vle16.v v12, (a1) -; CHECK-NEXT: vsrl.vv v9, v8, v9 -; CHECK-NEXT: vmulhu.vv v9, v9, v12 -; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vv v11, v8, v11 +; CHECK-NEXT: vmulhu.vv v11, v11, v12 +; CHECK-NEXT: vsub.vv v8, v8, v11 +; CHECK-NEXT: lui a1, 1048568 +; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v10, a1 +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v10 -; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vadd.vv v8, v8, v11 ; CHECK-NEXT: li a1, 33 ; 
CHECK-NEXT: vmv.v.x v0, a1 -; CHECK-NEXT: vmv.v.i v9, 3 -; CHECK-NEXT: vmerge.vim v9, v9, 2, v0 +; CHECK-NEXT: vmv.v.i v10, 3 +; CHECK-NEXT: vmerge.vim v10, v10, 2, v0 ; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; CHECK-NEXT: vslideup.vi v9, v11, 6 +; CHECK-NEXT: vslideup.vi v10, v9, 6 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %a = load <8 x i16>, ptr %x @@ -1249,11 +1249,12 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vdivu.vv v9, v8, v9 -; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma -; CHECK-NEXT: vslidedown.vi v8, v8, 4 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vadd.vi v10, v10, 12 +; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; CHECK-NEXT: vslidedown.vi v8, v8, 4 +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vi v9, v8, 4 @@ -1271,18 +1272,18 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: lui a1, 524288 -; CHECK-NEXT: vmv.s.x v9, a1 -; CHECK-NEXT: vmv.v.i v10, 0 -; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma -; CHECK-NEXT: vslideup.vi v10, v9, 2 ; CHECK-NEXT: lui a1, %hi(.LCPI68_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI68_0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmulhu.vv v9, v8, v9 ; CHECK-NEXT: vsub.vv v8, v8, v9 -; CHECK-NEXT: vmulhu.vv v8, v8, v10 +; CHECK-NEXT: lui a1, 524288 +; CHECK-NEXT: vmv.s.x v10, a1 +; CHECK-NEXT: vmv.v.i v11, 0 +; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma +; CHECK-NEXT: vslideup.vi v11, v10, 2 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmulhu.vv v8, v8, v11 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vmv.v.i v9, 1 ; CHECK-NEXT: vmv.v.i v10, 2 @@ -1524,16 +1525,16 @@ ; RV32-NEXT: vrsub.vi v10, v10, 0 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vmadd.vv v10, v8, v9 +; RV32-NEXT: li a1, 63 +; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.v.i v8, 1 -; RV32-NEXT: vmv.v.i v9, 0 +; RV32-NEXT: vmv.v.i v9, 1 +; RV32-NEXT: vmv.v.i v11, 0 ; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma -; RV32-NEXT: vslideup.vi v9, v8, 2 +; RV32-NEXT: vslideup.vi v11, v9, 2 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; RV32-NEXT: vsra.vv v8, v10, v9 -; RV32-NEXT: li a1, 63 -; RV32-NEXT: vsrl.vx v9, v10, a1 -; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: vsra.vv v9, v10, v11 +; RV32-NEXT: vadd.vv v8, v9, v8 ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret ; @@ -5172,18 +5173,18 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) +; LMULMAX2-NEXT: lui a1, %hi(.LCPI183_0) +; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI183_0) +; LMULMAX2-NEXT: vle32.v v10, (a1) +; LMULMAX2-NEXT: vmulhu.vv v10, v8, v10 +; LMULMAX2-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-NEXT: li a1, 68 ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; LMULMAX2-NEXT: vmv.v.x v0, a1 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; LMULMAX2-NEXT: lui a1, %hi(.LCPI183_0) -; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI183_0) -; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmv.v.i v12, 0 ; LMULMAX2-NEXT: lui a1, 524288 ; LMULMAX2-NEXT: vmerge.vxm v12, v12, a1, v0 -; LMULMAX2-NEXT: vmulhu.vv v10, v8, v10 -; LMULMAX2-NEXT: 
vsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vmulhu.vv v8, v8, v12 ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-NEXT: li a1, 136 @@ -5199,33 +5200,33 @@ ; LMULMAX1-RV32-LABEL: mulhu_v8i32: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 -; LMULMAX1-RV32-NEXT: vle32.v v9, (a1) -; LMULMAX1-RV32-NEXT: lui a2, 524288 -; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2 -; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma -; LMULMAX1-RV32-NEXT: vslideup.vi v11, v10, 2 +; LMULMAX1-RV32-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI183_0) ; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI183_0) +; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) +; LMULMAX1-RV32-NEXT: vle32.v v10, (a0) +; LMULMAX1-RV32-NEXT: vmulhu.vv v11, v8, v9 +; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11 +; LMULMAX1-RV32-NEXT: lui a2, 524288 +; LMULMAX1-RV32-NEXT: vmv.s.x v12, a2 +; LMULMAX1-RV32-NEXT: vmv.v.i v13, 0 +; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma +; LMULMAX1-RV32-NEXT: vslideup.vi v13, v12, 2 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) -; LMULMAX1-RV32-NEXT: vmulhu.vv v12, v9, v10 -; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v12 -; LMULMAX1-RV32-NEXT: vmulhu.vv v9, v9, v11 -; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v12 -; LMULMAX1-RV32-NEXT: vmv.v.i v12, 1 -; LMULMAX1-RV32-NEXT: vmv.v.i v13, 2 -; LMULMAX1-RV32-NEXT: vslideup.vi v13, v12, 3 -; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v13 -; LMULMAX1-RV32-NEXT: vmulhu.vv v10, v8, v10 -; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: vmulhu.vv v8, v8, v11 -; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v13 -; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) -; LMULMAX1-RV32-NEXT: vse32.v v9, (a1) +; LMULMAX1-RV32-NEXT: vmulhu.vv v8, v8, v13 +; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11 +; LMULMAX1-RV32-NEXT: vmv.v.i v11, 1 +; LMULMAX1-RV32-NEXT: vmv.v.i v12, 2 +; LMULMAX1-RV32-NEXT: vslideup.vi v12, v11, 3 +; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v12 +; LMULMAX1-RV32-NEXT: vmulhu.vv v9, v10, v9 +; LMULMAX1-RV32-NEXT: vsub.vv v10, v10, v9 +; LMULMAX1-RV32-NEXT: vmulhu.vv v10, v10, v13 +; LMULMAX1-RV32-NEXT: vadd.vv v9, v10, v9 +; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v12 +; LMULMAX1-RV32-NEXT: vse32.v v9, (a0) +; LMULMAX1-RV32-NEXT: vse32.v v8, (a1) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mulhu_v8i32: @@ -5282,24 +5283,24 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV64-NEXT: li a1, -1 -; LMULMAX2-RV64-NEXT: slli a1, a1, 63 -; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1 -; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0 -; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma -; LMULMAX2-RV64-NEXT: vslideup.vi v12, v10, 2 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI184_0) ; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI184_0) -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v10, (a1) ; LMULMAX2-RV64-NEXT: vmulhu.vv v10, v8, v10 +; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v10 +; LMULMAX2-RV64-NEXT: li a1, -1 +; LMULMAX2-RV64-NEXT: slli a1, a1, 63 +; LMULMAX2-RV64-NEXT: vmv.s.x v12, a1 +; LMULMAX2-RV64-NEXT: vmv.v.i v14, 0 +; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma +; LMULMAX2-RV64-NEXT: vslideup.vi v14, v12, 2 +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI184_1) ; LMULMAX2-RV64-NEXT: addi a1, 
a1, %lo(.LCPI184_1) -; LMULMAX2-RV64-NEXT: vle64.v v14, (a1) -; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v10 -; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v12 +; LMULMAX2-RV64-NEXT: vle64.v v12, (a1) +; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v14 ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10 -; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v14 +; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v12 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret ; @@ -5329,46 +5330,46 @@ ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) -; LMULMAX1-RV64-NEXT: addi a1, a0, 16 -; LMULMAX1-RV64-NEXT: vle64.v v9, (a1) -; LMULMAX1-RV64-NEXT: vmv.v.i v10, 0 -; LMULMAX1-RV64-NEXT: li a2, -1 -; LMULMAX1-RV64-NEXT: slli a2, a2, 63 +; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI184_0) +; LMULMAX1-RV64-NEXT: addi a1, a1, %lo(.LCPI184_0) +; LMULMAX1-RV64-NEXT: vlse64.v v9, (a1), zero +; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI184_1) +; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI184_1)(a1) +; LMULMAX1-RV64-NEXT: addi a2, a0, 16 +; LMULMAX1-RV64-NEXT: vle64.v v10, (a2) ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma -; LMULMAX1-RV64-NEXT: vmv.s.x v10, a2 -; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI184_0) -; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI184_0) +; LMULMAX1-RV64-NEXT: vmv.s.x v9, a1 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; LMULMAX1-RV64-NEXT: vlse64.v v11, (a2), zero -; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI184_1) -; LMULMAX1-RV64-NEXT: ld a2, %lo(.LCPI184_1)(a2) +; LMULMAX1-RV64-NEXT: vmulhu.vv v9, v10, v9 +; LMULMAX1-RV64-NEXT: vsub.vv v10, v10, v9 +; LMULMAX1-RV64-NEXT: vmv.v.i v11, 0 +; LMULMAX1-RV64-NEXT: li a1, -1 +; LMULMAX1-RV64-NEXT: slli a1, a1, 63 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma -; LMULMAX1-RV64-NEXT: vmv.s.x v11, a2 +; LMULMAX1-RV64-NEXT: vmv.s.x v11, a1 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; LMULMAX1-RV64-NEXT: vmulhu.vv v11, v9, v11 -; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v11 -; LMULMAX1-RV64-NEXT: vmulhu.vv v9, v9, v10 -; LMULMAX1-RV64-NEXT: vadd.vv v9, v9, v11 +; LMULMAX1-RV64-NEXT: vmulhu.vv v10, v10, v11 +; LMULMAX1-RV64-NEXT: vadd.vv v9, v10, v9 ; LMULMAX1-RV64-NEXT: vid.v v10 ; LMULMAX1-RV64-NEXT: vadd.vi v11, v10, 2 ; LMULMAX1-RV64-NEXT: vsrl.vv v9, v9, v11 -; LMULMAX1-RV64-NEXT: lui a2, 838861 -; LMULMAX1-RV64-NEXT: addiw a2, a2, -819 -; LMULMAX1-RV64-NEXT: slli a3, a2, 32 -; LMULMAX1-RV64-NEXT: add a2, a2, a3 -; LMULMAX1-RV64-NEXT: vmv.v.x v11, a2 -; LMULMAX1-RV64-NEXT: lui a2, 699051 -; LMULMAX1-RV64-NEXT: addiw a2, a2, -1365 -; LMULMAX1-RV64-NEXT: slli a3, a2, 32 -; LMULMAX1-RV64-NEXT: add a2, a2, a3 +; LMULMAX1-RV64-NEXT: lui a1, 838861 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -819 +; LMULMAX1-RV64-NEXT: slli a3, a1, 32 +; LMULMAX1-RV64-NEXT: add a1, a1, a3 +; LMULMAX1-RV64-NEXT: vmv.v.x v11, a1 +; LMULMAX1-RV64-NEXT: lui a1, 699051 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -1365 +; LMULMAX1-RV64-NEXT: slli a3, a1, 32 +; LMULMAX1-RV64-NEXT: add a1, a1, a3 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma -; LMULMAX1-RV64-NEXT: vmv.s.x v11, a2 +; LMULMAX1-RV64-NEXT: vmv.s.x v11, a1 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmulhu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vadd.vi v10, v10, 1 ; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) -; LMULMAX1-RV64-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, ptr %x %b = udiv <4 x i64> %a, Index: 
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll @@ -137,6 +137,21 @@ ; RV32-NEXT: sub sp, sp, a2 ; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xce, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 78 * vlenb ; RV32-NEXT: addi a3, a1, 256 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vid.v v24 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: li a4, 37 +; RV32-NEXT: mul a2, a2, a4 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vs4r.v v24, (a2) # Unknown-size Folded Spill +; RV32-NEXT: vadd.vi v8, v24, -4 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: li a4, 13 +; RV32-NEXT: mul a2, a2, a4 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vs4r.v v8, (a2) # Unknown-size Folded Spill ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vle32.v v16, (a3) @@ -147,20 +162,6 @@ ; RV32-NEXT: addi a3, a3, 16 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma -; RV32-NEXT: vid.v v24 -; RV32-NEXT: csrr a3, vlenb -; RV32-NEXT: li a4, 37 -; RV32-NEXT: mul a3, a3, a4 -; RV32-NEXT: add a3, sp, a3 -; RV32-NEXT: addi a3, a3, 16 -; RV32-NEXT: vs4r.v v24, (a3) # Unknown-size Folded Spill -; RV32-NEXT: vadd.vi v8, v24, -4 -; RV32-NEXT: csrr a3, vlenb -; RV32-NEXT: li a4, 13 -; RV32-NEXT: mul a3, a3, a4 -; RV32-NEXT: add a3, sp, a3 -; RV32-NEXT: addi a3, a3, 16 -; RV32-NEXT: vs4r.v v8, (a3) # Unknown-size Folded Spill ; RV32-NEXT: vrgather.vv v4, v16, v8 ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: li a4, 41 @@ -656,145 +657,159 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 86 +; RV64-NEXT: li a3, 88 ; RV64-NEXT: mul a2, a2, a3 ; RV64-NEXT: sub sp, sp, a2 -; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd6, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 86 * vlenb -; RV64-NEXT: addi a2, a1, 256 +; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 88 * vlenb ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vle64.v v8, (a2) +; RV64-NEXT: addi a2, a1, 256 +; RV64-NEXT: vle64.v v16, (a2) ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 77 +; RV64-NEXT: li a3, 56 ; RV64-NEXT: mul a2, a2, a3 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 16 -; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill ; RV64-NEXT: addi a2, a1, 128 -; RV64-NEXT: vle64.v v24, (a2) +; RV64-NEXT: vle64.v v8, (a2) ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 53 +; RV64-NEXT: li a3, 80 ; RV64-NEXT: mul a2, a2, a3 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 16 -; RV64-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill -; RV64-NEXT: vle64.v v0, (a1) +; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; RV64-NEXT: vle64.v v8, (a1) +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: li a2, 72 +; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-NEXT: vrgather.vi v8, v16, 4 +; RV64-NEXT: li a1, 128 +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; 
RV64-NEXT: vmv.v.x v1, a1 +; RV64-NEXT: vsetivli zero, 8, e64, m8, ta, ma +; RV64-NEXT: vslidedown.vi v24, v16, 8 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: li a2, 48 +; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmv1r.v v0, v1 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 61 +; RV64-NEXT: li a2, 36 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs8r.v v0, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vid.v v8 +; RV64-NEXT: vs1r.v v1, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vrgather.vi v8, v24, 2, v0.t +; RV64-NEXT: vmv.v.v v4, v8 +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: li a1, 6 -; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: vid.v v16 +; RV64-NEXT: vmul.vx v24, v16, a1 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 6 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; RV64-NEXT: li a1, 56 -; RV64-NEXT: vrgather.vv v16, v0, v8 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 69 +; RV64-NEXT: li a3, 72 ; RV64-NEXT: mul a2, a2, a3 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 16 -; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; RV64-NEXT: vadd.vi v8, v8, -16 +; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload +; RV64-NEXT: vrgather.vv v16, v8, v24 +; RV64-NEXT: vadd.vi v24, v24, -16 ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.v.x v0, a1 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 41 +; RV64-NEXT: li a2, 40 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu -; RV64-NEXT: vrgather.vv v16, v24, v8, v0.t -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV64-NEXT: li a1, 128 -; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 77 -; RV64-NEXT: mul a2, a2, a3 -; RV64-NEXT: add a2, sp, a2 -; RV64-NEXT: addi a2, a2, 16 -; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vi v20, v8, 4 -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; RV64-NEXT: vmv.v.x v0, a1 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 37 +; RV64-NEXT: li a2, 80 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 8, e64, m8, ta, ma -; RV64-NEXT: vslidedown.vi v24, v8, 8 +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vrgather.vv v16, v8, v24, v0.t +; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma +; RV64-NEXT: vmv.v.v v4, v16 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 45 +; RV64-NEXT: li a2, 28 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vs4r.v v4, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vrgather.vi v20, v24, 2, v0.t -; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma -; RV64-NEXT: vmv.v.v v20, v16 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 29 +; RV64-NEXT: li a2, 56 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vl8r.v v8, 
(a1) # Unknown-size Folded Reload +; RV64-NEXT: vrgather.vi v12, v8, 5 +; RV64-NEXT: vmv1r.v v0, v1 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 69 +; RV64-NEXT: li a2, 48 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vadd.vi v8, v24, 1 +; RV64-NEXT: vrgather.vi v12, v24, 3, v0.t ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 61 +; RV64-NEXT: li a2, 44 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vv v16, v0, v8 -; RV64-NEXT: vadd.vi v8, v24, -15 +; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 41 -; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: slli a1, a1, 6 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vadd.vi v0, v8, 1 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 53 +; RV64-NEXT: li a2, 72 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vv v16, v24, v8, v0.t -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vrgather.vv v16, v24, v0 +; RV64-NEXT: vadd.vi v24, v8, -15 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 77 +; RV64-NEXT: li a2, 40 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vi v8, v24, 5 +; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 37 +; RV64-NEXT: li a2, 80 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vrgather.vv v16, v8, v24, v0.t +; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 45 +; RV64-NEXT: li a2, 44 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vi v8, v24, 3, v0.t -; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma +; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 25 +; RV64-NEXT: li a2, 44 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 @@ -802,92 +817,87 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vmv.v.i v8, 6 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 5 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vmv.s.x v12, zero ; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 3 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vslideup.vi v8, v12, 5 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 77 +; RV64-NEXT: li a2, 56 ; 
RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vv v16, v24, v8 +; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vrgather.vv v12, v16, v8 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 45 +; RV64-NEXT: li a2, 36 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vi v16, v8, 4, v0.t +; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 41 +; RV64-NEXT: li a2, 48 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vrgather.vi v12, v16, 4, v0.t ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 69 +; RV64-NEXT: li a2, 40 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 6 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vadd.vi v8, v0, 2 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 61 +; RV64-NEXT: li a2, 72 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vrgather.vv v16, v24, v8 -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 4 -; RV64-NEXT: add a1, a2, a1 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vmv.v.v v24, v16 ; RV64-NEXT: li a1, 24 ; RV64-NEXT: vadd.vi v8, v0, -14 ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.v.x v0, a1 -; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 53 -; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 4 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: li a2, 80 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vv v16, v24, v8, v0.t +; RV64-NEXT: vrgather.vv v24, v16, v8, v0.t ; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 41 +; RV64-NEXT: li a2, 40 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v24 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 41 +; RV64-NEXT: li a2, 40 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 @@ -896,15 +906,15 @@ ; RV64-NEXT: li a1, 1 ; RV64-NEXT: vmv.v.i v8, 7 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: slli a3, a2, 4 -; RV64-NEXT: add a2, a3, a2 +; RV64-NEXT: li 
a3, 24 +; RV64-NEXT: mul a2, a2, a3 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 16 ; RV64-NEXT: vs4r.v v8, (a2) # Unknown-size Folded Spill ; RV64-NEXT: vmv.s.x v12, a1 ; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 13 +; RV64-NEXT: li a2, 20 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 @@ -912,88 +922,86 @@ ; RV64-NEXT: vslideup.vi v8, v12, 5 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 77 +; RV64-NEXT: li a2, 56 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vv v20, v16, v8 +; RV64-NEXT: vrgather.vv v12, v16, v8 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 37 +; RV64-NEXT: li a2, 36 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 45 +; RV64-NEXT: li a2, 48 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vi v20, v8, 5, v0.t +; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vrgather.vi v12, v16, 5, v0.t ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 37 +; RV64-NEXT: li a2, 36 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 69 -; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: slli a1, a1, 6 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vadd.vi v24, v0, 3 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 61 +; RV64-NEXT: li a2, 72 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vrgather.vv v8, v16, v24 +; RV64-NEXT: vmv.v.v v16, v8 +; RV64-NEXT: vadd.vi v8, v0, -13 +; RV64-NEXT: addi a1, sp, 16 +; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vadd.vi v24, v0, -13 -; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 53 +; RV64-NEXT: li a2, 80 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t +; RV64-NEXT: vrgather.vv v8, v24, v16, v0.t ; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 37 +; RV64-NEXT: li a2, 36 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vmv.v.v v12, v8 ; RV64-NEXT: 
csrr a1, vlenb -; RV64-NEXT: li a2, 37 +; RV64-NEXT: li a2, 36 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 7, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 3 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 5 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload @@ -1001,7 +1009,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: li a1, 192 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 77 +; RV64-NEXT: li a3, 56 ; RV64-NEXT: mul a2, a2, a3 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 16 @@ -1010,163 +1018,161 @@ ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.v.x v0, a1 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 3 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 45 +; RV64-NEXT: li a2, 48 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vrgather.vv v20, v8, v16, v0.t ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 5 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 69 -; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: slli a1, a1, 6 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vadd.vi v24, v0, 4 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 61 +; RV64-NEXT: li a2, 72 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vrgather.vv v8, v16, v24 ; RV64-NEXT: li a1, 28 -; RV64-NEXT: vadd.vi v24, v0, -12 +; RV64-NEXT: vadd.vi v16, v0, -12 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, sp, a2 +; RV64-NEXT: addi a2, a2, 16 +; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.v.x v0, a1 -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 53 +; RV64-NEXT: li a2, 80 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vv v8, v16, v24, v0.t +; RV64-NEXT: vrgather.vv v8, v24, v16, v0.t ; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, 
a1, 5 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vmv.v.v v12, v8 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 5 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 5 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 7, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 13 +; RV64-NEXT: li a2, 20 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 4 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: li a2, 24 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vslideup.vi v16, v8, 6 +; RV64-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vslideup.vi v24, v8, 6 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 77 +; RV64-NEXT: li a2, 56 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vi v20, v8, 3 +; RV64-NEXT: vrgather.vi v16, v8, 3 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 3 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 45 +; RV64-NEXT: li a2, 48 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload -; RV64-NEXT: vrgather.vv v20, v8, v16, v0.t +; RV64-NEXT: vrgather.vv v16, v8, v24, v0.t ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 4 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: li a2, 24 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 69 -; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: slli a1, a1, 6 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vadd.vi v24, v8, 5 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 61 +; RV64-NEXT: li a2, 72 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vrgather.vv v8, v0, v24 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 45 +; RV64-NEXT: li a2, 56 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 69 -; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: slli a1, a1, 6 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vadd.vi v8, v24, -11 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 77 +; RV64-NEXT: li a2, 72 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; 
RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 53 +; RV64-NEXT: li a2, 80 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 77 +; RV64-NEXT: li a2, 72 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 45 +; RV64-NEXT: li a2, 56 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 @@ -1174,15 +1180,15 @@ ; RV64-NEXT: vrgather.vv v16, v24, v8, v0.t ; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a2, a1, 4 -; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: li a2, 24 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vmv.v.v v12, v16 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 29 +; RV64-NEXT: li a2, 28 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 @@ -1192,15 +1198,14 @@ ; RV64-NEXT: vse64.v v12, (a1) ; RV64-NEXT: addi a1, a0, 256 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: slli a3, a2, 5 -; RV64-NEXT: add a2, a3, a2 +; RV64-NEXT: slli a2, a2, 5 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 16 ; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload ; RV64-NEXT: vse64.v v8, (a1) ; RV64-NEXT: addi a1, a0, 192 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 37 +; RV64-NEXT: li a3, 36 ; RV64-NEXT: mul a2, a2, a3 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 16 @@ -1208,7 +1213,7 @@ ; RV64-NEXT: vse64.v v8, (a1) ; RV64-NEXT: addi a1, a0, 128 ; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 41 +; RV64-NEXT: li a3, 40 ; RV64-NEXT: mul a2, a2, a3 ; RV64-NEXT: add a2, sp, a2 ; RV64-NEXT: addi a2, a2, 16 @@ -1216,14 +1221,14 @@ ; RV64-NEXT: vse64.v v8, (a1) ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: li a2, 25 +; RV64-NEXT: li a2, 44 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vse64.v v8, (a0) ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: li a1, 86 +; RV64-NEXT: li a1, 88 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -225,9 +225,9 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: vfmv.s.f v24, fa0 ; CHECK-NEXT: vfadd.vv v8, v8, v16 -; CHECK-NEXT: vfredusum.vs v8, v8, v24 +; CHECK-NEXT: vfmv.s.f v16, fa0 +; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <128 x half>, ptr %x @@ -655,9 +655,9 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vfmv.s.f v24, fa0 ; CHECK-NEXT: vfadd.vv v8, v8, v16 -; CHECK-NEXT: vfredusum.vs v8, v8, v24 +; CHECK-NEXT: vfmv.s.f v16, fa0 +; CHECK-NEXT: 
vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <64 x float>, ptr %x @@ -692,12 +692,12 @@ ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v24, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfredusum.vs v8, v24, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vfredusum.vs v8, v24, v8 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <64 x half>, ptr %x @@ -1062,9 +1062,9 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle64.v v16, (a0) -; CHECK-NEXT: vfmv.s.f v24, fa0 ; CHECK-NEXT: vfadd.vv v8, v8, v16 -; CHECK-NEXT: vfredusum.vs v8, v8, v24 +; CHECK-NEXT: vfmv.s.f v16, fa0 +; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <32 x double>, ptr %x @@ -1253,12 +1253,12 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 -; CHECK-NEXT: lui a1, %hi(.LCPI72_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI72_0)(a1) ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: vfmv.s.f v24, fa5 +; CHECK-NEXT: lui a0, %hi(.LCPI72_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI72_0)(a0) ; CHECK-NEXT: vfmin.vv v8, v8, v16 -; CHECK-NEXT: vfredmin.vs v8, v8, v24 +; CHECK-NEXT: vfmv.s.f v16, fa5 +; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <128 x half>, ptr %x @@ -1484,33 +1484,19 @@ declare double @llvm.vector.reduce.fmin.v32f64(<32 x double>) define double @vreduce_fmin_v32f64(ptr %x) { -; RV32-LABEL: vreduce_fmin_v32f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, a0, 128 -; RV32-NEXT: lui a1, %hi(.LCPI82_0) -; RV32-NEXT: fld fa5, %lo(.LCPI82_0)(a1) -; RV32-NEXT: vle64.v v16, (a0) -; RV32-NEXT: vfmv.s.f v24, fa5 -; RV32-NEXT: vfmin.vv v8, v8, v16 -; RV32-NEXT: vfredmin.vs v8, v8, v24 -; RV32-NEXT: vfmv.f.s fa0, v8 -; RV32-NEXT: ret -; -; RV64-LABEL: vreduce_fmin_v32f64: -; RV64: # %bb.0: -; RV64-NEXT: lui a1, %hi(.LCPI82_0) -; RV64-NEXT: fld fa5, %lo(.LCPI82_0)(a1) -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, a0, 128 -; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vfmv.s.f v24, fa5 -; RV64-NEXT: vfmin.vv v8, v8, v16 -; RV64-NEXT: vfredmin.vs v8, v8, v24 -; RV64-NEXT: vfmv.f.s fa0, v8 -; RV64-NEXT: ret +; CHECK-LABEL: vreduce_fmin_v32f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle64.v v16, (a0) +; CHECK-NEXT: lui a0, %hi(.LCPI82_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI82_0)(a0) +; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: vfmv.s.f v16, fa5 +; CHECK-NEXT: vfredmin.vs v8, v8, v16 +; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: ret %v = load <32 x double>, ptr %x %red = call double @llvm.vector.reduce.fmin.v32f64(<32 x double> %v) ret double %red @@ -1590,10 +1576,10 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: li a0, -512 -; CHECK-NEXT: vmv.s.x v24, a0 ; CHECK-NEXT: vfmax.vv v8, v8, v16 -; CHECK-NEXT: vfredmax.vs v8, v8, v24 +; CHECK-NEXT: li a0, -512 +; 
CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <128 x half>, ptr %x @@ -1819,33 +1805,19 @@ declare double @llvm.vector.reduce.fmax.v32f64(<32 x double>) define double @vreduce_fmax_v32f64(ptr %x) { -; RV32-LABEL: vreduce_fmax_v32f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: addi a0, a0, 128 -; RV32-NEXT: lui a1, %hi(.LCPI97_0) -; RV32-NEXT: fld fa5, %lo(.LCPI97_0)(a1) -; RV32-NEXT: vle64.v v16, (a0) -; RV32-NEXT: vfmv.s.f v24, fa5 -; RV32-NEXT: vfmax.vv v8, v8, v16 -; RV32-NEXT: vfredmax.vs v8, v8, v24 -; RV32-NEXT: vfmv.f.s fa0, v8 -; RV32-NEXT: ret -; -; RV64-LABEL: vreduce_fmax_v32f64: -; RV64: # %bb.0: -; RV64-NEXT: lui a1, %hi(.LCPI97_0) -; RV64-NEXT: fld fa5, %lo(.LCPI97_0)(a1) -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: addi a0, a0, 128 -; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vfmv.s.f v24, fa5 -; RV64-NEXT: vfmax.vv v8, v8, v16 -; RV64-NEXT: vfredmax.vs v8, v8, v24 -; RV64-NEXT: vfmv.f.s fa0, v8 -; RV64-NEXT: ret +; CHECK-LABEL: vreduce_fmax_v32f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle64.v v16, (a0) +; CHECK-NEXT: lui a0, %hi(.LCPI97_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI97_0)(a0) +; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: vfmv.s.f v16, fa5 +; CHECK-NEXT: vfredmax.vs v8, v8, v16 +; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: ret %v = load <32 x double>, ptr %x %red = call double @llvm.vector.reduce.fmax.v32f64(<32 x double> %v) ret double %red Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -141,9 +141,9 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vredsum.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, ptr %x @@ -515,9 +515,9 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vredsum.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i16>, ptr %x @@ -534,12 +534,12 @@ ; CHECK-NEXT: li a0, 64 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.vv v24, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma -; CHECK-NEXT: vredsum.vs v8, v24, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vredsum.vs v8, v24, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i8>, ptr %x @@ -557,12 +557,12 @@ ; CHECK-NEXT: li a0, 64 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: 
vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v24, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma -; CHECK-NEXT: vredsum.vs v8, v24, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vredsum.vs v8, v24, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i8>, ptr %x @@ -882,9 +882,9 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vredsum.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, ptr %x @@ -901,12 +901,12 @@ ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwadd.vv v24, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vredsum.vs v8, v24, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vredsum.vs v8, v24, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i16>, ptr %x @@ -924,12 +924,12 @@ ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v24, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vredsum.vs v8, v24, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vredsum.vs v8, v24, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i16>, ptr %x @@ -1399,9 +1399,9 @@ ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) -; RV32-NEXT: vmv.s.x v24, zero ; RV32-NEXT: vadd.vv v8, v8, v16 -; RV32-NEXT: vredsum.vs v8, v8, v24 +; RV32-NEXT: vmv.s.x v16, zero +; RV32-NEXT: vredsum.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma @@ -1415,9 +1415,9 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vmv.s.x v24, zero ; RV64-NEXT: vadd.vv v8, v8, v16 -; RV64-NEXT: vredsum.vs v8, v8, v24 +; RV64-NEXT: vmv.s.x v16, zero +; RV64-NEXT: vredsum.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, ptr %x @@ -2549,9 +2549,9 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: vredor.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, ptr %x @@ -2681,9 +2681,9 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: vredor.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i16>, ptr %x @@ -2796,9 +2796,9 @@ ; CHECK-NEXT: vle32.v 
v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: vredor.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, ptr %x @@ -2955,9 +2955,9 @@ ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) -; RV32-NEXT: vmv.s.x v24, zero ; RV32-NEXT: vor.vv v8, v8, v16 -; RV32-NEXT: vredor.vs v8, v8, v24 +; RV32-NEXT: vmv.s.x v16, zero +; RV32-NEXT: vredor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma @@ -2971,9 +2971,9 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vmv.s.x v24, zero ; RV64-NEXT: vor.vv v8, v8, v16 -; RV64-NEXT: vredor.vs v8, v8, v24 +; RV64-NEXT: vmv.s.x v16, zero +; RV64-NEXT: vredor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, ptr %x @@ -3167,9 +3167,9 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: vredxor.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, ptr %x @@ -3299,9 +3299,9 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: vredxor.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i16>, ptr %x @@ -3414,9 +3414,9 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: vredxor.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, ptr %x @@ -3573,9 +3573,9 @@ ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) -; RV32-NEXT: vmv.s.x v24, zero ; RV32-NEXT: vxor.vv v8, v8, v16 -; RV32-NEXT: vredxor.vs v8, v8, v24 +; RV32-NEXT: vmv.s.x v16, zero +; RV32-NEXT: vredxor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma @@ -3589,9 +3589,9 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vmv.s.x v24, zero ; RV64-NEXT: vxor.vv v8, v8, v16 -; RV64-NEXT: vredxor.vs v8, v8, v24 +; RV64-NEXT: vmv.s.x v16, zero +; RV64-NEXT: vredxor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, ptr %x @@ -3792,10 +3792,10 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) -; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vmv.s.x v24, a0 ; CHECK-NEXT: vmin.vv v8, v8, v16 -; CHECK-NEXT: vredmin.vs v8, v8, v24 +; CHECK-NEXT: li a0, 127 +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: vredmin.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, ptr %x @@ -4005,11 +4005,11 @@ ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle16.v v16, (a0) +; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vmv.s.x v24, a0 -; 
RV32-NEXT: vmin.vv v8, v8, v16 -; RV32-NEXT: vredmin.vs v8, v8, v24 +; RV32-NEXT: vmv.s.x v16, a0 +; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret ; @@ -4020,11 +4020,11 @@ ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle16.v v16, (a0) +; RV64-NEXT: vmin.vv v8, v8, v16 ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vmv.s.x v24, a0 -; RV64-NEXT: vmin.vv v8, v8, v16 -; RV64-NEXT: vredmin.vs v8, v8, v24 +; RV64-NEXT: vmv.s.x v16, a0 +; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <128 x i16>, ptr %x @@ -4203,11 +4203,11 @@ ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle32.v v16, (a0) +; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vmv.s.x v24, a0 -; RV32-NEXT: vmin.vv v8, v8, v16 -; RV32-NEXT: vredmin.vs v8, v8, v24 +; RV32-NEXT: vmv.s.x v16, a0 +; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret ; @@ -4218,11 +4218,11 @@ ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle32.v v16, (a0) +; RV64-NEXT: vmin.vv v8, v8, v16 ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vmv.s.x v24, a0 -; RV64-NEXT: vmin.vv v8, v8, v16 -; RV64-NEXT: vredmin.vs v8, v8, v24 +; RV64-NEXT: vmv.s.x v16, a0 +; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <64 x i32>, ptr %x @@ -4458,11 +4458,11 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) +; RV64-NEXT: vmin.vv v8, v8, v16 ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vmv.s.x v24, a0 -; RV64-NEXT: vmin.vv v8, v8, v16 -; RV64-NEXT: vredmin.vs v8, v8, v24 +; RV64-NEXT: vmv.s.x v16, a0 +; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, ptr %x @@ -4675,10 +4675,10 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) -; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vmv.s.x v24, a0 ; CHECK-NEXT: vmax.vv v8, v8, v16 -; CHECK-NEXT: vredmax.vs v8, v8, v24 +; CHECK-NEXT: li a0, -128 +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, ptr %x @@ -4814,10 +4814,10 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vmv.s.x v24, a0 ; CHECK-NEXT: vmax.vv v8, v8, v16 -; CHECK-NEXT: vredmax.vs v8, v8, v24 +; CHECK-NEXT: lui a0, 1048568 +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i16>, ptr %x @@ -4935,10 +4935,10 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vmv.s.x v24, a0 ; CHECK-NEXT: vmax.vv v8, v8, v16 -; CHECK-NEXT: vredmax.vs v8, v8, v24 +; CHECK-NEXT: lui a0, 524288 +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, ptr %x @@ -5164,11 +5164,11 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) +; RV64-NEXT: vmax.vv v8, v8, v16 ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vmv.s.x v24, a0 -; RV64-NEXT: vmax.vv v8, v8, v16 -; RV64-NEXT: vredmax.vs v8, v8, v24 +; RV64-NEXT: vmv.s.x v16, a0 +; 
RV64-NEXT: vredmax.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, ptr %x @@ -6034,9 +6034,9 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vredmaxu.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, ptr %x @@ -6166,9 +6166,9 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vredmaxu.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i16>, ptr %x @@ -6281,9 +6281,9 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vmv.s.x v24, zero ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vredmaxu.vs v8, v8, v24 +; CHECK-NEXT: vmv.s.x v16, zero +; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, ptr %x @@ -6440,9 +6440,9 @@ ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) -; RV32-NEXT: vmv.s.x v24, zero ; RV32-NEXT: vmaxu.vv v8, v8, v16 -; RV32-NEXT: vredmaxu.vs v8, v8, v24 +; RV32-NEXT: vmv.s.x v16, zero +; RV32-NEXT: vredmaxu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma @@ -6456,9 +6456,9 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vmv.s.x v24, zero ; RV64-NEXT: vmaxu.vv v8, v8, v16 -; RV64-NEXT: vredmaxu.vs v8, v8, v24 +; RV64-NEXT: vmv.s.x v16, zero +; RV64-NEXT: vredmaxu.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, ptr %x Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -2301,28 +2301,27 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v32i32_v32f64: ; RV32: # %bb.0: -; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: vsll.vi v24, v8, 3 -; RV32-NEXT: addi a2, a1, -16 -; RV32-NEXT: sltu a3, a1, a2 -; RV32-NEXT: addi a3, a3, -1 -; RV32-NEXT: and a2, a3, a2 +; RV32-NEXT: li a3, 16 +; RV32-NEXT: vsll.vi v16, v8, 3 +; RV32-NEXT: mv a2, a1 +; RV32-NEXT: bltu a1, a3, .LBB93_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB93_2: +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v8, v24, 16 +; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a1, a1, a2 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: and a1, a1, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: li a2, 16 -; RV32-NEXT: bltu a1, a2, .LBB93_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a1, 16 -; RV32-NEXT: .LBB93_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; 
RV32-NEXT: vmv1r.v v0, v1 -; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v32i32_v32f64: @@ -2375,28 +2374,27 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v32i32_v32f64: ; RV32: # %bb.0: -; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: vsll.vi v24, v8, 3 -; RV32-NEXT: addi a2, a1, -16 -; RV32-NEXT: sltu a3, a1, a2 -; RV32-NEXT: addi a3, a3, -1 -; RV32-NEXT: and a2, a3, a2 +; RV32-NEXT: li a3, 16 +; RV32-NEXT: vsll.vi v16, v8, 3 +; RV32-NEXT: mv a2, a1 +; RV32-NEXT: bltu a1, a3, .LBB94_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB94_2: +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v8, v24, 16 +; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a1, a1, a2 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: and a1, a1, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: li a2, 16 -; RV32-NEXT: bltu a1, a2, .LBB94_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a1, 16 -; RV32-NEXT: .LBB94_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vmv1r.v v0, v1 -; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v32i32_v32f64: @@ -2435,28 +2433,27 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v32i32_v32f64: ; RV32: # %bb.0: -; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 32 ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; RV32-NEXT: vsll.vi v24, v8, 3 -; RV32-NEXT: addi a2, a1, -16 -; RV32-NEXT: sltu a3, a1, a2 -; RV32-NEXT: addi a3, a3, -1 -; RV32-NEXT: and a2, a3, a2 +; RV32-NEXT: li a3, 16 +; RV32-NEXT: vsll.vi v16, v8, 3 +; RV32-NEXT: mv a2, a1 +; RV32-NEXT: bltu a1, a3, .LBB95_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB95_2: +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma -; RV32-NEXT: vslidedown.vi v8, v24, 16 +; RV32-NEXT: vslidedown.vi v24, v16, 16 +; RV32-NEXT: addi a2, a1, -16 +; RV32-NEXT: sltu a1, a1, a2 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: and a1, a1, a2 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: li a2, 16 -; RV32-NEXT: bltu a1, a2, .LBB95_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a1, 16 -; RV32-NEXT: .LBB95_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vmv1r.v v0, v1 -; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t +; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v32i32_v32f64: Index: llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -209,88 +209,86 @@ define 
@reverse_nxv8i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv8i1: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 -; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vid.v v10 -; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; RV32-BITS-UNKNOWN-NEXT: vid.v v8 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 -; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v10, 0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v10, v10, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v10, v8 +; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v11, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv8i1: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-256-NEXT: vmv.v.i v8, 0 -; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: addi a0, a0, -1 -; RV32-BITS-256-NEXT: vid.v v9 -; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0 -; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9 +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-BITS-256-NEXT: vid.v v8 +; RV32-BITS-256-NEXT: vrsub.vx v8, v8, a0 +; RV32-BITS-256-NEXT: vmv.v.i v9, 0 +; RV32-BITS-256-NEXT: vmerge.vim v9, v9, 1, v0 +; RV32-BITS-256-NEXT: vrgather.vv v10, v9, v8 ; RV32-BITS-256-NEXT: vand.vi v8, v10, 1 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv8i1: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-512-NEXT: vmv.v.i v8, 0 -; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vid.v v9 -; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0 -; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-BITS-512-NEXT: vid.v v8 +; RV32-BITS-512-NEXT: vrsub.vx v8, v8, a0 +; RV32-BITS-512-NEXT: vmv.v.i v9, 0 +; RV32-BITS-512-NEXT: vmerge.vim v9, v9, 1, v0 +; RV32-BITS-512-NEXT: vrgather.vv v10, v9, v8 ; RV32-BITS-512-NEXT: vand.vi v8, v10, 1 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i1: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 -; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vid.v v10 -; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; RV64-BITS-UNKNOWN-NEXT: vid.v v8 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 -; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v10, 0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v10, v10, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v10, v8 +; RV64-BITS-UNKNOWN-NEXT: 
vand.vi v8, v11, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv8i1: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-256-NEXT: vmv.v.i v8, 0 -; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: addi a0, a0, -1 -; RV64-BITS-256-NEXT: vid.v v9 -; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 -; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-BITS-256-NEXT: vid.v v8 +; RV64-BITS-256-NEXT: vrsub.vx v8, v8, a0 +; RV64-BITS-256-NEXT: vmv.v.i v9, 0 +; RV64-BITS-256-NEXT: vmerge.vim v9, v9, 1, v0 +; RV64-BITS-256-NEXT: vrgather.vv v10, v9, v8 ; RV64-BITS-256-NEXT: vand.vi v8, v10, 1 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv8i1: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-512-NEXT: vmv.v.i v8, 0 -; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vid.v v9 -; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 -; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-BITS-512-NEXT: vid.v v8 +; RV64-BITS-512-NEXT: vrsub.vx v8, v8, a0 +; RV64-BITS-512-NEXT: vmv.v.i v9, 0 +; RV64-BITS-512-NEXT: vmerge.vim v9, v9, 1, v0 +; RV64-BITS-512-NEXT: vrgather.vv v10, v9, v8 ; RV64-BITS-512-NEXT: vand.vi v8, v10, 1 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 ; RV64-BITS-512-NEXT: ret @@ -497,18 +495,18 @@ define @reverse_nxv64i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i1: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 -; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e16, m8, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vid.v v16 +; RV32-BITS-UNKNOWN-NEXT: vid.v v8 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v8, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v12, v16 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v16, 0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v16, v16, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v16, v8 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v20, v8 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v24, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -531,17 +529,18 @@ ; ; RV32-BITS-512-LABEL: reverse_nxv64i1: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; RV32-BITS-512-NEXT: vmv.v.i v8, 0 -; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-512-NEXT: vid.v v16 +; RV32-BITS-512-NEXT: vid.v v8 ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: slli a0, a0, 2 ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vrsub.vx v16, v16, a0 -; RV32-BITS-512-NEXT: vrgather.vv v28, v8, v16 -; RV32-BITS-512-NEXT: vrgather.vv v24, v12, v16 +; 
RV32-BITS-512-NEXT: vrsub.vx v8, v8, a0 +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; RV32-BITS-512-NEXT: vmv.v.i v16, 0 +; RV32-BITS-512-NEXT: vmerge.vim v16, v16, 1, v0 +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; RV32-BITS-512-NEXT: vrgather.vv v28, v16, v8 +; RV32-BITS-512-NEXT: vrgather.vv v24, v20, v8 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-512-NEXT: vand.vi v8, v24, 1 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -549,18 +548,18 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i1: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 -; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e16, m8, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vid.v v16 +; RV64-BITS-UNKNOWN-NEXT: vid.v v8 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v8, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v12, v16 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v16, 0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v16, v16, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v16, v8 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v20, v8 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v24, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -583,17 +582,18 @@ ; ; RV64-BITS-512-LABEL: reverse_nxv64i1: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; RV64-BITS-512-NEXT: vmv.v.i v8, 0 -; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-512-NEXT: vid.v v16 +; RV64-BITS-512-NEXT: vid.v v8 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 2 ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vrsub.vx v16, v16, a0 -; RV64-BITS-512-NEXT: vrgather.vv v28, v8, v16 -; RV64-BITS-512-NEXT: vrgather.vv v24, v12, v16 +; RV64-BITS-512-NEXT: vrsub.vx v8, v8, a0 +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; RV64-BITS-512-NEXT: vmv.v.i v16, 0 +; RV64-BITS-512-NEXT: vmerge.vim v16, v16, 1, v0 +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; RV64-BITS-512-NEXT: vrgather.vv v28, v16, v8 +; RV64-BITS-512-NEXT: vrgather.vv v24, v20, v8 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-512-NEXT: vand.vi v8, v24, 1 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 Index: llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -15,27 +15,31 @@ ; SPILL-O0-NEXT: slli a1, a1, 1 ; SPILL-O0-NEXT: sub sp, sp, a1 ; SPILL-O0-NEXT: sw a0, 8(sp) # 4-byte Folded Spill +; SPILL-O0-NEXT: vmv1r.v v10, v9 +; SPILL-O0-NEXT: vmv1r.v v9, v8 ; SPILL-O0-NEXT: csrr a1, vlenb ; SPILL-O0-NEXT: add a1, sp, a1 ; SPILL-O0-NEXT: addi a1, a1, 16 -; SPILL-O0-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill +; SPILL-O0-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill +; SPILL-O0-NEXT: # implicit-def: $v8 ; SPILL-O0-NEXT: vsetvli zero, a0, e64, 
m1, ta, ma -; SPILL-O0-NEXT: vfadd.vv v8, v8, v9 +; SPILL-O0-NEXT: vfadd.vv v8, v9, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: lui a0, %hi(.L.str) ; SPILL-O0-NEXT: addi a0, a0, %lo(.L.str) ; SPILL-O0-NEXT: call puts@plt ; SPILL-O0-NEXT: addi a1, sp, 16 -; SPILL-O0-NEXT: vl1r.v v9, (a1) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl1r.v v10, (a1) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a1, vlenb ; SPILL-O0-NEXT: add a1, sp, a1 ; SPILL-O0-NEXT: addi a1, a1, 16 -; SPILL-O0-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl1r.v v9, (a1) # Unknown-size Folded Reload ; SPILL-O0-NEXT: # kill: def $x11 killed $x10 ; SPILL-O0-NEXT: lw a0, 8(sp) # 4-byte Folded Reload +; SPILL-O0-NEXT: # implicit-def: $v8 ; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; SPILL-O0-NEXT: vfadd.vv v8, v8, v9 +; SPILL-O0-NEXT: vfadd.vv v8, v9, v10 ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 ; SPILL-O0-NEXT: add sp, sp, a0 Index: llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -18,27 +18,31 @@ ; SPILL-O0-NEXT: slli a1, a1, 1 ; SPILL-O0-NEXT: sub sp, sp, a1 ; SPILL-O0-NEXT: sd a0, 16(sp) # 8-byte Folded Spill +; SPILL-O0-NEXT: vmv1r.v v10, v9 +; SPILL-O0-NEXT: vmv1r.v v9, v8 ; SPILL-O0-NEXT: csrr a1, vlenb ; SPILL-O0-NEXT: add a1, sp, a1 ; SPILL-O0-NEXT: addi a1, a1, 32 -; SPILL-O0-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill +; SPILL-O0-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill +; SPILL-O0-NEXT: # implicit-def: $v8 ; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; SPILL-O0-NEXT: vfadd.vv v8, v8, v9 +; SPILL-O0-NEXT: vfadd.vv v8, v9, v10 ; SPILL-O0-NEXT: addi a0, sp, 32 ; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: lui a0, %hi(.L.str) ; SPILL-O0-NEXT: addi a0, a0, %lo(.L.str) ; SPILL-O0-NEXT: call puts@plt ; SPILL-O0-NEXT: addi a1, sp, 32 -; SPILL-O0-NEXT: vl1r.v v9, (a1) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl1r.v v10, (a1) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a1, vlenb ; SPILL-O0-NEXT: add a1, sp, a1 ; SPILL-O0-NEXT: addi a1, a1, 32 -; SPILL-O0-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl1r.v v9, (a1) # Unknown-size Folded Reload ; SPILL-O0-NEXT: # kill: def $x11 killed $x10 ; SPILL-O0-NEXT: ld a0, 16(sp) # 8-byte Folded Reload +; SPILL-O0-NEXT: # implicit-def: $v8 ; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; SPILL-O0-NEXT: vfadd.vv v8, v8, v9 +; SPILL-O0-NEXT: vfadd.vv v8, v9, v10 ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 ; SPILL-O0-NEXT: add sp, sp, a0 Index: llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll +++ llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll @@ -78,11 +78,11 @@ ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vrsub.vi v12, v11, 15 ; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vrsub.vi v8, v11, 7 ; CHECK-NEXT: li a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v0, a0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; CHECK-NEXT: vrsub.vi v8, v11, 7 ; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -224,11 +224,11 @@ ; CHECK-NEXT: vid.v v14 ; CHECK-NEXT: vrsub.vi v16, v14, 
15 ; CHECK-NEXT: vrgather.vv v10, v8, v16 +; CHECK-NEXT: vrsub.vi v8, v14, 7 ; CHECK-NEXT: li a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v0, a0 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu -; CHECK-NEXT: vrsub.vi v8, v14, 7 ; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -373,11 +373,11 @@ ; CHECK-NEXT: vid.v v20 ; CHECK-NEXT: vrsub.vi v24, v20, 15 ; CHECK-NEXT: vrgather.vv v12, v8, v24 +; CHECK-NEXT: vrsub.vi v8, v20, 7 ; CHECK-NEXT: li a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v0, a0 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu -; CHECK-NEXT: vrsub.vi v8, v20, 7 ; CHECK-NEXT: vrgather.vv v12, v16, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -604,11 +604,11 @@ ; CHECK-NEXT: vid.v v14 ; CHECK-NEXT: vrsub.vi v16, v14, 15 ; CHECK-NEXT: vrgather.vv v10, v8, v16 +; CHECK-NEXT: vrsub.vi v8, v14, 7 ; CHECK-NEXT: li a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v0, a0 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu -; CHECK-NEXT: vrsub.vi v8, v14, 7 ; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -724,11 +724,11 @@ ; CHECK-NEXT: vid.v v20 ; CHECK-NEXT: vrsub.vi v24, v20, 15 ; CHECK-NEXT: vrgather.vv v12, v8, v24 +; CHECK-NEXT: vrsub.vi v8, v20, 7 ; CHECK-NEXT: li a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v0, a0 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu -; CHECK-NEXT: vrsub.vi v8, v20, 7 ; CHECK-NEXT: vrgather.vv v12, v16, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret Index: llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll +++ llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll @@ -9,18 +9,18 @@ define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind { ; CHECK-LABEL: vec_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, -1 -; CHECK-NEXT: srli a1, a0, 1 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vmv.v.x v10, a1 -; CHECK-NEXT: vsll.vv v11, v8, v9 -; CHECK-NEXT: vsra.vv v9, v11, v9 +; CHECK-NEXT: vsll.vv v10, v8, v9 +; CHECK-NEXT: vsra.vv v9, v10, v9 ; CHECK-NEXT: vmsne.vv v9, v8, v9 +; CHECK-NEXT: li a0, -1 +; CHECK-NEXT: srli a1, a0, 1 +; CHECK-NEXT: vmv.v.x v11, a1 ; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: slli a0, a0, 63 -; CHECK-NEXT: vmerge.vxm v8, v10, a0, v0 +; CHECK-NEXT: vmerge.vxm v8, v11, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: vmerge.vvm v8, v11, v8, v0 +; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %tmp = call <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64> %x, <2 x i64> %y) ret <2 x i64> %tmp @@ -29,19 +29,19 @@ define <4 x i32> @vec_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; CHECK-LABEL: vec_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: addiw a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.v.x v10, a0 -; CHECK-NEXT: vsll.vv v11, v8, v9 -; CHECK-NEXT: vsra.vv v9, v11, v9 +; CHECK-NEXT: vsll.vv v10, v8, v9 +; CHECK-NEXT: vsra.vv v9, v10, v9 ; CHECK-NEXT: vmsne.vv v9, v8, v9 +; CHECK-NEXT: lui a0, 524288 +; CHECK-NEXT: addiw a0, a0, -1 +; CHECK-NEXT: vmv.v.x v11, a0 ; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: slli a0, a0, 31 -; CHECK-NEXT: vmerge.vxm v8, v10, a0, v0 +; CHECK-NEXT: vmerge.vxm v8, v11, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: vmerge.vvm v8, v11, v8, v0 
+; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %tmp = call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> %x, <4 x i32> %y) ret <4 x i32> %tmp @@ -50,17 +50,17 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind { ; CHECK-LABEL: vec_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: addiw a1, a0, -1 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: vsll.vv v10, v8, v9 ; CHECK-NEXT: vsra.vv v9, v10, v9 -; CHECK-NEXT: vmsne.vv v8, v8, v9 -; CHECK-NEXT: vmv.v.x v9, a1 -; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0 +; CHECK-NEXT: vmsne.vv v9, v8, v9 +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: addiw a1, a0, -1 +; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: vmv.v.v v0, v9 +; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %tmp = call <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16> %x, <8 x i16> %y) ret <8 x i16> %tmp @@ -69,17 +69,17 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; CHECK-LABEL: vec_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; CHECK-NEXT: vmv.v.x v10, a0 -; CHECK-NEXT: vsll.vv v11, v8, v9 -; CHECK-NEXT: vsra.vv v9, v11, v9 +; CHECK-NEXT: vsll.vv v10, v8, v9 +; CHECK-NEXT: vsra.vv v9, v10, v9 ; CHECK-NEXT: vmsne.vv v9, v8, v9 +; CHECK-NEXT: li a0, 127 +; CHECK-NEXT: vmv.v.x v11, a0 ; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: li a0, 128 -; CHECK-NEXT: vmerge.vxm v8, v10, a0, v0 +; CHECK-NEXT: vmerge.vxm v8, v11, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 -; CHECK-NEXT: vmerge.vvm v8, v11, v8, v0 +; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %tmp = call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> %x, <16 x i8> %y) ret <16 x i8> %tmp Index: llvm/test/CodeGen/RISCV/rvv/stepvector.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/stepvector.ll +++ llvm/test/CodeGen/RISCV/rvv/stepvector.ll @@ -591,11 +591,11 @@ ; ; RV64-LABEL: add_stepvector_nxv16i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vadd.vv v8, v8, v8 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 1 ; RV64-NEXT: vadd.vx v16, v8, a0 ; RV64-NEXT: ret entry: Index: llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll +++ llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll @@ -17,12 +17,12 @@ ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-NEXT: vadd.vi v12, v11, -16 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.v.x v0, a0 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; RV32-NEXT: vadd.vi v12, v11, -16 ; RV32-NEXT: vrgather.vv v9, v8, v12, v0.t ; RV32-NEXT: vmsne.vi v9, v9, 0 ; RV32-NEXT: vadd.vi v12, v11, 1 @@ -45,12 +45,12 @@ ; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-NEXT: vadd.vi v12, v11, -16 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.v.x 
v0, a0 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; RV64-NEXT: vadd.vi v12, v11, -16 ; RV64-NEXT: vrgather.vv v9, v8, v12, v0.t ; RV64-NEXT: vmsne.vi v9, v9, 0 ; RV64-NEXT: vadd.vi v12, v11, 1 @@ -107,14 +107,16 @@ define {<2 x i64>, <2 x i64>} @vector_deinterleave_v2i64_v4i64(<4 x i64> %vec) { ; CHECK-LABEL: vector_deinterleave_v2i64_v4i64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vrgather.vi v10, v8, 1 ; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma -; CHECK-NEXT: vslidedown.vi v10, v8, 2 +; CHECK-NEXT: vslidedown.vi v12, v8, 2 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v0, 2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu -; CHECK-NEXT: vrgather.vi v9, v8, 1 -; CHECK-NEXT: vrgather.vi v9, v10, 1, v0.t -; CHECK-NEXT: vslideup.vi v8, v10, 1 +; CHECK-NEXT: vrgather.vi v10, v12, 1, v0.t +; CHECK-NEXT: vslideup.vi v8, v12, 1 +; CHECK-NEXT: vmv.v.v v9, v10 ; CHECK-NEXT: ret %retval = call {<2 x i64>, <2 x i64>} @llvm.experimental.vector.deinterleave2.v4i64(<4 x i64> %vec) ret {<2 x i64>, <2 x i64>} %retval @@ -194,14 +196,16 @@ define {<2 x double>, <2 x double>} @vector_deinterleave_v2f64_v4f64(<4 x double> %vec) { ; CHECK-LABEL: vector_deinterleave_v2f64_v4f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vrgather.vi v10, v8, 1 ; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma -; CHECK-NEXT: vslidedown.vi v10, v8, 2 +; CHECK-NEXT: vslidedown.vi v12, v8, 2 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v0, 2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu -; CHECK-NEXT: vrgather.vi v9, v8, 1 -; CHECK-NEXT: vrgather.vi v9, v10, 1, v0.t -; CHECK-NEXT: vslideup.vi v8, v10, 1 +; CHECK-NEXT: vrgather.vi v10, v12, 1, v0.t +; CHECK-NEXT: vslideup.vi v8, v12, 1 +; CHECK-NEXT: vmv.v.v v9, v10 ; CHECK-NEXT: ret %retval = call {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double> %vec) ret {<2 x double>, <2 x double>} %retval Index: llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll +++ llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll @@ -91,24 +91,39 @@ define {, } @vector_deinterleave_nxv64i1_nxv128i1( %vec) { ; CHECK-LABEL: vector_deinterleave_nxv64i1_nxv128i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v28, v8 +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb +; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: vmerge.vim v16, v8, 1, v0 +; CHECK-NEXT: vmv.v.i v24, 0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmerge.vim v16, v24, 1, v0 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v24, v16, 0 +; CHECK-NEXT: vnsrl.wi v12, v16, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; CHECK-NEXT: vmv1r.v v0, v28 -; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmerge.vim v24, v24, 1, v0 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v28, v8, 0 +; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; CHECK-NEXT: vmsne.vi v0, v24, 0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: addi a0, sp, 16 +; 
CHECK-NEXT: vs1r.v v0, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v24, v16, 8 -; CHECK-NEXT: vnsrl.wi v28, v8, 8 +; CHECK-NEXT: vnsrl.wi v4, v16, 8 +; CHECK-NEXT: vnsrl.wi v0, v24, 8 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma -; CHECK-NEXT: vmsne.vi v8, v24, 0 +; CHECK-NEXT: vmsne.vi v8, v0, 0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv128i1( %vec) ret {, } %retval @@ -119,10 +134,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v12, v16, 0 -; CHECK-NEXT: vnsrl.wi v0, v24, 8 +; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v4, v16, 8 +; CHECK-NEXT: vnsrl.wi v0, v24, 8 ; CHECK-NEXT: vmv8r.v v16, v0 ; CHECK-NEXT: ret %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv128i8( %vec) @@ -134,10 +149,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v12, v16, 0 -; CHECK-NEXT: vnsrl.wi v0, v24, 16 +; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v4, v16, 16 +; CHECK-NEXT: vnsrl.wi v0, v24, 16 ; CHECK-NEXT: vmv8r.v v16, v0 ; CHECK-NEXT: ret %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv64i16( %vec) @@ -152,8 +167,8 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v20, v24, a0 ; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vnsrl.wi v0, v8, 0 ; CHECK-NEXT: vnsrl.wi v4, v24, 0 +; CHECK-NEXT: vnsrl.wi v0, v8, 0 ; CHECK-NEXT: vmv8r.v v8, v0 ; CHECK-NEXT: ret %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv32i32( %vec) @@ -218,26 +233,16 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vrgather.vv v16, v24, v0 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vmv4r.v v24, v16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmv4r.v v12, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmv4r.v v28, v16 -; CHECK-NEXT: vmv8r.v v16, v24 +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmv4r.v v20, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 @@ -350,10 +355,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v12, v16, 0 -; CHECK-NEXT: vnsrl.wi v0, v24, 16 +; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v4, v16, 16 +; CHECK-NEXT: vnsrl.wi v0, v24, 16 ; CHECK-NEXT: vmv8r.v v16, v0 ; CHECK-NEXT: ret %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv64f16( %vec) @@ -368,8 +373,8 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, 
ma ; CHECK-NEXT: vnsrl.wx v20, v24, a0 ; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vnsrl.wi v0, v8, 0 ; CHECK-NEXT: vnsrl.wi v4, v24, 0 +; CHECK-NEXT: vnsrl.wi v0, v8, 0 ; CHECK-NEXT: vmv8r.v v8, v0 ; CHECK-NEXT: ret %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv32f32( %vec) @@ -434,26 +439,16 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vrgather.vv v16, v24, v0 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vmv4r.v v24, v16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmv4r.v v12, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vmv4r.v v28, v16 -; CHECK-NEXT: vmv8r.v v16, v24 +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vmv4r.v v20, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 Index: llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll =================================================================== --- llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -722,21 +722,21 @@ ; ; RV64MV-LABEL: test_srem_vec: ; RV64MV: # %bb.0: -; RV64MV-NEXT: ld a1, 0(a0) +; RV64MV-NEXT: lbu a1, 12(a0) ; RV64MV-NEXT: lwu a2, 8(a0) -; RV64MV-NEXT: srli a3, a1, 2 -; RV64MV-NEXT: lbu a4, 12(a0) -; RV64MV-NEXT: slli a5, a2, 62 -; RV64MV-NEXT: or a3, a5, a3 -; RV64MV-NEXT: srai a3, a3, 31 -; RV64MV-NEXT: slli a4, a4, 32 +; RV64MV-NEXT: slli a1, a1, 32 +; RV64MV-NEXT: ld a3, 0(a0) +; RV64MV-NEXT: or a1, a2, a1 +; RV64MV-NEXT: slli a1, a1, 29 +; RV64MV-NEXT: srai a1, a1, 31 +; RV64MV-NEXT: srli a4, a3, 2 +; RV64MV-NEXT: slli a2, a2, 62 ; RV64MV-NEXT: or a2, a2, a4 -; RV64MV-NEXT: slli a2, a2, 29 ; RV64MV-NEXT: lui a4, %hi(.LCPI3_0) ; RV64MV-NEXT: ld a4, %lo(.LCPI3_0)(a4) ; RV64MV-NEXT: srai a2, a2, 31 -; RV64MV-NEXT: slli a1, a1, 31 -; RV64MV-NEXT: srai a1, a1, 31 +; RV64MV-NEXT: slli a3, a3, 31 +; RV64MV-NEXT: srai a3, a3, 31 ; RV64MV-NEXT: mulh a4, a2, a4 ; RV64MV-NEXT: srli a5, a4, 63 ; RV64MV-NEXT: srai a4, a4, 1 @@ -744,27 +744,27 @@ ; RV64MV-NEXT: lui a5, %hi(.LCPI3_1) ; RV64MV-NEXT: ld a5, %lo(.LCPI3_1)(a5) ; RV64MV-NEXT: add a2, a2, a4 -; RV64MV-NEXT: slli a4, a4, 2 -; RV64MV-NEXT: add a2, a2, a4 -; RV64MV-NEXT: mulh a4, a3, a5 -; RV64MV-NEXT: srli a5, a4, 63 -; RV64MV-NEXT: srai a4, a4, 1 -; RV64MV-NEXT: add a4, a4, a5 -; RV64MV-NEXT: lui a5, %hi(.LCPI3_2) -; RV64MV-NEXT: ld a5, %lo(.LCPI3_2)(a5) -; RV64MV-NEXT: add a3, a3, a4 ; RV64MV-NEXT: slli a4, a4, 3 -; RV64MV-NEXT: sub a3, a3, a4 -; RV64MV-NEXT: mulh a4, a1, a5 +; RV64MV-NEXT: sub a2, a2, a4 +; RV64MV-NEXT: mulh a4, a3, a5 ; RV64MV-NEXT: srli a5, a4, 63 ; RV64MV-NEXT: add a4, a4, a5 ; RV64MV-NEXT: li a5, 6 ; RV64MV-NEXT: mul a4, a4, a5 -; RV64MV-NEXT: sub a1, a1, a4 +; RV64MV-NEXT: sub a3, a3, a4 +; RV64MV-NEXT: lui a4, %hi(.LCPI3_2) +; RV64MV-NEXT: ld a4, %lo(.LCPI3_2)(a4) ; RV64MV-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; RV64MV-NEXT: vslide1down.vx v8, v8, a1 ; RV64MV-NEXT: vslide1down.vx v8, v8, a3 ; 
RV64MV-NEXT: vslide1down.vx v8, v8, a2 +; RV64MV-NEXT: mulh a2, a1, a4 +; RV64MV-NEXT: srli a3, a2, 63 +; RV64MV-NEXT: srai a2, a2, 1 +; RV64MV-NEXT: add a2, a2, a3 +; RV64MV-NEXT: slli a3, a2, 2 +; RV64MV-NEXT: add a1, a1, a2 +; RV64MV-NEXT: add a1, a1, a3 +; RV64MV-NEXT: vslide1down.vx v8, v8, a1 ; RV64MV-NEXT: vslidedown.vi v8, v8, 1 ; RV64MV-NEXT: lui a1, %hi(.LCPI3_3) ; RV64MV-NEXT: addi a1, a1, %lo(.LCPI3_3)