diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -434,6 +434,7 @@
     SDValue visitOR(SDNode *N);
     SDValue visitORLike(SDValue N0, SDValue N1, SDNode *N);
     SDValue visitXOR(SDNode *N);
+    SDValue SimplifyVCastOp(SDNode *N, const SDLoc &DL);
     SDValue SimplifyVBinOp(SDNode *N, const SDLoc &DL);
     SDValue visitSHL(SDNode *N);
     SDValue visitSRA(SDNode *N);
@@ -11777,6 +11778,10 @@
   EVT VT = N->getValueType(0);
   SDLoc DL(N);
 
+  if (VT.isVector())
+    if (SDValue FoldedVOp = SimplifyVCastOp(N, DL))
+      return FoldedVOp;
+
   if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
     return Res;
 
@@ -12022,6 +12027,10 @@
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
+  if (VT.isVector())
+    if (SDValue FoldedVOp = SimplifyVCastOp(N, SDLoc(N)))
+      return FoldedVOp;
+
   if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
     return Res;
 
@@ -15576,6 +15585,10 @@
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
+  if (VT.isVector())
+    if (SDValue FoldedVOp = SimplifyVCastOp(N, SDLoc(N)))
+      return FoldedVOp;
+
   // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
   if (N->hasOneUse() &&
       N->use_begin()->getOpcode() == ISD::FP_ROUND)
@@ -23059,6 +23072,30 @@
   return DAG.getBuildVector(VT, DL, Ops);
 }
 
+/// Visit a vector cast operation, like FP_EXTEND.
+SDValue DAGCombiner::SimplifyVCastOp(SDNode *N, const SDLoc &DL) {
+  EVT VT = N->getValueType(0);
+  assert(VT.isVector() && "SimplifyVCastOp only works on vectors!");
+  EVT EltVT = VT.getVectorElementType();
+
+  SDValue N0 = N->getOperand(0);
+  EVT SrcVT = N0->getValueType(0);
+  unsigned Opcode = N->getOpcode();
+  EVT SrcEltVT = SrcVT.getVectorElementType();
+
+  // TODO: promote operation might also be good here?
+  if (N0.getOpcode() == ISD::SPLAT_VECTOR &&
+      TLI.isOperationLegalOrCustom(Opcode, EltVT)) {
+    SDValue IndexC = DAG.getVectorIdxConstant(0, DL);
+    SDValue Elt =
+        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcEltVT, N0, IndexC);
+    SDValue ScalarBO = DAG.getNode(Opcode, DL, EltVT, Elt, N->getFlags());
+    return DAG.getSplatVector(VT, DL, ScalarBO);
+  }
+
+  return SDValue();
+}
+
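The fold added above scalarizes a cast whose input is a SPLAT_VECTOR: it extracts element 0, performs the cast on the scalar (only when that scalar operation is legal or custom for the element type), and re-splats the widened result. As a minimal illustration, the IR shape this targets looks like the sketch below; the function name is made up for the example, and the pattern simply mirrors the vfcopysign tests updated later in this patch.

define <vscale x 1 x float> @fpext_of_splat(half %s) {
  ; Splat the half, then fpext the whole vector; with the new combine the DAG
  ; instead performs one scalar fpext of %s and splats the f32 result.
  %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
  %ext = fpext <vscale x 1 x half> %splat to <vscale x 1 x float>
  ret <vscale x 1 x float> %ext
}

On RISC-V this is what lets the copysign tests below select a scalar fcvt.s.h plus vfsgnj.vf instead of vfmv.v.f, vfwcvt.f.f.v and a vector-vector vfsgnj.vv.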
 /// Visit a binary vector operation, like ADD.
 SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
   EVT VT = N->getValueType(0);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -410,11 +410,6 @@
               (!cast(instruction_name#"_VV_"#vti.Vti.LMul.MX)
                   vti.Vti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
                   vti.Vti.AVL, vti.Vti.Log2SEW)>;
-    def : Pat<(op (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
-                  (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector (SplatFPOp vti.Vti.ScalarRegClass:$rs1))))),
-              (!cast(instruction_name#"_V"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
-                  vti.Vti.RegClass:$rs2, vti.Vti.ScalarRegClass:$rs1,
-                  vti.Vti.AVL, vti.Vti.Log2SEW)>;
     def : Pat<(op (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
                   (vti.Wti.Vector (SplatFPOp (fpext_oneuse vti.Vti.ScalarRegClass:$rs1)))),
               (!cast(instruction_name#"_V"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
@@ -430,11 +425,6 @@
               (!cast(instruction_name#"_WV_"#vti.Vti.LMul.MX)
                   vti.Wti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
                   vti.Vti.AVL, vti.Vti.Log2SEW)>;
-    def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
-                  (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector (SplatFPOp vti.Vti.ScalarRegClass:$rs1))))),
-              (!cast(instruction_name#"_W"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
-                  vti.Wti.RegClass:$rs2, vti.Vti.ScalarRegClass:$rs1,
-                  vti.Vti.AVL, vti.Vti.Log2SEW)>;
     def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
                   (vti.Wti.Vector (SplatFPOp (fpext_oneuse vti.Vti.ScalarRegClass:$rs1)))),
               (!cast(instruction_name#"_W"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
@@ -456,7 +446,7 @@
               (!cast(instruction_name#"_VV_"#vti.Vti.LMul.MX)
                   vti.Wti.RegClass:$rd, vti.Vti.RegClass:$rs1, vti.Vti.RegClass:$rs2,
                   vti.Vti.AVL, vti.Vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector (SplatFPOp vti.Vti.ScalarRegClass:$rs1)))),
+    def : Pat<(fma (vti.Wti.Vector (SplatFPOp (fpext_oneuse vti.Vti.ScalarRegClass:$rs1))),
                    (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
                    (vti.Wti.Vector vti.Wti.RegClass:$rd)),
               (!cast(instruction_name#"_V"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
@@ -475,13 +465,13 @@
               (!cast(instruction_name#"_VV_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (fpext_oneuse (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1))),
+    def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))),
                    (fneg (wti.Vector (fpext_oneuse (vti.Vector vti.RegClass:$rs2)))),
                    (fneg wti.RegClass:$rd)),
              (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (fneg (wti.Vector (fpext_oneuse (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1))))),
+    def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                    (fpext_oneuse (vti.Vector vti.RegClass:$rs2)),
                    (fneg wti.RegClass:$rd)),
              (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
@@ -500,7 +490,7 @@
               (!cast(instruction_name#"_VV_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (wti.Vector (fpext_oneuse (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)))),
+    def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))),
                    (fpext_oneuse (vti.Vector
vti.RegClass:$rs2)), (fneg wti.RegClass:$rd)), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) @@ -519,13 +509,13 @@ (!cast(instruction_name#"_VV_"#vti.LMul.MX) wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; - def : Pat<(fma (wti.Vector (fpext_oneuse (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)))), + def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))), (fneg (wti.Vector (fpext_oneuse (vti.Vector vti.RegClass:$rs2)))), wti.RegClass:$rd), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; - def : Pat<(fma (fneg (wti.Vector (fpext_oneuse (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1))))), + def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))), (fpext_oneuse (vti.Vector vti.RegClass:$rs2)), wti.RegClass:$rd), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll @@ -604,11 +604,9 @@ define @vfcopysign_exttrunc_vf_nxv1f32_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: fcvt.s.h ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -634,11 +632,9 @@ define @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: fcvt.s.h ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -866,11 +862,9 @@ define @vfcopysign_exttrunc_vf_nxv8f32_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v12, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v16 +; CHECK-NEXT: fcvt.s.h ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -896,11 +890,9 @@ define @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v12, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 +; CHECK-NEXT: fcvt.s.h ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 ; CHECK-NEXT: 
ret %head = insertelement poison, half %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1082,13 +1074,9 @@ define @vfcopysign_exttrunc_vf_nxv1f64_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: fcvt.d.h ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1116,13 +1104,9 @@ define @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: fcvt.d.h ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1148,11 +1132,9 @@ define @vfcopysign_exttrunc_vf_nxv1f64_nxv1f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: fcvt.d.s ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1178,11 +1160,9 @@ define @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: fcvt.d.s ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1354,13 +1334,9 @@ define @vfcopysign_exttrunc_vf_nxv8f64_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v16, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v20, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v24, v20 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v24 +; CHECK-NEXT: fcvt.d.h ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1388,13 +1364,9 @@ define @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfmv.v.f v16, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v20, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; CHECK-NEXT: vfwcvt.f.f.v v24, v20 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 +; CHECK-NEXT: fcvt.d.h ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1420,11 +1392,9 @@ define @vfcopysign_exttrunc_vf_nxv8f64_nxv8f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vfmv.v.f v16, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v8, v24 +; CHECK-NEXT: fcvt.d.s ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1450,11 +1420,9 @@ define @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vfmv.v.f v16, fa0 -; CHECK-NEXT: vfwcvt.f.f.v v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 +; CHECK-NEXT: fcvt.d.s ft0, fa0 +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 %splat = shufflevector %head, poison, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 define @vnsra_wv_nxv1i32_sext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv1i32_sext: @@ -17,11 +17,20 @@ } define @vnsra_wx_i32_nxv1i32_sext( %va, i32 %b) { -; CHECK-LABEL: vnsra_wx_i32_nxv1i32_sext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsra.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vnsra_wx_i32_nxv1i32_sext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vnsra.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsra_wx_i32_nxv1i32_sext: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -58,12 +67,21 @@ } define @vnsra_wx_i32_nxv2i32_sext( %va, i32 %b) { -; CHECK-LABEL: vnsra_wx_i32_nxv2i32_sext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsra.wx v10, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret 
+; RV32-LABEL: vnsra_wx_i32_nxv2i32_sext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vnsra.wx v10, v8, a0 +; RV32-NEXT: vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsra_wx_i32_nxv2i32_sext: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsra.vx v10, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v10 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -101,12 +119,21 @@ } define @vnsra_wx_i32_nxv4i32_sext( %va, i32 %b) { -; CHECK-LABEL: vnsra_wx_i32_nxv4i32_sext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsra.wx v12, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vnsra_wx_i32_nxv4i32_sext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vnsra.wx v12, v8, a0 +; RV32-NEXT: vmv.v.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsra_wx_i32_nxv4i32_sext: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsra.vx v12, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v12 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -144,12 +171,21 @@ } define @vnsra_wx_i32_nxv8i32_sext( %va, i32 %b) { -; CHECK-LABEL: vnsra_wx_i32_nxv8i32_sext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsra.wx v16, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vnsra_wx_i32_nxv8i32_sext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vnsra.wx v16, v8, a0 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsra_wx_i32_nxv8i32_sext: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsra.vx v16, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v16 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -186,11 +222,21 @@ } define @vnsra_wx_i32_nxv1i32_zext( %va, i32 %b) { -; CHECK-LABEL: vnsra_wx_i32_nxv1i32_zext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsra.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vnsra_wx_i32_nxv1i32_zext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vnsra.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsra_wx_i32_nxv1i32_zext: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -227,12 +273,22 @@ } define @vnsra_wx_i32_nxv2i32_zext( %va, i32 %b) { -; CHECK-LABEL: vnsra_wx_i32_nxv2i32_zext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsra.wx v10, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vnsra_wx_i32_nxv2i32_zext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vnsra.wx v10, v8, a0 +; RV32-NEXT: 
vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsra_wx_i32_nxv2i32_zext: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsra.vx v10, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v10 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -270,12 +326,22 @@ } define @vnsra_wx_i32_nxv4i32_zext( %va, i32 %b) { -; CHECK-LABEL: vnsra_wx_i32_nxv4i32_zext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsra.wx v12, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vnsra_wx_i32_nxv4i32_zext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vnsra.wx v12, v8, a0 +; RV32-NEXT: vmv.v.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsra_wx_i32_nxv4i32_zext: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsra.vx v12, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v12 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -313,12 +379,22 @@ } define @vnsra_wx_i32_nxv8i32_zext( %va, i32 %b) { -; CHECK-LABEL: vnsra_wx_i32_nxv8i32_zext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsra.wx v16, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vnsra_wx_i32_nxv8i32_zext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vnsra.wx v16, v8, a0 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsra_wx_i32_nxv8i32_zext: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsra.vx v16, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v16 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 define @vnsrl_wv_nxv1i32_sext( %va, %vb) { ; CHECK-LABEL: vnsrl_wv_nxv1i32_sext: @@ -17,11 +17,20 @@ } define @vnsrl_wx_i32_nxv1i32_sext( %va, i32 %b) { -; CHECK-LABEL: vnsrl_wx_i32_nxv1i32_sext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vnsrl_wx_i32_nxv1i32_sext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vnsrl.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsrl_wx_i32_nxv1i32_sext: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsrl.vx v8, v8, 
a0 +; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -58,12 +67,21 @@ } define @vnsrl_wx_i32_nxv2i32_sext( %va, i32 %b) { -; CHECK-LABEL: vnsrl_wx_i32_nxv2i32_sext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vnsrl_wx_i32_nxv2i32_sext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vnsrl.wx v10, v8, a0 +; RV32-NEXT: vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsrl_wx_i32_nxv2i32_sext: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsrl.vx v10, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v10 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -101,12 +119,21 @@ } define @vnsrl_wx_i32_nxv4i32_sext( %va, i32 %b) { -; CHECK-LABEL: vnsrl_wx_i32_nxv4i32_sext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vnsrl_wx_i32_nxv4i32_sext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vnsrl.wx v12, v8, a0 +; RV32-NEXT: vmv.v.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsrl_wx_i32_nxv4i32_sext: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsrl.vx v12, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v12 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -144,12 +171,21 @@ } define @vnsrl_wx_i32_nxv8i32_sext( %va, i32 %b) { -; CHECK-LABEL: vnsrl_wx_i32_nxv8i32_sext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vnsrl_wx_i32_nxv8i32_sext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vnsrl.wx v16, v8, a0 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsrl_wx_i32_nxv8i32_sext: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsrl.vx v16, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v16 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -186,11 +222,21 @@ } define @vnsrl_wx_i32_nxv1i32_zext( %va, i32 %b) { -; CHECK-LABEL: vnsrl_wx_i32_nxv1i32_zext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vnsrl_wx_i32_nxv1i32_zext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vnsrl.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsrl_wx_i32_nxv1i32_zext: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = 
shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -227,12 +273,22 @@ } define @vnsrl_wx_i32_nxv2i32_zext( %va, i32 %b) { -; CHECK-LABEL: vnsrl_wx_i32_nxv2i32_zext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vnsrl_wx_i32_nxv2i32_zext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vnsrl.wx v10, v8, a0 +; RV32-NEXT: vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsrl_wx_i32_nxv2i32_zext: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsrl.vx v10, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v10 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -270,12 +326,22 @@ } define @vnsrl_wx_i32_nxv4i32_zext( %va, i32 %b) { -; CHECK-LABEL: vnsrl_wx_i32_nxv4i32_zext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vnsrl_wx_i32_nxv4i32_zext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vnsrl.wx v12, v8, a0 +; RV32-NEXT: vmv.v.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsrl_wx_i32_nxv4i32_zext: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsrl.vx v12, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v12 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -313,12 +379,22 @@ } define @vnsrl_wx_i32_nxv8i32_zext( %va, i32 %b) { -; CHECK-LABEL: vnsrl_wx_i32_nxv8i32_zext: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vnsrl_wx_i32_nxv8i32_zext: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vnsrl.wx v16, v8, a0 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vnsrl_wx_i32_nxv8i32_zext: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsrl.vx v16, v8, a0 +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vncvt.x.x.w v8, v16 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 define @vwadd_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwadd_vv_nxv1i64: @@ -29,12 +29,20 @@ } define @vwadd_vx_nxv1i64( %va, i32 %b) { -; 
CHECK-LABEL: vwadd_vx_nxv1i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vwadd.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: vwadd_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vwadd.vx v9, v8, a0 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vwadd_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsext.vf2 v9, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vadd.vx v8, v9, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sext %va to @@ -44,12 +52,21 @@ } define @vwaddu_vx_nxv1i64( %va, i32 %b) { -; CHECK-LABEL: vwaddu_vx_nxv1i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vwaddu.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: vwaddu_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vwaddu.vx v9, v8, a0 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vwaddu_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vzext.vf2 v9, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vadd.vx v8, v9, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = zext %va to @@ -83,11 +100,18 @@ } define @vwadd_wx_nxv1i64( %va, i32 %b) { -; CHECK-LABEL: vwadd_wx_nxv1i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vwadd.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vwadd_wx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vwadd.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vwadd_wx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -96,11 +120,19 @@ } define @vwaddu_wx_nxv1i64( %va, i32 %b) { -; CHECK-LABEL: vwaddu_wx_nxv1i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vwaddu.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vwaddu_wx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vwaddu.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vwaddu_wx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -135,12 +167,20 @@ } define @vwadd_vx_nxv2i64( %va, i32 %b) { -; CHECK-LABEL: vwadd_vx_nxv2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vwadd.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vwadd_vx_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vwadd.vx v10, v8, a0 +; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vwadd_vx_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsext.vf2 v10, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vadd.vx v8, v10, a0 +; RV64-NEXT: ret %head = insertelement poison, 
i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sext %va to @@ -150,12 +190,21 @@ } define @vwaddu_vx_nxv2i64( %va, i32 %b) { -; CHECK-LABEL: vwaddu_vx_nxv2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vwaddu.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vwaddu_vx_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vwaddu.vx v10, v8, a0 +; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vwaddu_vx_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vzext.vf2 v10, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vadd.vx v8, v10, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = zext %va to @@ -189,11 +238,18 @@ } define @vwadd_wx_nxv2i64( %va, i32 %b) { -; CHECK-LABEL: vwadd_wx_nxv2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vwadd.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vwadd_wx_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vwadd.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vwadd_wx_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -202,11 +258,19 @@ } define @vwaddu_wx_nxv2i64( %va, i32 %b) { -; CHECK-LABEL: vwaddu_wx_nxv2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vwaddu.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vwaddu_wx_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vwaddu.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vwaddu_wx_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -241,12 +305,20 @@ } define @vwadd_vx_nxv4i64( %va, i32 %b) { -; CHECK-LABEL: vwadd_vx_nxv4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vwadd.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vwadd_vx_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vwadd.vx v12, v8, a0 +; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vwadd_vx_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v12, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vadd.vx v8, v12, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sext %va to @@ -256,12 +328,21 @@ } define @vwaddu_vx_nxv4i64( %va, i32 %b) { -; CHECK-LABEL: vwaddu_vx_nxv4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vwaddu.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vwaddu_vx_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vwaddu.vx v12, v8, a0 +; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vwaddu_vx_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli 
a1, zero, e64, m4, ta, mu +; RV64-NEXT: vzext.vf2 v12, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vadd.vx v8, v12, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = zext %va to @@ -295,11 +376,18 @@ } define @vwadd_wx_nxv4i64( %va, i32 %b) { -; CHECK-LABEL: vwadd_wx_nxv4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vwadd.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vwadd_wx_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vwadd.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vwadd_wx_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -308,11 +396,19 @@ } define @vwaddu_wx_nxv4i64( %va, i32 %b) { -; CHECK-LABEL: vwaddu_wx_nxv4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vwaddu.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vwaddu_wx_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vwaddu.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vwaddu_wx_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to @@ -347,12 +443,20 @@ } define @vwadd_vx_nxv8i64( %va, i32 %b) { -; CHECK-LABEL: vwadd_vx_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vwadd.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vwadd_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vwadd.vx v16, v8, a0 +; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vwadd_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf2 v16, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vadd.vx v8, v16, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = sext %va to @@ -362,12 +466,21 @@ } define @vwaddu_vx_nxv8i64( %va, i32 %b) { -; CHECK-LABEL: vwaddu_vx_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vwaddu.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vwaddu_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vwaddu.vx v16, v8, a0 +; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vwaddu_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf2 v16, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vadd.vx v8, v16, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vc = zext %va to @@ -401,11 +514,18 @@ } define @vwadd_wx_nxv8i64( %va, i32 %b) { -; CHECK-LABEL: vwadd_wx_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vwadd.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vwadd_wx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, 
mu +; RV32-NEXT: vwadd.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vwadd_wx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = sext %splat to @@ -414,11 +534,19 @@ } define @vwaddu_wx_nxv8i64( %va, i32 %b) { -; CHECK-LABEL: vwaddu_wx_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vwaddu.wx v8, v8, a0 -; CHECK-NEXT: ret +; RV32-LABEL: vwaddu_wx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vwaddu.wx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vwaddu_wx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vb = zext %splat to diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 define @vwmacc_vv_nxv1i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmacc_vv_nxv1i32: @@ -20,12 +20,21 @@ } define @vwmacc_vx_nxv1i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmacc_vx_nxv1i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vwmacc.vx v9, a0, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: vwmacc_vx_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vwmacc.vx v9, a0, v8 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmacc_vx_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsext.vf2 v10, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vmadd.vx v10, a0, v9 +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = sext %va to @@ -52,12 +61,22 @@ } define @vwmaccu_vx_nxv1i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccu_vx_nxv1i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vwmaccu.vx v9, a0, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccu_vx_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vwmaccu.vx v9, a0, v8 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccu_vx_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vzext.vf2 v10, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vmadd.vx v10, a0, v9 +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = zext %va to @@ -84,12 +103,21 @@ } define @vwmaccsu_vx_nxv1i32( %va, i32 %b, %vc) { -; CHECK-LABEL: 
vwmaccsu_vx_nxv1i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vwmaccsu.vx v9, a0, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccsu_vx_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vwmaccsu.vx v9, a0, v8 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccsu_vx_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vzext.vf2 v10, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vmadd.vx v10, a0, v9 +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = zext %va to @@ -101,12 +129,22 @@ } define @vwmaccus_vx_nxv1i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccus_vx_nxv1i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vwmaccus.vx v9, a0, v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccus_vx_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vwmaccus.vx v9, a0, v8 +; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccus_vx_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsext.vf2 v10, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vmadd.vx v10, a0, v9 +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = sext %va to @@ -133,12 +171,21 @@ } define @vwmacc_vx_nxv2i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmacc_vx_nxv2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vwmacc.vx v10, a0, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vwmacc_vx_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vwmacc.vx v10, a0, v8 +; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmacc_vx_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsext.vf2 v12, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vmadd.vx v12, a0, v10 +; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = sext %va to @@ -165,12 +212,22 @@ } define @vwmaccu_vx_nxv2i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccu_vx_nxv2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vwmaccu.vx v10, a0, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccu_vx_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vwmaccu.vx v10, a0, v8 +; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccu_vx_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vzext.vf2 v12, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vmadd.vx v12, a0, v10 +; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = zext %va to @@ -197,12 +254,21 @@ } define @vwmaccsu_vx_nxv2i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccsu_vx_nxv2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vwmaccsu.vx v10, a0, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccsu_vx_nxv2i32: +; RV32: # %bb.0: +; 
RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vwmaccsu.vx v10, a0, v8 +; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccsu_vx_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vzext.vf2 v12, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vmadd.vx v12, a0, v10 +; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = zext %va to @@ -214,12 +280,22 @@ } define @vwmaccus_vx_nxv2i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccus_vx_nxv2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vwmaccus.vx v10, a0, v8 -; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccus_vx_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vwmaccus.vx v10, a0, v8 +; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccus_vx_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsext.vf2 v12, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vmadd.vx v12, a0, v10 +; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = sext %va to @@ -246,12 +322,21 @@ } define @vwmacc_vx_nxv4i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmacc_vx_nxv4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vwmacc.vx v12, a0, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vwmacc_vx_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vwmacc.vx v12, a0, v8 +; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmacc_vx_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v16, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vmadd.vx v16, a0, v12 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = sext %va to @@ -278,12 +363,22 @@ } define @vwmaccu_vx_nxv4i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccu_vx_nxv4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vwmaccu.vx v12, a0, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccu_vx_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vwmaccu.vx v12, a0, v8 +; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccu_vx_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vzext.vf2 v16, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vmadd.vx v16, a0, v12 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = zext %va to @@ -310,12 +405,21 @@ } define @vwmaccsu_vx_nxv4i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccsu_vx_nxv4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vwmaccsu.vx v12, a0, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccsu_vx_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vwmaccsu.vx v12, a0, v8 +; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccsu_vx_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m4, 
ta, mu +; RV64-NEXT: vzext.vf2 v16, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vmadd.vx v16, a0, v12 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = zext %va to @@ -327,12 +431,22 @@ } define @vwmaccus_vx_nxv4i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccus_vx_nxv4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vwmaccus.vx v12, a0, v8 -; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccus_vx_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vwmaccus.vx v12, a0, v8 +; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccus_vx_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v16, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vmadd.vx v16, a0, v12 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = sext %va to @@ -359,12 +473,21 @@ } define @vwmacc_vx_nxv8i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmacc_vx_nxv8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vwmacc.vx v16, a0, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vwmacc_vx_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vwmacc.vx v16, a0, v8 +; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmacc_vx_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf2 v24, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vmadd.vx v24, a0, v16 +; RV64-NEXT: vmv.v.v v8, v24 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = sext %va to @@ -391,12 +514,22 @@ } define @vwmaccu_vx_nxv8i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccu_vx_nxv8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vwmaccu.vx v16, a0, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccu_vx_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vwmaccu.vx v16, a0, v8 +; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccu_vx_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf2 v24, v8 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: vmadd.vx v24, a0, v16 +; RV64-NEXT: vmv.v.v v8, v24 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, poison, zeroinitializer %vd = zext %va to @@ -423,12 +556,21 @@ } define @vwmaccsu_vx_nxv8i32( %va, i32 %b, %vc) { -; CHECK-LABEL: vwmaccsu_vx_nxv8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vwmaccsu.vx v16, a0, v8 -; CHECK-NEXT: vmv8r.v v8, v16 -; CHECK-NEXT: ret +; RV32-LABEL: vwmaccsu_vx_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vwmaccsu.vx v16, a0, v8 +; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vwmaccsu_vx_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf2 v24, v8 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: vmadd.vx v24, a0, v16 +; RV64-NEXT: vmv.v.v v8, v24 +; RV64-NEXT: ret %head = insertelement poison, i32 %b, i32 0 %splat = shufflevector %head, 
poison, zeroinitializer
%vd = zext %va to
@@ -440,12 +582,22 @@
}
define @vwmaccus_vx_nxv8i32( %va, i32 %b, %vc) {
-; CHECK-LABEL: vwmaccus_vx_nxv8i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vwmaccus.vx v16, a0, v8
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmaccus_vx_nxv8i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; RV32-NEXT: vwmaccus.vx v16, a0, v8
+; RV32-NEXT: vmv8r.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmaccus_vx_nxv8i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf2 v24, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmadd.vx v24, a0, v16
+; RV64-NEXT: vmv.v.v v8, v24
+; RV64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vd = sext %va to
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define @vwmul_vv_nxv1i64( %va, %vb) {
; CHECK-LABEL: vwmul_vv_nxv1i64:
@@ -42,12 +42,20 @@
}
define @vwmul_vx_nxv1i64( %va, i32 %b) {
-; CHECK-LABEL: vwmul_vx_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vwmul.vx v9, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmul_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; RV32-NEXT: vwmul.vx v9, v8, a0
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmul_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; RV64-NEXT: vsext.vf2 v9, v8
+; RV64-NEXT: sext.w a0, a0
+; RV64-NEXT: vmul.vx v8, v9, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = sext %va to
@@ -57,12 +65,21 @@
}
define @vwmulu_vx_nxv1i64( %va, i32 %b) {
-; CHECK-LABEL: vwmulu_vx_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vwmulu.vx v9, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmulu_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; RV32-NEXT: vwmulu.vx v9, v8, a0
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmulu_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; RV64-NEXT: vzext.vf2 v9, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmul.vx v8, v9, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = zext %va to
@@ -72,12 +89,21 @@
}
define @vwmulsu_vx_nxv1i64( %va, i32 %b) {
-; CHECK-LABEL: vwmulsu_vx_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vwmulsu.vx v9, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmulsu_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; RV32-NEXT: vwmulsu.vx v9, v8, a0
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmulsu_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; RV64-NEXT: vsext.vf2 v9, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmul.vx v8, v9, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = sext %va to
@@ -126,12 +152,20 @@
}
define @vwmul_vx_nxv2i64( %va, i32 %b) {
-; CHECK-LABEL: vwmul_vx_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vwmul.vx v10, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v10
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmul_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; RV32-NEXT: vwmul.vx v10, v8, a0
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmul_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT: vsext.vf2 v10, v8
+; RV64-NEXT: sext.w a0, a0
+; RV64-NEXT: vmul.vx v8, v10, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = sext %va to
@@ -141,12 +175,21 @@
}
define @vwmulu_vx_nxv2i64( %va, i32 %b) {
-; CHECK-LABEL: vwmulu_vx_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vwmulu.vx v10, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v10
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmulu_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; RV32-NEXT: vwmulu.vx v10, v8, a0
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmulu_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT: vzext.vf2 v10, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmul.vx v8, v10, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = zext %va to
@@ -156,12 +199,21 @@
}
define @vwmulsu_vx_nxv2i64( %va, i32 %b) {
-; CHECK-LABEL: vwmulsu_vx_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vwmulsu.vx v10, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v10
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmulsu_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; RV32-NEXT: vwmulsu.vx v10, v8, a0
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmulsu_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT: vsext.vf2 v10, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmul.vx v8, v10, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = sext %va to
@@ -210,12 +262,20 @@
}
define @vwmul_vx_nxv4i64( %va, i32 %b) {
-; CHECK-LABEL: vwmul_vx_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vwmul.vx v12, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v12
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmul_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT: vwmul.vx v12, v8, a0
+; RV32-NEXT: vmv4r.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmul_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; RV64-NEXT: vsext.vf2 v12, v8
+; RV64-NEXT: sext.w a0, a0
+; RV64-NEXT: vmul.vx v8, v12, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = sext %va to
@@ -225,12 +285,21 @@
}
define @vwmulu_vx_nxv4i64( %va, i32 %b) {
-; CHECK-LABEL: vwmulu_vx_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vwmulu.vx v12, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v12
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmulu_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT: vwmulu.vx v12, v8, a0
+; RV32-NEXT: vmv4r.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmulu_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; RV64-NEXT: vzext.vf2 v12, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmul.vx v8, v12, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = zext %va to
@@ -240,12 +309,21 @@
}
define @vwmulsu_vx_nxv4i64( %va, i32 %b) {
-; CHECK-LABEL: vwmulsu_vx_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vwmulsu.vx v12, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v12
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmulsu_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT: vwmulsu.vx v12, v8, a0
+; RV32-NEXT: vmv4r.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmulsu_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; RV64-NEXT: vsext.vf2 v12, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmul.vx v8, v12, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = sext %va to
@@ -294,12 +372,20 @@
}
define @vwmul_vx_nxv8i64( %va, i32 %b) {
-; CHECK-LABEL: vwmul_vx_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vwmul.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmul_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; RV32-NEXT: vwmul.vx v16, v8, a0
+; RV32-NEXT: vmv8r.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmul_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf2 v16, v8
+; RV64-NEXT: sext.w a0, a0
+; RV64-NEXT: vmul.vx v8, v16, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = sext %va to
@@ -309,12 +395,21 @@
}
define @vwmulu_vx_nxv8i64( %va, i32 %b) {
-; CHECK-LABEL: vwmulu_vx_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vwmulu.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmulu_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; RV32-NEXT: vwmulu.vx v16, v8, a0
+; RV32-NEXT: vmv8r.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmulu_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT: vzext.vf2 v16, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmul.vx v8, v16, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = zext %va to
@@ -324,12 +419,21 @@
}
define @vwmulsu_vx_nxv8i64( %va, i32 %b) {
-; CHECK-LABEL: vwmulsu_vx_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vwmulsu.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: ret
+; RV32-LABEL: vwmulsu_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; RV32-NEXT: vwmulsu.vx v16, v8, a0
+; RV32-NEXT: vmv8r.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwmulsu_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf2 v16, v8
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srli a0, a0, 32
+; RV64-NEXT: vmul.vx v8, v16, a0
+; RV64-NEXT: ret
%head = insertelement undef, i32 %b, i32 0
%splat = shufflevector %head, undef, zeroinitializer
%vc = sext %va to
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK64
define @vwsub_vv_nxv1i64( %va, %vb) {
; CHECK-LABEL: vwsub_vv_nxv1i64:
@@ -29,12 +29,20 @@
}
define @vwsub_vx_nxv1i64( %va, i32 %b) {
-; CHECK-LABEL: vwsub_vx_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vwsub.vx v9, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsub_vx_nxv1i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK32-NEXT: vwsub.vx v9, v8, a0
+; CHECK32-NEXT: vmv1r.v v8, v9
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsub_vx_nxv1i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK64-NEXT: vsext.vf2 v9, v8
+; CHECK64-NEXT: sext.w a0, a0
+; CHECK64-NEXT: vsub.vx v8, v9, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vc = sext %va to
@@ -44,12 +52,21 @@
}
define @vwsubu_vx_nxv1i64( %va, i32 %b) {
-; CHECK-LABEL: vwsubu_vx_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vwsubu.vx v9, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsubu_vx_nxv1i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK32-NEXT: vwsubu.vx v9, v8, a0
+; CHECK32-NEXT: vmv1r.v v8, v9
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsubu_vx_nxv1i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK64-NEXT: vzext.vf2 v9, v8
+; CHECK64-NEXT: slli a0, a0, 32
+; CHECK64-NEXT: srli a0, a0, 32
+; CHECK64-NEXT: vsub.vx v8, v9, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vc = zext %va to
@@ -83,11 +100,18 @@
}
define @vwsub_wx_nxv1i64( %va, i32 %b) {
-; CHECK-LABEL: vwsub_wx_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vwsub.wx v8, v8, a0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsub_wx_nxv1i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK32-NEXT: vwsub.wx v8, v8, a0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsub_wx_nxv1i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: sext.w a0, a0
+; CHECK64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK64-NEXT: vsub.vx v8, v8, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vb = sext %splat to
@@ -96,11 +120,19 @@
}
define @vwsubu_wx_nxv1i64( %va, i32 %b) {
-; CHECK-LABEL: vwsubu_wx_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vwsubu.wx v8, v8, a0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsubu_wx_nxv1i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK32-NEXT: vwsubu.wx v8, v8, a0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsubu_wx_nxv1i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: slli a0, a0, 32
+; CHECK64-NEXT: srli a0, a0, 32
+; CHECK64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK64-NEXT: vsub.vx v8, v8, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vb = zext %splat to
@@ -135,12 +167,20 @@
}
define @vwsub_vx_nxv2i64( %va, i32 %b) {
-; CHECK-LABEL: vwsub_vx_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vwsub.vx v10, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v10
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsub_vx_nxv2i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK32-NEXT: vwsub.vx v10, v8, a0
+; CHECK32-NEXT: vmv2r.v v8, v10
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsub_vx_nxv2i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK64-NEXT: vsext.vf2 v10, v8
+; CHECK64-NEXT: sext.w a0, a0
+; CHECK64-NEXT: vsub.vx v8, v10, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vc = sext %va to
@@ -150,12 +190,21 @@
}
define @vwsubu_vx_nxv2i64( %va, i32 %b) {
-; CHECK-LABEL: vwsubu_vx_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vwsubu.vx v10, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v10
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsubu_vx_nxv2i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK32-NEXT: vwsubu.vx v10, v8, a0
+; CHECK32-NEXT: vmv2r.v v8, v10
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsubu_vx_nxv2i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK64-NEXT: vzext.vf2 v10, v8
+; CHECK64-NEXT: slli a0, a0, 32
+; CHECK64-NEXT: srli a0, a0, 32
+; CHECK64-NEXT: vsub.vx v8, v10, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vc = zext %va to
@@ -189,11 +238,18 @@
}
define @vwsub_wx_nxv2i64( %va, i32 %b) {
-; CHECK-LABEL: vwsub_wx_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vwsub.wx v8, v8, a0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsub_wx_nxv2i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK32-NEXT: vwsub.wx v8, v8, a0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsub_wx_nxv2i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: sext.w a0, a0
+; CHECK64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK64-NEXT: vsub.vx v8, v8, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vb = sext %splat to
@@ -202,11 +258,19 @@
}
define @vwsubu_wx_nxv2i64( %va, i32 %b) {
-; CHECK-LABEL: vwsubu_wx_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vwsubu.wx v8, v8, a0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsubu_wx_nxv2i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK32-NEXT: vwsubu.wx v8, v8, a0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsubu_wx_nxv2i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: slli a0, a0, 32
+; CHECK64-NEXT: srli a0, a0, 32
+; CHECK64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK64-NEXT: vsub.vx v8, v8, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vb = zext %splat to
@@ -241,12 +305,20 @@
}
define @vwsub_vx_nxv4i64( %va, i32 %b) {
-; CHECK-LABEL: vwsub_vx_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vwsub.vx v12, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v12
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsub_vx_nxv4i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK32-NEXT: vwsub.vx v12, v8, a0
+; CHECK32-NEXT: vmv4r.v v8, v12
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsub_vx_nxv4i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK64-NEXT: vsext.vf2 v12, v8
+; CHECK64-NEXT: sext.w a0, a0
+; CHECK64-NEXT: vsub.vx v8, v12, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vc = sext %va to
@@ -256,12 +328,21 @@
}
define @vwsubu_vx_nxv4i64( %va, i32 %b) {
-; CHECK-LABEL: vwsubu_vx_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vwsubu.vx v12, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v12
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsubu_vx_nxv4i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK32-NEXT: vwsubu.vx v12, v8, a0
+; CHECK32-NEXT: vmv4r.v v8, v12
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsubu_vx_nxv4i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK64-NEXT: vzext.vf2 v12, v8
+; CHECK64-NEXT: slli a0, a0, 32
+; CHECK64-NEXT: srli a0, a0, 32
+; CHECK64-NEXT: vsub.vx v8, v12, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vc = zext %va to
@@ -295,11 +376,18 @@
}
define @vwsub_wx_nxv4i64( %va, i32 %b) {
-; CHECK-LABEL: vwsub_wx_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vwsub.wx v8, v8, a0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsub_wx_nxv4i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK32-NEXT: vwsub.wx v8, v8, a0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsub_wx_nxv4i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: sext.w a0, a0
+; CHECK64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK64-NEXT: vsub.vx v8, v8, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vb = sext %splat to
@@ -308,11 +396,19 @@
}
define @vwsubu_wx_nxv4i64( %va, i32 %b) {
-; CHECK-LABEL: vwsubu_wx_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vwsubu.wx v8, v8, a0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsubu_wx_nxv4i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK32-NEXT: vwsubu.wx v8, v8, a0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsubu_wx_nxv4i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: slli a0, a0, 32
+; CHECK64-NEXT: srli a0, a0, 32
+; CHECK64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK64-NEXT: vsub.vx v8, v8, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vb = zext %splat to
@@ -347,12 +443,20 @@
}
define @vwsub_vx_nxv8i64( %va, i32 %b) {
-; CHECK-LABEL: vwsub_vx_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vwsub.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsub_vx_nxv8i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK32-NEXT: vwsub.vx v16, v8, a0
+; CHECK32-NEXT: vmv8r.v v8, v16
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsub_vx_nxv8i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK64-NEXT: vsext.vf2 v16, v8
+; CHECK64-NEXT: sext.w a0, a0
+; CHECK64-NEXT: vsub.vx v8, v16, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vc = sext %va to
@@ -362,12 +466,21 @@
}
define @vwsubu_vx_nxv8i64( %va, i32 %b) {
-; CHECK-LABEL: vwsubu_vx_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vwsubu.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsubu_vx_nxv8i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK32-NEXT: vwsubu.vx v16, v8, a0
+; CHECK32-NEXT: vmv8r.v v8, v16
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsubu_vx_nxv8i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK64-NEXT: vzext.vf2 v16, v8
+; CHECK64-NEXT: slli a0, a0, 32
+; CHECK64-NEXT: srli a0, a0, 32
+; CHECK64-NEXT: vsub.vx v8, v16, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vc = zext %va to
@@ -401,11 +514,18 @@
}
define @vwsub_wx_nxv8i64( %va, i32 %b) {
-; CHECK-LABEL: vwsub_wx_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vwsub.wx v8, v8, a0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsub_wx_nxv8i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK32-NEXT: vwsub.wx v8, v8, a0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsub_wx_nxv8i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: sext.w a0, a0
+; CHECK64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK64-NEXT: vsub.vx v8, v8, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vb = sext %splat to
@@ -414,11 +534,19 @@
}
define @vwsubu_wx_nxv8i64( %va, i32 %b) {
-; CHECK-LABEL: vwsubu_wx_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vwsubu.wx v8, v8, a0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: vwsubu_wx_nxv8i64:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK32-NEXT: vwsubu.wx v8, v8, a0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: vwsubu_wx_nxv8i64:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: slli a0, a0, 32
+; CHECK64-NEXT: srli a0, a0, 32
+; CHECK64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK64-NEXT: vsub.vx v8, v8, a0
+; CHECK64-NEXT: ret
%head = insertelement poison, i32 %b, i32 0
%splat = shufflevector %head, poison, zeroinitializer
%vb = zext %splat to