diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -800,6 +800,9 @@
     return true;
   }
 
+  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X)).
+  virtual bool preferScalarizeSplat(unsigned Opc) const { return true; }
+
   /// Return true if the target wants to use the optimization that
   /// turns ext(promotableInst1(...(promotableInstN(load)))) into
   /// promotedInst1(...(promotedInstN(ext(load)))).
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -440,6 +440,7 @@
     SDValue visitOR(SDNode *N);
     SDValue visitORLike(SDValue N0, SDValue N1, SDNode *N);
     SDValue visitXOR(SDNode *N);
+    SDValue SimplifyVCastOp(SDNode *N, const SDLoc &DL);
     SDValue SimplifyVBinOp(SDNode *N, const SDLoc &DL);
     SDValue visitSHL(SDNode *N);
     SDValue visitSRA(SDNode *N);
@@ -12039,6 +12040,10 @@
   EVT VT = N->getValueType(0);
   SDLoc DL(N);
 
+  if (VT.isVector())
+    if (SDValue FoldedVOp = SimplifyVCastOp(N, DL))
+      return FoldedVOp;
+
   // sext(undef) = 0 because the top bit will all be the same.
   if (N0.isUndef())
     return DAG.getConstant(0, DL, VT);
@@ -12288,6 +12293,10 @@
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
+  if (VT.isVector())
+    if (SDValue FoldedVOp = SimplifyVCastOp(N, SDLoc(N)))
+      return FoldedVOp;
+
   // zext(undef) = 0
   if (N0.isUndef())
     return DAG.getConstant(0, SDLoc(N), VT);
@@ -15911,6 +15920,10 @@
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
+  if (VT.isVector())
+    if (SDValue FoldedVOp = SimplifyVCastOp(N, SDLoc(N)))
+      return FoldedVOp;
+
   // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
   if (N->hasOneUse() &&
       N->use_begin()->getOpcode() == ISD::FP_ROUND)
@@ -23676,6 +23689,39 @@
   return DAG.getBuildVector(VT, DL, Ops);
 }
 
+/// Visit a vector cast operation, like FP_EXTEND.
+SDValue DAGCombiner::SimplifyVCastOp(SDNode *N, const SDLoc &DL) {
+  EVT VT = N->getValueType(0);
+  assert(VT.isVector() && "SimplifyVCastOp only works on vectors!");
+  EVT EltVT = VT.getVectorElementType();
+  unsigned Opcode = N->getOpcode();
+
+  SDValue N0 = N->getOperand(0);
+  EVT SrcVT = N0->getValueType(0);
+  EVT SrcEltVT = SrcVT.getVectorElementType();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+  // TODO: promoting the operation might also be good here?
+  int Index0;
+  SDValue Src0 = DAG.getSplatSourceVector(N0, Index0);
+  if (Src0 &&
+      (N0.getOpcode() == ISD::SPLAT_VECTOR ||
+       TLI.isExtractVecEltCheap(VT, Index0)) &&
+      TLI.isOperationLegalOrCustom(Opcode, EltVT) &&
+      TLI.preferScalarizeSplat(Opcode)) {
+    SDValue IndexC = DAG.getVectorIdxConstant(Index0, DL);
+    SDValue Elt =
+        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcEltVT, Src0, IndexC);
+    SDValue ScalarBO = DAG.getNode(Opcode, DL, EltVT, Elt, N->getFlags());
+    if (VT.isScalableVector())
+      return DAG.getSplatVector(VT, DL, ScalarBO);
+    SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), ScalarBO);
+    return DAG.getBuildVector(VT, DL, Ops);
+  }
+
+  return SDValue();
+}
+
 /// Visit a binary vector operation, like ADD.
 SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
   EVT VT = N->getValueType(0);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -387,6 +387,8 @@
   bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
 
+  bool preferScalarizeSplat(unsigned Opc) const override;
+
   bool softPromoteHalfType() const override { return true; }
 
   /// Return the register type for a given MVT, ensuring vectors are treated
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12860,6 +12860,14 @@
   return OptSize && !VT.isVector();
 }
 
+bool RISCVTargetLowering::preferScalarizeSplat(unsigned Opc) const {
+  // Scalarizing ZERO_EXTEND and SIGN_EXTEND might stop them from matching
+  // widening instructions in some situations.
+  if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND)
+    return false;
+  return true;
+}
+
 #define GET_REGISTER_MATCHER
 #include "RISCVGenAsmMatcher.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -477,9 +477,8 @@
               (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                   vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
-                       (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
-                       (vti.Mask true_mask), (XLenVT srcvalue))),
+    def : Pat<(fma (wti.Vector (SplatFPOp
+                       (fpext_oneuse vti.ScalarRegClass:$rs1))),
                    (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue))),
@@ -503,9 +502,7 @@
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                  wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                  vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (riscv_fpextend_vl_oneuse
-                       (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
-                       (vti.Mask true_mask), (XLenVT srcvalue)),
+    def : Pat<(fma (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)),
                    (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue)))),
@@ -513,9 +510,7 @@
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                  wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                  vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
-                       (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
-                       (vti.Mask true_mask), (XLenVT srcvalue)))),
+    def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                    (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue)),
                    (fneg wti.RegClass:$rd)),
@@ -538,9 +533,7 @@
             (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
-                       (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
-                       (vti.Mask true_mask), (XLenVT srcvalue))),
+    def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))),
                    (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue)),
                    (fneg wti.RegClass:$rd)),
@@ -563,9 +556,7 @@
             (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 wti.RegClass:$rd,
                vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
-                       (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
-                       (vti.Mask true_mask), (XLenVT srcvalue))),
+    def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))),
                    (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                        (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue)))),
@@ -573,9 +564,7 @@
             (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
-    def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
-                       (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
-                       (vti.Mask true_mask), (XLenVT srcvalue)))),
+    def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                    (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                        (vti.Mask true_mask), (XLenVT srcvalue)),
                    wti.RegClass:$rd),
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
@@ -604,11 +604,9 @@
 define <vscale x 1 x float> @vfcopysign_exttrunc_vf_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, half %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    fcvt.s.h ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -634,11 +632,9 @@
 define <vscale x 1 x float> @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, half %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
+; CHECK-NEXT:    fcvt.s.h ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -866,11 +862,9 @@
 define <vscale x 8 x float> @vfcopysign_exttrunc_vf_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, half %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v12, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v16, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    fcvt.s.h ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -896,11 +890,9 @@
 define <vscale x 8 x float> @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, half %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v12, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v16, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT:    fcvt.s.h ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1082,13 +1074,9 @@
 define <vscale x 1 x double> @vfcopysign_exttrunc_vf_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, half %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    fcvt.d.h ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -1116,13 +1104,9 @@
 define <vscale x 1 x double> @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, half %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT:    fcvt.d.h ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -1148,11 +1132,9 @@
 define <vscale x 1 x double> @vfcopysign_exttrunc_vf_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, float %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    fcvt.d.s ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
   %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
@@ -1178,11 +1160,9 @@
 define <vscale x 1 x double> @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, float %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
+; CHECK-NEXT:    fcvt.d.s ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
   %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
@@ -1354,13 +1334,9 @@
 define <vscale x 8 x double> @vfcopysign_exttrunc_vf_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, half %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v20, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v24, v20
-; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v24
+; CHECK-NEXT:    fcvt.d.h ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1388,13 +1364,9 @@
 define <vscale x 8 x double> @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, half %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v20, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v24, v20
-; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v24
+; CHECK-NEXT:    fcvt.d.h ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1420,11 +1392,9 @@
 define <vscale x 8 x double> @vfcopysign_exttrunc_vf_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, float %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v24, v16
-; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v8, v24
+; CHECK-NEXT:    fcvt.d.s ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1450,11 +1420,9 @@
 define <vscale x 8 x double> @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, float %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, fa0
-; CHECK-NEXT:    vfwcvt.f.f.v v24, v16
-; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v24
+; CHECK-NEXT:    fcvt.d.s ft0, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <vscale x 1 x i32> @vnsra_wv_nxv1i32_sext(<vscale x 1 x i64> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv1i32_sext:
@@ -341,3 +341,6 @@
   %y = trunc %x to
   ret %y
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <vscale x 1 x i32> @vnsrl_wv_nxv1i32_sext(<vscale x 1 x i64> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv1i32_sext:
@@ -341,3 +341,6 @@
   %y = trunc %x to
   ret %y
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <vscale x 1 x i64> @vwadd_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vwadd_vv_nxv1i64:
@@ -425,3 +425,6 @@
   %vc = add %va, %vb
   ret %vc
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <vscale x 1 x i32> @vwmacc_vv_nxv1i32(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i32> %vc) {
 ; CHECK-LABEL: vwmacc_vv_nxv1i32:
@@ -455,3 +455,6 @@
   %y = add %x, %vc
   ret %y
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <vscale x 1 x i64> @vwmul_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vwmul_vv_nxv1i64:
@@ -337,3 +337,6 @@
   %ve = mul %vc, %vd
   ret %ve
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK64
 
 define <vscale x 1 x i64> @vwsub_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vwsub_vv_nxv1i64:
@@ -425,3 +425,6 @@
   %vc = sub %va, %vb
   ret %vc
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK32: {{.*}}
+; CHECK64: {{.*}}
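
A worked illustration of the new combine (not part of the patch; the function name @copysign_ext_example is hypothetical, but the shape mirrors the vfcopysign tests above). SimplifyVCastOp rewrites fpext(splat(%s)) into splat(fpext(%s)) when the scalar cast is legal, which is what lets the tests select a scalar fcvt.s.h plus vfsgnj.vf instead of vfmv.v.f + vfwcvt.f.f.v + vfsgnj.vv:

  ; The half scalar is splatted, extended to float, then used by copysign.
  ; After this patch the fpext is done once on the scalar before splatting.
  define <vscale x 1 x float> @copysign_ext_example(<vscale x 1 x float> %vm, half %s) {
    %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
    %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
    %ext = fpext <vscale x 1 x half> %splat to <vscale x 1 x float>
    %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %ext)
    ret <vscale x 1 x float> %r
  }
  declare <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>)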
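Conversely, a sketch of the case the RISC-V preferScalarizeSplat override protects (again hypothetical, modeled on the vwadd-sdnode.ll tests). For SIGN_EXTEND and ZERO_EXTEND the hook returns false, so sext(splat(%b)) is kept in vector form and the existing widening patterns can still match instructions such as vwadd.vx:

  ; If the sext were scalarized, the splat would be of the already-widened
  ; scalar and the vwadd.vx pattern on ext(splat) could no longer match.
  define <vscale x 1 x i64> @vwadd_vx_example(<vscale x 1 x i64> %va, i32 %b) {
    %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
    %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
    %vb = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
    %vc = add <vscale x 1 x i64> %va, %vb
    ret <vscale x 1 x i64> %vc
  }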