diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -50,6 +50,9 @@
   bool SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt);
   bool SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt);
   bool SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt);
+  bool selectVSplat(SDValue N, SDValue &SplatVal);
+  bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
+  bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
 
   // Include the pieces autogenerated from the target description.
 #include "RISCVGenDAGISel.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -432,6 +432,52 @@
   return true;
 }
 
+bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
+  if (N.getOpcode() != ISD::SPLAT_VECTOR &&
+      N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64)
+    return false;
+  SplatVal = N.getOperand(0);
+  return true;
+}
+
+bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
+  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
+       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64) ||
+      !isa<ConstantSDNode>(N.getOperand(0)))
+    return false;
+
+  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
+
+  // TODO: First truncate the constant to the vector element type since the
+  // bits will be implicitly truncated anyway. This would catch cases where the
+  // immediate was zero-extended instead of sign-extended: we would still want
+  // to match (i8 -1) -> (XLenVT 255) as a simm5, for example.
+  if (!isInt<5>(SplatImm))
+    return false;
+
+  SplatVal =
+      CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
+
+  return true;
+}
+
+bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
+  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
+       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64) ||
+      !isa<ConstantSDNode>(N.getOperand(0)))
+    return false;
+
+  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
+
+  if (!isUInt<5>(SplatImm))
+    return false;
+
+  SplatVal =
+      CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
+
+  return true;
+}
+
 // Merge an ADDI into the offset of a load/store instruction where possible.
 // (load (addi base, off1), off2) -> (load base, off1+off2)
 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -82,6 +82,9 @@
   // sign extended from the vector element size. NOTE: The result size will
   // never be less than the vector element size.
   VMV_X_S,
+  // Splats an i64 scalar to a vector type (with element type i64) where the
+  // scalar is a sign-extended i32.
+  SPLAT_VECTOR_I64,
 };
 } // namespace RISCVISD
 
@@ -265,6 +268,7 @@
   SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
+  SDValue lowerSPLATVECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -361,6 +361,13 @@
       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
     }
+
+    for (auto VT : MVT::integer_scalable_vector_valuetypes())
+      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+
+    // We must custom-lower SPLAT_VECTOR vXi64 on RV32
+    if (!Subtarget.is64Bit())
+      setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
   }
 
   // Function alignments.
@@ -625,6 +632,8 @@
     return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
                        DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
   }
+  case ISD::SPLAT_VECTOR:
+    return lowerSPLATVECTOR(Op, DAG);
   }
 }
 
@@ -1042,6 +1051,53 @@
   return DAG.getMergeValues(Parts, DL);
 }
 
+// Custom-lower a SPLAT_VECTOR where XLEN<SEW, as the SEW element type is
+// illegal (currently only vXi64 RV32).
+// FIXME: We could also catch sign-extended i32 values and lower them as
+// SPLAT_VECTOR_I64.
+SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT VecVT = Op.getValueType();
+  assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
+         "Unexpected SPLAT_VECTOR lowering");
+  SDValue SplatVal = Op.getOperand(0);
+
+  // If we can prove that the value is a sign-extended 32-bit value, lower this
+  // as a custom node in order to try and match RVV vector/scalar instructions.
+  if (auto *CVal = dyn_cast<ConstantSDNode>(SplatVal)) {
+    if (isInt<32>(CVal->getSExtValue()))
+      return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT,
+                         DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32));
+  }
+
+  // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
+  // to accidentally sign-extend the 32-bit halves to the e64 SEW:
+  // vmv.v.x vX, hi
+  // vsll.vx vX, vX, /*32*/
+  // vmv.v.x vY, lo
+  // vsll.vx vY, vY, /*32*/
+  // vsrl.vx vY, vY, /*32*/
+  // vor.vv vX, vX, vY
+  SDValue One = DAG.getConstant(1, DL, MVT::i32);
+  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
+  SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
+  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, Zero);
+  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, One);
+
+  Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
+  Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
+  Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);
+
+  if (isNullConstant(Hi))
+    return Lo;
+
+  Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
+  Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);
+
+  return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
+}
+
 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -3426,6 +3482,7 @@
   NODE_NAME_CASE(GORCI)
   NODE_NAME_CASE(GORCIW)
   NODE_NAME_CASE(VMV_X_S)
+  NODE_NAME_CASE(SPLAT_VECTOR_I64)
   }
   // clang-format on
   return nullptr;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -949,58 +949,6 @@
   defm "" : VPseudoBinaryM_VI;
 }
 
-//===----------------------------------------------------------------------===//
-// Helpers to define the SDNode patterns.
-//===----------------------------------------------------------------------===//
-
-multiclass VPatUSLoadStoreSDNode<ValueType type,
-                                 int sew,
-                                 LMULInfo vlmul,
-                                 RegisterClass reg_rs1,
-                                 VReg reg_class>
-{
-  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
-  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
-  // Load
-  def : Pat<(type (load reg_rs1:$rs1)),
-            (load_instr reg_rs1:$rs1, VLMax, sew)>;
-  // Store
-  def : Pat<(store type:$rs2, reg_rs1:$rs1),
-            (store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
-}
-
-multiclass VPatUSLoadStoreSDNodes<RegisterClass reg_rs1> {
-  foreach vti = AllVectors in
-    defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.SEW, vti.LMul,
-                                    reg_rs1, vti.RegClass>;
-}
-
-class VPatBinarySDNode<SDNode vop,
-                       string instruction_name,
-                       ValueType result_type,
-                       ValueType op_type,
-                       int sew,
-                       LMULInfo vlmul,
-                       VReg op_reg_class> :
-    Pat<(result_type (vop
-                      (op_type op_reg_class:$rs1),
-                      (op_type op_reg_class:$rs2))),
-        (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
-                     op_reg_class:$rs1,
-                     op_reg_class:$rs2,
-                     VLMax, sew)>;
-
-multiclass VPatBinarySDNode<SDNode vop, string instruction_name>
-{
-  foreach vti = AllIntegerVectors in
-    def : VPatBinarySDNode<vop, instruction_name,
-                           vti.Vector, vti.Vector, vti.SEW,
-                           vti.LMul, vti.RegClass>;
-}
-
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
@@ -2084,15 +2032,6 @@
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasStdExtV] in {
 
-// Whole-register vector patterns.
-
-// 7.4. Vector Unit-Stride Instructions
-defm "" : VPatUSLoadStoreSDNodes<GPR>;
-defm "" : VPatUSLoadStoreSDNodes<AddrFI>;
-
-// 12.1. Vector Single-Width Integer Add and Subtract
-defm "" : VPatBinarySDNode<add, "PseudoVADD">;
-
 //===----------------------------------------------------------------------===//
 // 7. Vector Loads and Stores
 //===----------------------------------------------------------------------===//
@@ -2520,3 +2459,6 @@
 defm "" : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
 defm "" : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
 } // Predicates = [HasStdExtV, HasStdExtF]
+
+// Include the non-intrinsic ISel patterns
+include "RISCVInstrInfoVSDPatterns.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -0,0 +1,166 @@
+//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains the required infrastructure and SDNode patterns to
+/// support code generation for the standard 'V' (Vector) extension, version
+/// 0.9. This version is still experimental as the 'V' extension hasn't been
+/// ratified yet.
+///
+/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
+///
+/// Note: the patterns for RVV intrinsics are found in
+/// RISCVInstrInfoVPseudos.td.
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Helpers to define the SDNode patterns.
+//===----------------------------------------------------------------------===//
+
+def SDTSplatI64 : SDTypeProfile<1, 1, [
+  SDTCVecEltisVT<0, i64>, SDTCisVT<1, i32>
+]>;
+
+def rv32_splat_i64 : SDNode<"RISCVISD::SPLAT_VECTOR_I64", SDTSplatI64>;
+
+// Penalize the generic form with Complexity=1 to give the simm5/uimm5 variants
+// precedence
+def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
+
+def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5">;
+def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5">;
+
+multiclass VPatUSLoadStoreSDNode<ValueType type,
+                                 int sew,
+                                 LMULInfo vlmul,
+                                 RegisterClass reg_rs1,
+                                 VReg reg_class>
+{
+  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
+  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
+  // Load
+  def : Pat<(type (load reg_rs1:$rs1)),
+            (load_instr reg_rs1:$rs1, VLMax, sew)>;
+  // Store
+  def : Pat<(store type:$rs2, reg_rs1:$rs1),
+            (store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
+}
+
+multiclass VPatUSLoadStoreSDNodes<RegisterClass reg_rs1> {
+  foreach vti = AllVectors in
+    defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.SEW, vti.LMul,
+                                    reg_rs1, vti.RegClass>;
+}
+
+class VPatBinarySDNode_VV<SDNode vop,
+                          string instruction_name,
+                          ValueType result_type,
+                          ValueType op_type,
+                          int sew,
+                          LMULInfo vlmul,
+                          VReg op_reg_class> :
+    Pat<(result_type (vop
+                      (op_type op_reg_class:$rs1),
+                      (op_type op_reg_class:$rs2))),
+        (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+                     op_reg_class:$rs1,
+                     op_reg_class:$rs2,
+                     VLMax, sew)>;
+
+class VPatBinarySDNode_XI<SDNode vop,
+                          string instruction_name,
+                          string suffix,
+                          ValueType result_type,
+                          ValueType vop_type,
+                          int sew,
+                          LMULInfo vlmul,
+                          VReg vop_reg_class,
+                          ComplexPattern SplatPatKind,
+                          DAGOperand xop_kind> :
+    Pat<(result_type (vop
+                      (vop_type vop_reg_class:$rs1),
+                      (vop_type (SplatPatKind xop_kind:$rs2)))),
+        (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
+                     vop_reg_class:$rs1,
+                     xop_kind:$rs2,
+                     VLMax, sew)>;
+
+multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
+                                     Operand ImmType = simm5>
+{
+  foreach vti = AllIntegerVectors in {
+    def : VPatBinarySDNode_VV<vop, instruction_name,
+                              vti.Vector, vti.Vector, vti.SEW,
+                              vti.LMul, vti.RegClass>;
+    def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
+                              vti.Vector, vti.Vector, vti.SEW,
+                              vti.LMul, vti.RegClass,
+                              SplatPat, GPR>;
+    def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
+                              vti.Vector, vti.Vector, vti.SEW,
+                              vti.LMul, vti.RegClass,
+                              !cast<ComplexPattern>(SplatPat#_#ImmType),
+                              ImmType>;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Patterns.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV] in {
+
+// 7.4. Vector Unit-Stride Instructions
+defm "" : VPatUSLoadStoreSDNodes<GPR>;
+defm "" : VPatUSLoadStoreSDNodes<AddrFI>;
+
+// 12.1. Vector Single-Width Integer Add and Subtract
+defm "" : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
+
+// 12.5. Vector Bitwise Logical Instructions
+defm "" : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;
+
+// 12.6. Vector Single-Width Bit Shift Instructions
+defm "" : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
+defm "" : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
+
+} // Predicates = [HasStdExtV]
+
+//===----------------------------------------------------------------------===//
+// Vector Splats
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV] in {
+foreach vti = AllIntegerVectors in {
+  def : Pat<(vti.Vector (splat_vector GPR:$rs1)),
+            (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX)
+              GPR:$rs1, VLMax, vti.SEW)>;
+  def : Pat<(vti.Vector (splat_vector simm5:$rs1)),
+            (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX)
+              simm5:$rs1, VLMax, vti.SEW)>;
+}
+} // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, IsRV32] in {
+foreach vti = AllIntegerVectors in {
+  if !eq(vti.SEW, 64) then {
+    def : Pat<(vti.Vector (rv32_splat_i64 GPR:$rs1)),
+              (!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX)
+                GPR:$rs1, VLMax, vti.SEW)>;
+    def : Pat<(vti.Vector (rv32_splat_i64 simm5:$rs1)),
+              (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX)
+                simm5:$rs1, VLMax, vti.SEW)>;
+  }
+}
+} // Predicates = [HasStdExtV, IsRV32]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
@@ -0,0 +1,822 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vadd_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vadd_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = add <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vadd_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vadd_vx_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = add <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vadd_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vadd_vx_nxv1i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 2, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = add <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vadd_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vadd_vx_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = add <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vadd_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vadd_vx_nxv2i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = add <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vadd_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vadd_vx_nxv2i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 2, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = add <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vadd_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vadd_vx_nxv4i8:
+;
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define 
@vadd_vx_nxv32i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 
+ %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, 
e16,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32_0( %va) { +; 
CHECK-LABEL: vadd_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vadd_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vadd_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vadd.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i64_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i64_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i64 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vadd_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vadd.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i64_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; 
CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i64_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i64 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vadd_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vadd.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i64_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i64_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i64 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vadd_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vadd.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i64_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i64_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i64 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv64.ll @@ -0,0 +1,794 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vadd_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret 
+ %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu 
+; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vadd_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vadd_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv64i8_1( %va) { +; CHECK-LABEL: vadd_vx_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i8 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16_0( %va) { +; CHECK-LABEL: 
vadd_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vadd_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vadd_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv32i16_1( %va) { +; CHECK-LABEL: vadd_vx_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i16 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vadd_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vadd_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: 
vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv2i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vadd_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv4i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vadd_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv8i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vadd_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vadd_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv16i32_1( %va) { +; CHECK-LABEL: vadd_vx_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vadd.vi v16, v16, 2 +; CHECK-NEXT: ret + %head = insertelement undef, i32 2, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = add %va, %splat + ret %vc +} + +define @vadd_vx_nxv1i64( %va, i64 %b) { +; 
CHECK-LABEL: vadd_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vadd.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = add <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vadd_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vadd_vx_nxv1i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = add <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vadd_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vadd_vx_nxv1i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = add <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vadd_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vadd.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = add <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vadd_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vadd_vx_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = add <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vadd_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vadd_vx_nxv2i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = add <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vadd_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vadd.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = add <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vadd_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vadd_vx_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = add <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vadd_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vadd_vx_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = add <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vadd_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vadd.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = add <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vadd_vx_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = add <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vadd_vx_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = add <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
@@ -0,0 +1,1109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vor_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vor_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vor_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv1i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vor_vx_nxv1i8_2(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv1i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vor_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vor_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv2i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vor_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv2i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vor_vx_nxv2i8_2(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv2i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vor_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vor_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv4i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vor_vx_nxv4i8_1(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv4i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vor_vx_nxv4i8_2(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv4i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vor_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vor_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vor_vx_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vor_vx_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vor_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vor_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv16i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vor_vx_nxv16i8_1(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv16i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vor_vx_nxv16i8_2(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv16i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vor_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vor_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv32i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vor_vx_nxv32i8_1(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv32i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vor_vx_nxv32i8_2(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv32i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vor_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = or <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vor_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv64i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = or <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vor_vx_nxv64i8_1(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv64i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = or <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vor_vx_nxv64i8_2(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv64i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = or <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 1 x i16> @vor_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vor_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv1i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vor_vx_nxv1i16_1(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv1i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vor_vx_nxv1i16_2(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv1i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vor_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vor_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv2i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vor_vx_nxv2i16_1(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv2i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vor_vx_nxv2i16_2(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv2i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vor_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vor_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv4i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vor_vx_nxv4i16_1(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv4i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vor_vx_nxv4i16_2(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv4i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vor_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vor_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vor_vx_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vor_vx_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv8i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vor_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vor_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv16i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vor_vx_nxv16i16_1(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv16i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vor_vx_nxv16i16_2(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv16i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vor_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vor_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv32i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vor_vx_nxv32i16_1(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv32i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vor_vx_nxv32i16_2(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv32i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 1 x i32> @vor_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
+; CHECK-LABEL: vor_vx_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vor_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv1i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vor_vx_nxv1i32_1(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv1i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vor_vx_nxv1i32_2(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv1i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vor_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
+; CHECK-LABEL: vor_vx_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vor_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv2i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vor_vx_nxv2i32_1(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv2i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vor_vx_nxv2i32_2(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv2i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vor_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
+; CHECK-LABEL: vor_vx_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vor_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv4i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vor_vx_nxv4i32_1(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv4i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vor_vx_nxv4i32_2(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv4i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vor_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: vor_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vor_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vor_vx_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vor_vx_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv8i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vor_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
+; CHECK-LABEL: vor_vx_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vor_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv16i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vor_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv16i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vor_vx_nxv16i32_2(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv16i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 1 x i64> @vor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vor_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v25, v25, a1
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vsrl.vx v26, v26, a1
+; CHECK-NEXT:    vor.vv v25, v26, v25
+; CHECK-NEXT:    vor.vv v16, v16, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vor_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv1i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vor_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv1i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vor_vx_nxv1i64_2(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv1i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vor_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vsrl.vx v28, v28, a1
+; CHECK-NEXT:    vor.vv v26, v28, v26
+; CHECK-NEXT:    vor.vv v16, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vor_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vor_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv2i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vor_vx_nxv2i64_2(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv2i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vor_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    vsrl.vx v8, v8, a1
+; CHECK-NEXT:    vor.vv v28, v8, v28
+; CHECK-NEXT:    vor.vv v16, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vor_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vor_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vor_vx_nxv4i64_2(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv4i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vor_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    vmv.v.x v24, a0
+; CHECK-NEXT:    vsll.vx v24, v24, a1
+; CHECK-NEXT:    vsrl.vx v24, v24, a1
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    vor.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vor_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vor_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vor_vx_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
@@ -0,0 +1,1081 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vor_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vor_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vor_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv1i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vor_vx_nxv1i8_2(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv1i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vor_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vor_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv2i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vor_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv2i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vor_vx_nxv2i8_2(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv2i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vor_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vor_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv4i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vor_vx_nxv4i8_1(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv4i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 4 x i8> @vor_vx_nxv4i8_2(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv4i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i8> %va, %splat
+  ret <vscale x 4 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vor_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vor_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vor_vx_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vor_vx_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vor_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vor_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv16i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vor_vx_nxv16i8_1(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv16i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vor_vx_nxv16i8_2(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv16i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i8> %va, %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vor_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vor_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv32i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vor_vx_nxv32i8_1(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv32i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vor_vx_nxv32i8_2(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv32i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i8> %va, %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vor_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vor_vx_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = or <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vor_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv64i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 -1, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = or <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vor_vx_nxv64i8_1(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv64i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = or <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vor_vx_nxv64i8_2(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vor_vx_nxv64i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %vc = or <vscale x 64 x i8> %va, %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 1 x i16> @vor_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vor_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv1i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vor_vx_nxv1i16_1(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv1i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vor_vx_nxv1i16_2(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv1i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i16> %va, %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vor_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vor_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv2i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vor_vx_nxv2i16_1(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv2i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vor_vx_nxv2i16_2(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv2i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i16> %va, %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vor_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vor_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv4i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vor_vx_nxv4i16_1(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv4i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vor_vx_nxv4i16_2(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv4i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i16> %va, %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vor_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vor_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vor_vx_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vor_vx_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv8i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vor_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vor_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv16i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vor_vx_nxv16i16_1(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv16i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vor_vx_nxv16i16_2(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv16i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i16> %va, %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vor_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vor_vx_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vor_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv32i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 -1, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vor_vx_nxv32i16_1(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv32i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vor_vx_nxv32i16_2(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vor_vx_nxv32i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = or <vscale x 32 x i16> %va, %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 1 x i32> @vor_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vor_vx_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vor_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv1i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vor_vx_nxv1i32_1(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv1i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vor_vx_nxv1i32_2(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv1i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i32> %va, %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vor_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vor_vx_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vor_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv2i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vor_vx_nxv2i32_1(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv2i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vor_vx_nxv2i32_2(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv2i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i32> %va, %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vor_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vor_vx_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vor_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv4i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vor_vx_nxv4i32_1(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv4i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vor_vx_nxv4i32_2(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv4i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i32> %va, %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vor_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vor_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vor_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vor_vx_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vor_vx_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv8i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vor_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vor_vx_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vor_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv16i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vor_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv16i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vor_vx_nxv16i32_2(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vor_vx_nxv16i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = or <vscale x 16 x i32> %va, %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 1 x i64> @vor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vor_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vor_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv1i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vor_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv1i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vor_vx_nxv1i64_2(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv1i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = or <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vor_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vor_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vor_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv2i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vor_vx_nxv2i64_2(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv2i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = or <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vor_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vor_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vor_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vor_vx_nxv4i64_2(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv4i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = or <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vor_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vor.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vor_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vor_vx_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vor.vi v16, v16, -1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = or <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
%vc +} + +define @vor_vx_nxv8i64_1( %va) { +; CHECK-LABEL: vor_vx_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vor.vi v16, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = or %va, %splat + ret %vc +} + +define @vor_vx_nxv8i64_2( %va) { +; CHECK-LABEL: vor_vx_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vor.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = or %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv32.ll @@ -0,0 +1,845 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vshl_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv4i8_0: +; CHECK: # %bb.0: 
+; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement 
undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv64i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + 
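+; An illustrative aside, hand-written rather than autogenerated, and only a
+; sketch of what the _0/_1 pairs in this file exercise: a constant
+; shift-amount splat that fits the 5-bit immediate field is folded into the
+; .vi form (presumably via the selectVSplatUimm5 helper this patch adds),
+; while anything wider is first materialized in a scalar register and
+; selected as the .vx form:
+;   shl %va, (splat 15)  ; => vsll.vi v16, v16, 15
+;   shl %va, (splat 32)  ; => addi a0, zero, 32
+;                        ;    vsll.vx v16, v16, a0
+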
+define @vshl_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, 
zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vshl_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vshl_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vshl_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vshl_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 
%b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vshl_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vshl_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vsll.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i64_0( %va) { +; CHECK-LABEL: vshl_vx_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i64_1( %va) { +; CHECK-LABEL: vshl_vx_nxv1i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vshl_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vsll.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, 
i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i64_0( %va) { +; CHECK-LABEL: vshl_vx_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i64_1( %va) { +; CHECK-LABEL: vshl_vx_nxv2i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vshl_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vsll.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i64_0( %va) { +; CHECK-LABEL: vshl_vx_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i64_1( %va) { +; CHECK-LABEL: vshl_vx_nxv4i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vshl_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vsll.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i64_0( %va) { +; CHECK-LABEL: vshl_vx_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i64_1( %va) { +; CHECK-LABEL: vshl_vx_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv64.ll @@ -0,0 +1,817 @@ +; NOTE: Assertions 
have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vshl_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv1i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} 
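+;
+; A hand-written note, not autogenerated: vsll.vx hands the whole of a0 to
+; the shift, but the hardware only consumes the low log2(SEW) bits of the
+; shift-amount operand, and an IR shl by an amount >= the element width is
+; poison anyway, so %b needs no masking before it is splatted. For the e8
+; tests here only the low 3 bits of the scalar matter, e.g.:
+;   vsll.vx v16, v16, a0  ; a0 = 10 shifts e8 elements left by 2 (10 & 7)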
+ +define @vshl_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vshl_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vshl_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv64i8_1( %va) { +; CHECK-LABEL: vshl_vx_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, 
e8,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 
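+  ; (hand-written, illustrative comment rather than autogenerated output:
+  ; this insertelement + shufflevector pair is the canonical scalable-vector
+  ; splat idiom; it legalizes to ISD::SPLAT_VECTOR, which is what lets %b in
+  ; a0 fold straight into the vsll.vx above)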
+ %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vshl_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vshl_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv32i16_1( %va) { +; CHECK-LABEL: vshl_vx_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vshl_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define 
@vshl_vx_nxv1i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vshl_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vshl_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vshl_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv8i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vshl_vx_nxv16i32: +; CHECK: # %bb.0: 
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vshl_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv16i32_1( %va) { +; CHECK-LABEL: vshl_vx_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vshl_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i64_0( %va) { +; CHECK-LABEL: vshl_vx_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv1i64_1( %va) { +; CHECK-LABEL: vshl_vx_nxv1i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vshl_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i64_0( %va) { +; CHECK-LABEL: vshl_vx_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv2i64_1( %va) { +; CHECK-LABEL: vshl_vx_nxv2i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vshl_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vsll.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + +define @vshl_vx_nxv4i64_0( %va) { +; CHECK-LABEL: vshl_vx_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vsll.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + 
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = shl <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vshl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vshl_vx_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsll.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = shl <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vshl_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vshl_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsll.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = shl <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vshl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vshl_vx_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsll.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 31, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = shl <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vshl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vshl_vx_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsll.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = shl <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
@@ -0,0 +1,266 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV32V
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV64V
+
+define <vscale x 8 x i64> @vsplat_nxv8i64_1() {
+; RV32V-LABEL: vsplat_nxv8i64_1:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.i v16, -1
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_nxv8i64_1:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vmv.v.i v16, -1
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i64> %splat
+}
+
+define <vscale x 8 x i64> @vsplat_nxv8i64_2() {
+; RV32V-LABEL: vsplat_nxv8i64_2:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.i v16, 4
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_nxv8i64_2:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vmv.v.i v16, 4
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i64> %splat
+}
+
+define <vscale x 8 x i64> @vsplat_nxv8i64_3() {
+; RV32V-LABEL: vsplat_nxv8i64_3:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    addi a0, zero, 255
+; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.x v16, a0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_nxv8i64_3:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    addi a0, zero, 255
+; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vmv.v.x v16, a0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i64> %splat
+}
+
+define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
+; RV32V-LABEL: vsplat_nxv8i64_4:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    lui a0, 1028096
+; RV32V-NEXT:    addi a0, a0, -1281
+; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.x v8, a0
+; RV32V-NEXT:    addi a0, zero, 32
+; RV32V-NEXT:    vsll.vx v8, v8, a0
+; RV32V-NEXT:    vsrl.vx v16, v8, a0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_nxv8i64_4:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    addi a0, zero, 251
+; RV64V-NEXT:    slli a0, a0, 24
+; RV64V-NEXT:    addi a0, a0, -1281
+; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vmv.v.x v16, a0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 4211079935, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i64> %splat
+}
+
+define <vscale x 8 x i64> @vsplat_nxv8i64_5(i64 %a) {
+; RV32V-LABEL: vsplat_nxv8i64_5:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.x v8, a1
+; RV32V-NEXT:    addi a1, zero, 32
+; RV32V-NEXT:    vsll.vx v8, v8, a1
+; RV32V-NEXT:    vmv.v.x v16, a0
+; RV32V-NEXT:    vsll.vx v16, v16, a1
+; RV32V-NEXT:    vsrl.vx v16, v16, a1
+; RV32V-NEXT:    vor.vv v16, v16, v8
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_nxv8i64_5:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vmv.v.x v16, a0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %a, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i64> %splat
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_6(<vscale x 8 x i64> %v) {
+; RV32V-LABEL: vadd_vx_nxv8i64_6:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vadd.vi v16, v16, 2
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vadd_vx_nxv8i64_6:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vadd.vi v16, v16, 2
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vret = add <vscale x 8 x i64> %v, %splat
+  ret <vscale x 8 x i64> %vret
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_7(<vscale x 8 x i64> %v) {
+; RV32V-LABEL: vadd_vx_nxv8i64_7:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vadd.vi v16, v16, -1
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vadd_vx_nxv8i64_7:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vadd.vi v16, v16, -1
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vret = add <vscale x 8 x i64> %v, %splat
+  ret <vscale x 8 x i64> %vret
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_8(<vscale x 8 x i64> %v) {
+; RV32V-LABEL: vadd_vx_nxv8i64_8:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    addi a0, zero, 255
+; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vadd.vx v16, v16, a0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vadd_vx_nxv8i64_8:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    addi a0, zero, 255
+; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vadd.vx v16, v16, a0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vret = add <vscale x 8 x i64> %v, %splat
+  ret <vscale x 8 x i64> %vret
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_9(<vscale x 8 x i64> %v) {
+; RV32V-LABEL: vadd_vx_nxv8i64_9:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    lui a0, 503808
+; RV32V-NEXT:    addi a0, a0, -1281
+; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vadd.vx v16, v16, a0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vadd_vx_nxv8i64_9:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    lui a0, 503808
+; RV64V-NEXT:    addiw a0, a0, -1281
+; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vadd.vx v16, v16, a0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 2063596287, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vret = add <vscale x 8 x i64> %v, %splat
+  ret <vscale x 8 x i64> %vret
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_10(<vscale x 8 x i64> %v) {
+; RV32V-LABEL: vadd_vx_nxv8i64_10:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    lui a0, 1028096
+; RV32V-NEXT:    addi a0, a0, -1281
+; RV32V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.x v8, a0
+; RV32V-NEXT:    addi a0, zero, 32
+; RV32V-NEXT:    vsll.vx v8, v8, a0
+; RV32V-NEXT:    vsrl.vx v8, v8, a0
+; RV32V-NEXT:    vadd.vv v16, v16, v8
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vadd_vx_nxv8i64_10:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    addi a0, zero, 251
+; RV64V-NEXT:    slli a0, a0, 24
+; RV64V-NEXT:    addi a0, a0, -1281
+; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vadd.vx v16, v16, a0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 4211079935, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vret = add <vscale x 8 x i64> %v, %splat
+  ret <vscale x 8 x i64> %vret
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
+; RV32V-LABEL: vadd_vx_nxv8i64_11:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.i v8, 1
+; RV32V-NEXT:    addi a0, zero, 32
+; RV32V-NEXT:    vsll.vx v8, v8, a0
+; RV32V-NEXT:    lui a1, 1028096
+; RV32V-NEXT:    addi a1, a1, -1281
+; RV32V-NEXT:    vmv.v.x v24, a1
+; RV32V-NEXT:    vsll.vx v24, v24, a0
+; RV32V-NEXT:    vsrl.vx v24, v24, a0
+; RV32V-NEXT:    vor.vv v8, v24, v8
+; RV32V-NEXT:    vadd.vv v16, v16, v8
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vadd_vx_nxv8i64_11:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    addi a0, zero, 507
+; RV64V-NEXT:    slli a0, a0, 24
+; RV64V-NEXT:    addi a0, a0, -1281
+; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vadd.vx v16, v16, a0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 8506047231, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vret = add <vscale x 8 x i64> %v, %splat
+  ret <vscale x 8 x i64> %vret
+}
+
+define <vscale x 8 x i64> @vadd_vx_nxv8i64_12(<vscale x 8 x i64> %v, i64 %a) {
+; RV32V-LABEL: vadd_vx_nxv8i64_12:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.x v8, a1
+; RV32V-NEXT:    addi a1, zero, 32
+; RV32V-NEXT:    vsll.vx v8, v8, a1
+; RV32V-NEXT:    vmv.v.x v24, a0
+; RV32V-NEXT:    vsll.vx v24, v24, a1
+; RV32V-NEXT:    vsrl.vx v24, v24, a1
+; RV32V-NEXT:    vor.vv v8, v24, v8
+; RV32V-NEXT:    vadd.vv v16, v16, v8
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vadd_vx_nxv8i64_12:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vadd.vx v16, v16, a0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %a, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vret = add <vscale x 8 x i64> %v, %splat
+  ret <vscale x 8 x i64> %vret
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode-rv32.ll
@@ -0,0 +1,845 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vsrl_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vsrl_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vsrl_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsrl_vx_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 31, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vsrl_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsrl_vx_nxv1i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+;
CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + 
ret %vc +} + +define @vsrl_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv64i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret 
+ %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vsrl_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vsrl_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, 
%splat + ret %vc +} + +define @vsrl_vx_nxv2i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vsrl_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vsrl_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vsrl_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vsrl_vx_nxv1i64: +; 
CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v25, v25, a1
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vsrl.vx v26, v26, a1
+; CHECK-NEXT:    vor.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vv v16, v16, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vsrl_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv1i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 31, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vsrl_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv1i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vsrl_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vsrl_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vsrl.vx v28, v28, a1
+; CHECK-NEXT:    vor.vv v26, v28, v26
+; CHECK-NEXT:    vsrl.vv v16, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = lshr <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vsrl_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 31, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = lshr <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vsrl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv2i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = lshr <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vsrl_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vsrl_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    vsrl.vx v8, v8, a1
+; CHECK-NEXT:    vor.vv v28, v8, v28
+; CHECK-NEXT:    vsrl.vv v16, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = lshr <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vsrl_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 31, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = lshr <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vsrl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = lshr <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vsrl_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vsrl_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    vmv.v.x v24, a0
+; CHECK-NEXT:    vsll.vx v24, v24, a1
+; CHECK-NEXT:    vsrl.vx v24, v24, a1
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    vsrl.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = lshr <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vsrl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 31, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = lshr <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vsrl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = lshr <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode-rv64.ll
@@ -0,0 +1,817 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vsrl_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vsrl_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vsrl_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsrl_vx_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 31, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vsrl_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsrl_vx_nxv1i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 32, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vsrl_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vsrl_vx_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = lshr <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vsrl_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsrl_vx_nxv2i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 31, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = lshr <vscale x 2 x i8> %va, %splat
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vsrl_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
+;
CHECK-LABEL: vsrl_vx_nxv2i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; 
CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv64i8_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i8 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv64i8_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv64i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector 
%head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define 
@vsrl_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i16_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i16 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv32i16_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv32i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv2i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv2i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv4i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv8i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vsrl_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i32_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i32 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv16i32_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv16i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 32, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vsrl_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i64_0( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vsrl.vi v16, v16, 31 +; CHECK-NEXT: ret + %head = insertelement undef, i64 31, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = lshr %va, %splat + ret %vc +} + +define @vsrl_vx_nxv1i64_1( %va) { +; CHECK-LABEL: vsrl_vx_nxv1i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 32 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vsrl.vx v16, v16, a0 +; CHECK-NEXT: ret + 
%head = insertelement <vscale x 1 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = lshr <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vsrl_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vsrl_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = lshr <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vsrl_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 31, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = lshr <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vsrl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv2i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = lshr <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vsrl_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vsrl_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = lshr <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vsrl_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 31, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = lshr <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vsrl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = lshr <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vsrl_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vsrl_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = lshr <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vsrl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsrl.vi v16, v16, 31
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 31, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = lshr <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vsrl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vsrl_vx_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 32, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = lshr <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
+
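Every test in the new files builds its splat operand with the same two-instruction insertelement/shufflevector idiom, which is what SelectionDAG canonicalizes to a SPLAT_VECTOR node. As a minimal standalone sketch (the function name here is illustrative, not part of the patch), the i64 shift-by-splat pattern these tests cover looks like:

  ; Illustrative only: splat i64 %b across a scalable vector, then shift by it.
  define <vscale x 1 x i64> @splat_lshr_example(<vscale x 1 x i64> %va, i64 %b) {
    %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
    %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
    %vc = lshr <vscale x 1 x i64> %va, %splat
    ret <vscale x 1 x i64> %vc
  }

On riscv64 the splat folds directly into a single vsrl.vx, while on riscv32 the i64 scalar must first be assembled in a vector register via the vmv.v.x/vsll.vx/vsrl.vx/vor.vv sequence checked in vsrl-sdnode-rv32.ll above.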