diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -258,4 +258,14 @@ defm vfadd : RISCVBinaryAAX; defm vfsub : RISCVBinaryAAX; defm vfrsub : RISCVBinaryAAX; + + def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic; + def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMVectorElementType<0>, + llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic { + let ExtendOperand = 2; + } } // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -77,6 +77,11 @@ GREVIW, GORCI, GORCIW, + // Vector Extension + // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT + // sign extended from the vector element size. NOTE: The result size will + // never be less than the vector element size. + VMV_X_S, }; } // namespace RISCVISD diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -348,10 +348,12 @@ setBooleanVectorContents(ZeroOrOneBooleanContent); // RVV intrinsics may have illegal operands. + // We also need to custom legalize vmv.x.s and vmv.s.x. setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); + setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom); if (Subtarget.is64Bit()) - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom); + setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); } // Function alignments. 
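The hunk above is what makes the narrow-result case work: vmv.x.s always produces an XLenVT value in the SelectionDAG, so an intrinsic call whose result type is smaller than XLEN (i8/i16 everywhere, i32 on RV64) has to be custom type-legalized by emitting RISCVISD::VMV_X_S at XLEN width and truncating. A minimal IR sketch of that case, written for illustration here rather than taken from the patch's test files:

declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)

; On riscv64, i8 is not a legal result type, so ReplaceNodeResults builds
; (truncate (RISCVISD::VMV_X_S %v)); the sign extension required by the
; signext return attribute can then be elided thanks to the new
; ComputeNumSignBits handling of VMV_X_S.
define signext i8 @vmv_x_s_i8(<vscale x 1 x i8> %v) nounwind {
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> %v)
  ret i8 %a
}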
@@ -1032,9 +1034,9 @@ assert(II->ExtendedOperand < Op.getNumOperands()); SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end()); SDValue &ScalarOp = Operands[II->ExtendedOperand]; - if (ScalarOp.getValueType() == MVT::i8 || - ScalarOp.getValueType() == MVT::i16 || - ScalarOp.getValueType() == MVT::i32) { + EVT OpVT = ScalarOp.getValueType(); + if (OpVT == MVT::i8 || OpVT == MVT::i16 || + (OpVT == MVT::i32 && Subtarget.is64Bit())) { ScalarOp = DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(), @@ -1051,6 +1053,10 @@ EVT PtrVT = getPointerTy(DAG.getDataLayout()); return DAG.getRegister(RISCV::X4, PtrVT); } + case Intrinsic::riscv_vmv_x_s: + assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!"); + return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(), + Op.getOperand(1)); } } @@ -1272,6 +1278,25 @@ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); break; } + case ISD::INTRINSIC_WO_CHAIN: { + unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); + switch (IntNo) { + default: + llvm_unreachable( + "Don't know how to custom type legalize this intrinsic!"); + case Intrinsic::riscv_vmv_x_s: { + EVT VT = N->getValueType(0); + assert((VT == MVT::i8 || VT == MVT::i16 || + (Subtarget.is64Bit() && VT == MVT::i32)) && + "Unexpected custom legalisation!"); + SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL, + Subtarget.getXLenVT(), N->getOperand(1)); + Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract)); + break; + } + } + break; + } } } @@ -1693,6 +1718,11 @@ // more precise answer could be calculated for SRAW depending on known // bits in the shift amount. return 33; + case RISCVISD::VMV_X_S: + // The number of sign bits of the scalar result is computed by obtaining the + // element type of the input vector operand, subtracting its width from the + // XLEN, and then adding one (sign bit within the element type). + return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1; } return 1; @@ -3332,6 +3362,7 @@ NODE_NAME_CASE(GREVIW) NODE_NAME_CASE(GORCI) NODE_NAME_CASE(GORCIW) + NODE_NAME_CASE(VMV_X_S) } // clang-format on return nullptr; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td @@ -967,8 +967,9 @@ let vm = 1 in { def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd), (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">; -def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd), - (ins GPR:$rs1), "vmv.s.x", "$vd, $rs1">; +let Constraints = "$vd = $vd_wb" in +def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb), + (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">; } } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -14,6 +14,10 @@ /// //===----------------------------------------------------------------------===// +def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S", + SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>, + SDTCisInt<1>]>>; + // X0 has special meaning for vsetvl/vsetvli. 
// rd | rs1 | AVL value | Effect on vl //-------------------------------------------------------------- @@ -1176,6 +1180,30 @@ } // Predicates = [HasStdExtV, HasStdExtF] //===----------------------------------------------------------------------===// +// 17.1. Integer Scalar Move Instructions +//===----------------------------------------------------------------------===// + +let Predicates = [HasStdExtV] in { +let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1, + Uses = [VL, VTYPE] in { + foreach m = MxList.m in { + let VLMul = m.value in { + let SEWIndex = 2, BaseInstr = VMV_X_S in + def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd), + (ins m.vrclass:$rs2, ixlenimm:$sew), + []>, RISCVVPseudo; + let VLIndex = 3, SEWIndex = 4, BaseInstr = VMV_S_X, + Constraints = "$rd = $rs1" in + def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd), + (ins m.vrclass:$rs1, GPR:$rs2, + GPR:$vl, ixlenimm:$sew), + []>, RISCVVPseudo; + } + } +} +} + +//===----------------------------------------------------------------------===// // Patterns. //===----------------------------------------------------------------------===// let Predicates = [HasStdExtV] in { @@ -1299,3 +1327,18 @@ defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>; } // Predicates = [HasStdExtV, HasStdExtF] + +//===----------------------------------------------------------------------===// +// 17.1. Integer Scalar Move Instructions +//===----------------------------------------------------------------------===// + +let Predicates = [HasStdExtV] in { +foreach vti = AllIntegerVectors in { + def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)), + (!cast("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.SEW)>; + def : Pat<(vti.Vector (int_riscv_vmv_s_x (vti.Vector vti.RegClass:$rs1), + GPR:$rs2, GPR:$vl)), + (!cast("PseudoVMV_S_X_" # vti.LMul.MX) + (vti.Vector $rs1), $rs2, (NoX0 GPR:$vl), vti.SEW)>; +} +} // Predicates = [HasStdExtV] diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll @@ -0,0 +1,236 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.riscv.vmv.s.x.nxv1i8(, i8, i32) + +define @intrinsic_vmv.s.x_x_nxv1i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv1i8( %0, i8 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv2i8(, i8, i32) + +define @intrinsic_vmv.s.x_x_nxv2i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv2i8( %0, i8 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv4i8(, i8, i32) + +define @intrinsic_vmv.s.x_x_nxv4i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv4i8( %0, i8 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv8i8(, i8, i32) + +define @intrinsic_vmv.s.x_x_nxv8i8( %0, i8 %1, i32 %2) nounwind { +; 
CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv8i8( %0, i8 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv16i8(, i8, i32) + +define @intrinsic_vmv.s.x_x_nxv16i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv16i8( %0, i8 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv32i8(, i8, i32) + +define @intrinsic_vmv.s.x_x_nxv32i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv32i8( %0, i8 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv64i8(, i8, i32) + +define @intrinsic_vmv.s.x_x_nxv64i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv64i8( %0, i8 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv1i16(, i16, i32) + +define @intrinsic_vmv.s.x_x_nxv1i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv1i16( %0, i16 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv2i16(, i16, i32) + +define @intrinsic_vmv.s.x_x_nxv2i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv2i16( %0, i16 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv4i16(, i16, i32) + +define @intrinsic_vmv.s.x_x_nxv4i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv4i16( %0, i16 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv8i16(, i16, i32) + +define @intrinsic_vmv.s.x_x_nxv8i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv8i16( %0, i16 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv16i16(, i16, i32) + +define @intrinsic_vmv.s.x_x_nxv16i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv16i16( %0, i16 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv32i16(, i16, i32) + +define @intrinsic_vmv.s.x_x_nxv32i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv32i16( %0, i16 %1, i32 
%2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv1i32(, i32, i32) + +define @intrinsic_vmv.s.x_x_nxv1i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv1i32( %0, i32 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv2i32(, i32, i32) + +define @intrinsic_vmv.s.x_x_nxv2i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv2i32( %0, i32 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv4i32(, i32, i32) + +define @intrinsic_vmv.s.x_x_nxv4i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv4i32( %0, i32 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv8i32(, i32, i32) + +define @intrinsic_vmv.s.x_x_nxv8i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv8i32( %0, i32 %1, i32 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv16i32(, i32, i32) + +define @intrinsic_vmv.s.x_x_nxv16i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv16i32( %0, i32 %1, i32 %2) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll @@ -0,0 +1,288 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.riscv.vmv.s.x.nxv1i8(, i8, i64); + +define @intrinsic_vmv.s.x_x_nxv1i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv1i8( %0, i8 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv2i8(, i8, i64); + +define @intrinsic_vmv.s.x_x_nxv2i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv2i8( %0, i8 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv4i8(, i8, i64); + +define @intrinsic_vmv.s.x_x_nxv4i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv4i8( %0, i8 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv8i8(, i8, i64); + +define @intrinsic_vmv.s.x_x_nxv8i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv8i8( %0, i8 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv16i8(, i8, i64); + +define @intrinsic_vmv.s.x_x_nxv16i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv16i8( %0, i8 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv32i8(, i8, i64); + +define @intrinsic_vmv.s.x_x_nxv32i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv32i8( %0, i8 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv64i8(, i8, i64); + +define @intrinsic_vmv.s.x_x_nxv64i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv64i8( %0, i8 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv1i16(, i16, i64); + +define @intrinsic_vmv.s.x_x_nxv1i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv1i16( %0, i16 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv2i16(, i16, i64); + +define @intrinsic_vmv.s.x_x_nxv2i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv2i16( %0, i16 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv4i16(, i16, i64); + +define @intrinsic_vmv.s.x_x_nxv4i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv4i16( %0, i16 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv8i16(, i16, i64); + +define @intrinsic_vmv.s.x_x_nxv8i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv8i16( %0, i16 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv16i16(, i16, i64); + +define @intrinsic_vmv.s.x_x_nxv16i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv16i16( %0, i16 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv32i16(, i16, i64); + +define @intrinsic_vmv.s.x_x_nxv32i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv32i16( %0, i16 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv1i32(, i32, i64); + 
+define @intrinsic_vmv.s.x_x_nxv1i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv1i32( %0, i32 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv2i32(, i32, i64); + +define @intrinsic_vmv.s.x_x_nxv2i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv2i32( %0, i32 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv4i32(, i32, i64); + +define @intrinsic_vmv.s.x_x_nxv4i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv4i32( %0, i32 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv8i32(, i32, i64); + +define @intrinsic_vmv.s.x_x_nxv8i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv8i32( %0, i32 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv16i32(, i32, i64); + +define @intrinsic_vmv.s.x_x_nxv16i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv16i32( %0, i32 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv1i64(, i64, i64); + +define @intrinsic_vmv.s.x_x_nxv1i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv1i64( %0, i64 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv2i64(, i64, i64); + +define @intrinsic_vmv.s.x_x_nxv2i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv2i64( %0, i64 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv4i64(, i64, i64); + +define @intrinsic_vmv.s.x_x_nxv4i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv4i64( %0, i64 %1, i64 %2) + ret %a +} + +declare @llvm.riscv.vmv.s.x.nxv8i64(, i64, i64); + +define @intrinsic_vmv.s.x_x_nxv8i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vmv.s.x v16, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmv.s.x.nxv8i64( %0, i64 %1, i64 %2) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll @@ -0,0 +1,236 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +declare i8 @llvm.riscv.vmv.x.s.nxv1i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv1i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv2i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv2i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv2i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv4i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv4i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv4i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv8i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv8i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv8i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv16i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv16i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv16i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv32i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv32i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv32i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv64i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv64i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv64i8( %0) + ret i8 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv1i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv1i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv1i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv2i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv2i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv2i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv4i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv4i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; 
CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv4i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv8i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv8i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv8i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv16i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv16i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv16i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv32i16( ) + +define signext i16 @intrinsic_vmv.x.s_s_nxv32i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv32i16( %0) + ret i16 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv1i32( ) + +define i32 @intrinsic_vmv.x.s_s_nxv1i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv1i32( %0) + ret i32 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv2i32( ) + +define i32 @intrinsic_vmv.x.s_s_nxv2i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv2i32( %0) + ret i32 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv4i32( ) + +define i32 @intrinsic_vmv.x.s_s_nxv4i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv4i32( %0) + ret i32 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv8i32( ) + +define i32 @intrinsic_vmv.x.s_s_nxv8i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv8i32( %0) + ret i32 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv16i32( ) + +define i32 @intrinsic_vmv.x.s_s_nxv16i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv16i32( %0) + ret i32 %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll @@ -0,0 +1,288 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +declare i8 @llvm.riscv.vmv.x.s.nxv1i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv1i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 
+; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv2i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv2i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv2i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv4i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv4i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv4i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv8i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv8i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv8i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv16i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv16i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv16i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv32i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv32i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv32i8( %0) + ret i8 %a +} + +declare i8 @llvm.riscv.vmv.x.s.nxv64i8() + +define signext i8 @intrinsic_vmv.x.s_s_nxv64i8( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i8 @llvm.riscv.vmv.x.s.nxv64i8( %0) + ret i8 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv1i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv1i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv1i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv2i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv2i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv2i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv4i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv4i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv4i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv8i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv8i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = 
call i16 @llvm.riscv.vmv.x.s.nxv8i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv16i16() + +define signext i16 @intrinsic_vmv.x.s_s_nxv16i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv16i16( %0) + ret i16 %a +} + +declare i16 @llvm.riscv.vmv.x.s.nxv32i16( ) + +define signext i16 @intrinsic_vmv.x.s_s_nxv32i16( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i16 @llvm.riscv.vmv.x.s.nxv32i16( %0) + ret i16 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv1i32( ) + +define signext i32 @intrinsic_vmv.x.s_s_nxv1i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv1i32( %0) + ret i32 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv2i32( ) + +define signext i32 @intrinsic_vmv.x.s_s_nxv2i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv2i32( %0) + ret i32 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv4i32( ) + +define signext i32 @intrinsic_vmv.x.s_s_nxv4i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv4i32( %0) + ret i32 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv8i32( ) + +define signext i32 @intrinsic_vmv.x.s_s_nxv8i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv8i32( %0) + ret i32 %a +} + +declare i32 @llvm.riscv.vmv.x.s.nxv16i32( ) + +define signext i32 @intrinsic_vmv.x.s_s_nxv16i32( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i32 @llvm.riscv.vmv.x.s.nxv16i32( %0) + ret i32 %a +} + +declare i64 @llvm.riscv.vmv.x.s.nxv1i64( ) + +define i64 @intrinsic_vmv.x.s_s_nxv1i64( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64( %0) + ret i64 %a +} + +declare i64 @llvm.riscv.vmv.x.s.nxv2i64( ) + +define i64 @intrinsic_vmv.x.s_s_nxv2i64( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i64 @llvm.riscv.vmv.x.s.nxv2i64( %0) + ret i64 %a +} + +declare i64 @llvm.riscv.vmv.x.s.nxv4i64( ) + +define i64 @intrinsic_vmv.x.s_s_nxv4i64( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + 
%a = call i64 @llvm.riscv.vmv.x.s.nxv4i64( %0) + ret i64 %a +} + +declare i64 @llvm.riscv.vmv.x.s.nxv8i64() + +define i64 @intrinsic_vmv.x.s_s_nxv8i64( %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.x.s a0, v16 +; CHECK-NEXT: ret +entry: + %a = call i64 @llvm.riscv.vmv.x.s.nxv8i64( %0) + ret i64 %a +}
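The two intrinsics compose in the obvious way. The sketch below is illustrative usage, not part of the patch's test files: it writes a scalar into element 0 with vmv.s.x and reads it back with vmv.x.s, which should select the PseudoVMV_S_X/PseudoVMV_X_S pair added above (riscv64 shown, so the vl argument is i64).

declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32, i64)
declare i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32>)

define signext i32 @insert_then_extract(<vscale x 2 x i32> %v, i32 %x, i64 %vl) nounwind {
entry:
  ; vmv.s.x vd, rs1 -- writes %x into element 0 of %v under vl = %vl
  %t = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32> %v, i32 %x, i64 %vl)
  ; vmv.x.s rd, vs2 -- reads element 0 back, already sign-extended to XLEN
  %r = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> %t)
  ret i32 %r
}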