diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -288,6 +288,9 @@
   SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
   SDValue lowerSPLATVECTOR(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
+                             int64_t ExtTrueVal) const;
+  SDValue lowerVectorMaskTrunc(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -373,9 +373,18 @@
       setOperationAction(ISD::UMIN, VT, Legal);
       setOperationAction(ISD::UMAX, VT, Legal);

-      // Lower RVV truncates as a series of "RISCVISD::TRUNCATE_VECTOR"
-      // nodes which truncate by one power of two at a time.
-      setOperationAction(ISD::TRUNCATE, VT, Custom);
+      if (isTypeLegal(VT)) {
+        // Custom-lower extensions and truncations from/to mask types.
+        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
+        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
+        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
+
+        // We custom-lower all legally-typed vector truncates:
+        // 1. Mask VTs are custom-expanded into a series of standard nodes
+        // 2. Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR"
+        //    nodes which truncate by one power of two at a time.
+        setOperationAction(ISD::TRUNCATE, VT, Custom);
+      }
     }

     // We must custom-lower SPLAT_VECTOR vXi64 on RV32
@@ -690,15 +699,19 @@
         DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
   }
   case ISD::TRUNCATE: {
-    // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
-    // truncates as a series of "RISCVISD::TRUNCATE_VECTOR" nodes which
-    // truncate by one power of two at a time.
     SDLoc DL(Op);
     EVT VT = Op.getValueType();
-    // Only custom-lower non-mask truncates
-    if (!VT.isVector() || VT.getVectorElementType() == MVT::i1)
+    // Only custom-lower vector truncates
+    if (!VT.isVector())
       return Op;

+    // Truncates to mask types are handled differently
+    if (VT.getVectorElementType() == MVT::i1)
+      return lowerVectorMaskTrunc(Op, DAG);
+
+    // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
+    // truncates as a series of "RISCVISD::TRUNCATE_VECTOR" nodes which
+    // truncate by one power of two at a time.
    EVT DstEltVT = VT.getVectorElementType();
    SDValue Src = Op.getOperand(0);
@@ -721,6 +734,11 @@

     return Result;
   }
+  case ISD::ANY_EXTEND:
+  case ISD::ZERO_EXTEND:
+    return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
+  case ISD::SIGN_EXTEND:
+    return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
   case ISD::SPLAT_VECTOR:
     return lowerSPLATVECTOR(Op, DAG);
   case ISD::VSCALE: {
@@ -1198,6 +1216,76 @@
   return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
 }

+// Custom-lower extensions from mask vectors by using a vselect either with 1
+// for zero/any-extension or -1 for sign-extension:
+//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
+// Note that any-extension is lowered identically to zero-extension.
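+// For example, (nxv1i8 = sext nxv1i1 %m) becomes
+//   (nxv1i8 = vselect %m, (splat -1), (splat 0))
+// which in turn selects to a vmv.v.i of 0 followed by a vmerge.vim of -1 (or
+// of 1 for zero/any-extension), as exercised by the tests below.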
+SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
+                                                int64_t ExtTrueVal) const {
+  SDLoc DL(Op);
+  EVT VecVT = Op.getValueType();
+  SDValue Src = Op.getOperand(0);
+  // Only custom-lower extensions from mask types
+  if (!Src.getValueType().isVector() ||
+      Src.getValueType().getVectorElementType() != MVT::i1)
+    return Op;
+
+  // Be careful not to introduce illegal scalar types at this stage, and be
+  // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
+  // illegal and must be expanded. Since we know that the constants are
+  // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
+  bool IsRV32E64 =
+      !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
+  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+  SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, Subtarget.getXLenVT());
+
+  if (!IsRV32E64) {
+    SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
+    SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
+  } else {
+    SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
+    SplatTrueVal =
+        DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
+  }
+
+  return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
+}
+
+// Custom-lower truncations from vectors to mask vectors by using a mask and a
+// setcc operation:
+//   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
+SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT MaskVT = Op.getValueType();
+  // Only expect to custom-lower truncations to mask types
+  assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
+         "Unexpected type for vector mask lowering");
+  SDValue Src = Op.getOperand(0);
+  EVT VecVT = Src.getValueType();
+
+  // Be careful not to introduce illegal scalar types at this stage, and be
+  // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
+  // illegal and must be expanded. Since we know that the constants are
+  // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
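+  // The (and (splat 1)) and (setcc ne (splat 0)) nodes built below select to
+  // the vand.vi/vmsne.vi pair exercised by the tests below.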
+  bool IsRV32E64 =
+      !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
+  SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
+  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+
+  if (!IsRV32E64) {
+    SplatOne = DAG.getSplatVector(VecVT, DL, SplatOne);
+    SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
+  } else {
+    SplatOne = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatOne);
+    SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
+  }
+
+  SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
+
+  return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
+}
+
 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll
@@ -0,0 +1,729 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @sext_nxv1i1_nxv1i8(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %r
+}
+
+define <vscale x 1 x i8> @zext_nxv1i1_nxv1i8(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i8_nxv1i1(<vscale x 1 x i8> %v) {
+; CHECK-LABEL: trunc_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i8> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i8> @sext_nxv2i1_nxv2i8(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %r
+}
+
+define <vscale x 2 x i8> @zext_nxv2i1_nxv2i8(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i8_nxv2i1(<vscale x 2 x i8> %v) {
+; CHECK-LABEL: trunc_nxv2i8_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i8> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i8> @sext_nxv4i1_nxv4i8(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %r
+}
+
+define <vscale x 4 x i8> @zext_nxv4i1_nxv4i8(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i8_nxv4i1(<vscale x 4 x i8> %v) {
+; CHECK-LABEL: trunc_nxv4i8_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i8> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i8> @sext_nxv8i1_nxv8i8(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %r
+}
+
+define <vscale x 8 x i8> @zext_nxv8i1_nxv8i8(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i8_nxv8i1(<vscale x 8 x i8> %v) {
+; CHECK-LABEL: trunc_nxv8i8_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i8> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i8> @sext_nxv16i1_nxv16i8(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 16 x i8> @zext_nxv16i1_nxv16i8(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i8_nxv16i1(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: trunc_nxv16i8_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i8> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 32 x i8> @sext_nxv32i1_nxv32i8(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: sext_nxv32i1_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 32 x i1> %v to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %r
+}
+
+define <vscale x 32 x i8> @zext_nxv32i1_nxv32i8(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: zext_nxv32i1_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 32 x i1> %v to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %r
+}
+
+define <vscale x 32 x i1> @trunc_nxv32i8_nxv32i1(<vscale x 32 x i8> %v) {
+; CHECK-LABEL: trunc_nxv32i8_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 32 x i8> %v to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %r
+}
+
+define <vscale x 64 x i8> @sext_nxv64i1_nxv64i8(<vscale x 64 x i1> %v) {
+; CHECK-LABEL: sext_nxv64i1_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 64 x i1> %v to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %r
+}
+
+define <vscale x 64 x i8> @zext_nxv64i1_nxv64i8(<vscale x 64 x i1> %v) {
+; CHECK-LABEL: zext_nxv64i1_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 64 x i1> %v to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %r
+}
+
+define <vscale x 64 x i1> @trunc_nxv64i8_nxv64i1(<vscale x 64 x i8> %v) {
+; CHECK-LABEL: trunc_nxv64i8_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 64 x i8> %v to <vscale x 64 x i1>
+  ret <vscale x 64 x i1> %r
+}
+
+define <vscale x 1 x i16> @sext_nxv1i1_nxv1i16(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %r
+}
+
+define <vscale x 1 x i16> @zext_nxv1i1_nxv1i16(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i16_nxv1i1(<vscale x 1 x i16> %v) {
+; CHECK-LABEL: trunc_nxv1i16_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i16> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i16> @sext_nxv2i1_nxv2i16(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %r
+}
+
+define <vscale x 2 x i16> @zext_nxv2i1_nxv2i16(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i16_nxv2i1(<vscale x 2 x i16> %v) {
+; CHECK-LABEL: trunc_nxv2i16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i16> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i16> @sext_nxv4i1_nxv4i16(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %r
+}
+
+define <vscale x 4 x i16> @zext_nxv4i1_nxv4i16(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i16_nxv4i1(<vscale x 4 x i16> %v) {
+; CHECK-LABEL: trunc_nxv4i16_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i16> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i16> @sext_nxv8i1_nxv8i16(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 8 x i16> @zext_nxv8i1_nxv8i16(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i16_nxv8i1(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: trunc_nxv8i16_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i16> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i16> @sext_nxv16i1_nxv16i16(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %r
+}
+
+define <vscale x 16 x i16> @zext_nxv16i1_nxv16i16(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i16_nxv16i1(<vscale x 16 x i16> %v) {
+; CHECK-LABEL: trunc_nxv16i16_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i16> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 32 x i16> @sext_nxv32i1_nxv32i16(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: sext_nxv32i1_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 32 x i1> %v to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %r
+}
+
+define <vscale x 32 x i16> @zext_nxv32i1_nxv32i16(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: zext_nxv32i1_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 32 x i1> %v to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %r
+}
+
+define <vscale x 32 x i1> @trunc_nxv32i16_nxv32i1(<vscale x 32 x i16> %v) {
+; CHECK-LABEL: trunc_nxv32i16_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 32 x i16> %v to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %r
+}
+
+define <vscale x 1 x i32> @sext_nxv1i1_nxv1i32(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %r
+}
+
+define <vscale x 1 x i32> @zext_nxv1i1_nxv1i32(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i32_nxv1i1(<vscale x 1 x i32> %v) {
+; CHECK-LABEL: trunc_nxv1i32_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i32> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i32> @sext_nxv2i1_nxv2i32(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %r
+}
+
+define <vscale x 2 x i32> @zext_nxv2i1_nxv2i32(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i32_nxv2i1(<vscale x 2 x i32> %v) {
+; CHECK-LABEL: trunc_nxv2i32_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i32> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i32> @sext_nxv4i1_nxv4i32(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i32> @zext_nxv4i1_nxv4i32(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i32_nxv4i1(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: trunc_nxv4i32_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i32> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i32> @sext_nxv8i1_nxv8i32(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %r
+}
+
+define <vscale x 8 x i32> @zext_nxv8i1_nxv8i32(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i32_nxv8i1(<vscale x 8 x i32> %v) {
+; CHECK-LABEL: trunc_nxv8i32_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i32> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i32> @sext_nxv16i1_nxv16i32(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %r
+}
+
+define <vscale x 16 x i32> @zext_nxv16i1_nxv16i32(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i32_nxv16i1(<vscale x 16 x i32> %v) {
+; CHECK-LABEL: trunc_nxv16i32_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i32> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 1 x i64> @sext_nxv1i1_nxv1i64(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %r
+}
+
+define <vscale x 1 x i64> @zext_nxv1i1_nxv1i64(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i64_nxv1i1(<vscale x 1 x i64> %v) {
+; CHECK-LABEL: trunc_nxv1i64_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i64> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i64> @sext_nxv2i1_nxv2i64(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i64> @zext_nxv2i1_nxv2i64(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i64_nxv2i1(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: trunc_nxv2i64_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i64> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i64> @sext_nxv4i1_nxv4i64(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %r
+}
+
+define <vscale x 4 x i64> @zext_nxv4i1_nxv4i64(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i64_nxv4i1(<vscale x 4 x i64> %v) {
+; CHECK-LABEL: trunc_nxv4i64_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i64> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i64> @sext_nxv8i1_nxv8i64(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %r
+}
+
+define <vscale x 8 x i64> @zext_nxv8i1_nxv8i64(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i64_nxv8i1(<vscale x 8 x i64> %v) {
+; CHECK-LABEL: trunc_nxv8i64_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i64> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll
@@ -0,0 +1,729 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @sext_nxv1i1_nxv1i8(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %r
+}
+
+define <vscale x 1 x i8> @zext_nxv1i1_nxv1i8(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i8_nxv1i1(<vscale x 1 x i8> %v) {
+; CHECK-LABEL: trunc_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i8> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i8> @sext_nxv2i1_nxv2i8(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %r
+}
+
+define <vscale x 2 x i8> @zext_nxv2i1_nxv2i8(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i8_nxv2i1(<vscale x 2 x i8> %v) {
+; CHECK-LABEL: trunc_nxv2i8_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i8> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i8> @sext_nxv4i1_nxv4i8(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %r
+}
+
+define <vscale x 4 x i8> @zext_nxv4i1_nxv4i8(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i8_nxv4i1(<vscale x 4 x i8> %v) {
+; CHECK-LABEL: trunc_nxv4i8_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i8> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i8> @sext_nxv8i1_nxv8i8(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %r
+}
+
+define <vscale x 8 x i8> @zext_nxv8i1_nxv8i8(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i8_nxv8i1(<vscale x 8 x i8> %v) {
+; CHECK-LABEL: trunc_nxv8i8_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i8> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i8> @sext_nxv16i1_nxv16i8(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 16 x i8> @zext_nxv16i1_nxv16i8(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i8_nxv16i1(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: trunc_nxv16i8_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i8> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 32 x i8> @sext_nxv32i1_nxv32i8(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: sext_nxv32i1_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 32 x i1> %v to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %r
+}
+
+define <vscale x 32 x i8> @zext_nxv32i1_nxv32i8(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: zext_nxv32i1_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 32 x i1> %v to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %r
+}
+
+define <vscale x 32 x i1> @trunc_nxv32i8_nxv32i1(<vscale x 32 x i8> %v) {
+; CHECK-LABEL: trunc_nxv32i8_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 32 x i8> %v to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %r
+}
+
+define <vscale x 64 x i8> @sext_nxv64i1_nxv64i8(<vscale x 64 x i1> %v) {
+; CHECK-LABEL: sext_nxv64i1_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 64 x i1> %v to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %r
+}
+
+define <vscale x 64 x i8> @zext_nxv64i1_nxv64i8(<vscale x 64 x i1> %v) {
+; CHECK-LABEL: zext_nxv64i1_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 64 x i1> %v to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %r
+}
+
+define <vscale x 64 x i1> @trunc_nxv64i8_nxv64i1(<vscale x 64 x i8> %v) {
+; CHECK-LABEL: trunc_nxv64i8_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 64 x i8> %v to <vscale x 64 x i1>
+  ret <vscale x 64 x i1> %r
+}
+
+define <vscale x 1 x i16> @sext_nxv1i1_nxv1i16(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %r
+}
+
+define <vscale x 1 x i16> @zext_nxv1i1_nxv1i16(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i16_nxv1i1(<vscale x 1 x i16> %v) {
+; CHECK-LABEL: trunc_nxv1i16_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i16> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i16> @sext_nxv2i1_nxv2i16(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %r
+}
+
+define <vscale x 2 x i16> @zext_nxv2i1_nxv2i16(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i16_nxv2i1(<vscale x 2 x i16> %v) {
+; CHECK-LABEL: trunc_nxv2i16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i16> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i16> @sext_nxv4i1_nxv4i16(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %r
+}
+
+define <vscale x 4 x i16> @zext_nxv4i1_nxv4i16(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i16_nxv4i1(<vscale x 4 x i16> %v) {
+; CHECK-LABEL: trunc_nxv4i16_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i16> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i16> @sext_nxv8i1_nxv8i16(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 8 x i16> @zext_nxv8i1_nxv8i16(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i16_nxv8i1(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: trunc_nxv8i16_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i16> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i16> @sext_nxv16i1_nxv16i16(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %r
+}
+
+define <vscale x 16 x i16> @zext_nxv16i1_nxv16i16(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i16_nxv16i1(<vscale x 16 x i16> %v) {
+; CHECK-LABEL: trunc_nxv16i16_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i16> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 32 x i16> @sext_nxv32i1_nxv32i16(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: sext_nxv32i1_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 32 x i1> %v to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %r
+}
+
+define <vscale x 32 x i16> @zext_nxv32i1_nxv32i16(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: zext_nxv32i1_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 32 x i1> %v to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %r
+}
+
+define <vscale x 32 x i1> @trunc_nxv32i16_nxv32i1(<vscale x 32 x i16> %v) {
+; CHECK-LABEL: trunc_nxv32i16_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 32 x i16> %v to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %r
+}
+
+define <vscale x 1 x i32> @sext_nxv1i1_nxv1i32(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %r
+}
+
+define <vscale x 1 x i32> @zext_nxv1i1_nxv1i32(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i32_nxv1i1(<vscale x 1 x i32> %v) {
+; CHECK-LABEL: trunc_nxv1i32_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i32> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i32> @sext_nxv2i1_nxv2i32(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %r
+}
+
+define <vscale x 2 x i32> @zext_nxv2i1_nxv2i32(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i32_nxv2i1(<vscale x 2 x i32> %v) {
+; CHECK-LABEL: trunc_nxv2i32_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i32> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i32> @sext_nxv4i1_nxv4i32(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i32> @zext_nxv4i1_nxv4i32(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i32_nxv4i1(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: trunc_nxv4i32_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i32> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i32> @sext_nxv8i1_nxv8i32(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %r
+}
+
+define <vscale x 8 x i32> @zext_nxv8i1_nxv8i32(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i32_nxv8i1(<vscale x 8 x i32> %v) {
+; CHECK-LABEL: trunc_nxv8i32_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i32> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i32> @sext_nxv16i1_nxv16i32(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %r
+}
+
+define <vscale x 16 x i32> @zext_nxv16i1_nxv16i32(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i32_nxv16i1(<vscale x 16 x i32> %v) {
+; CHECK-LABEL: trunc_nxv16i32_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i32> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 1 x i64> @sext_nxv1i1_nxv1i64(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %r
+}
+
+define <vscale x 1 x i64> @zext_nxv1i1_nxv1i64(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i64_nxv1i1(<vscale x 1 x i64> %v) {
+; CHECK-LABEL: trunc_nxv1i64_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i64> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i64> @sext_nxv2i1_nxv2i64(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i64> @zext_nxv2i1_nxv2i64(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i64_nxv2i1(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: trunc_nxv2i64_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i64> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i64> @sext_nxv4i1_nxv4i64(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %r
+}
+
+define <vscale x 4 x i64> @zext_nxv4i1_nxv4i64(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i64_nxv4i1(<vscale x 4 x i64> %v) {
+; CHECK-LABEL: trunc_nxv4i64_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i64> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i64> @sext_nxv8i1_nxv8i64(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %r
+}
+
+define <vscale x 8 x i64> @zext_nxv8i1_nxv8i64(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i64_nxv8i1(<vscale x 8 x i64> %v) {
+; CHECK-LABEL: trunc_nxv8i64_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i64> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+