diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -191,6 +191,7 @@
 struct RISCVMaskedPseudoInfo {
   uint16_t MaskedPseudo;
   uint16_t UnmaskedPseudo;
+  uint16_t UnmaskedTuPseudo;
   uint8_t MaskOpIdx;
 };
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2242,6 +2242,7 @@
   const MCInstrDesc &MaskedMCID = TII->get(N->getMachineOpcode());
+  bool IsTU = false;
   if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
     // The last operand of the pseudo is the policy op, but we're expecting a
     // Glue operand last. We may also have a chain.
@@ -2251,27 +2252,42 @@
     if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
       (*TailPolicyOpIdx)--;

-    // If the policy isn't TAIL_AGNOSTIC we can't perform this optimization.
-    if (N->getConstantOperandVal(*TailPolicyOpIdx) != RISCVII::TAIL_AGNOSTIC)
-      return false;
+    if (N->getConstantOperandVal(*TailPolicyOpIdx) ==
+            RISCVII::TAIL_UNDISTURBED &&
+        !N->getOperand(0).isUndef())
+      IsTU = true;
   }

-  const MCInstrDesc &UnmaskedMCID = TII->get(I->UnmaskedPseudo);
+  if (IsTU) {
+    const MCInstrDesc &UnmaskedTuMCID = TII->get(I->UnmaskedTuPseudo);

-  // Check that we're dropping the merge operand, the mask operand, and any
-  // policy operand when we transform to this unmasked pseudo.
-  assert(!RISCVII::hasMergeOp(UnmaskedMCID.TSFlags) &&
-         RISCVII::hasDummyMaskOp(UnmaskedMCID.TSFlags) &&
-         !RISCVII::hasVecPolicyOp(UnmaskedMCID.TSFlags) &&
-         "Unexpected pseudo to transform to");
-  (void)UnmaskedMCID;
+    // Check that we're dropping the mask operand, and any policy operand
+    // when we transform to this unmasked tu pseudo.
+    assert(RISCVII::hasMergeOp(UnmaskedTuMCID.TSFlags) &&
+           RISCVII::hasDummyMaskOp(UnmaskedTuMCID.TSFlags) &&
+           !RISCVII::hasVecPolicyOp(UnmaskedTuMCID.TSFlags) &&
+           "Unexpected pseudo to transform to");
+    (void)UnmaskedTuMCID;
+  } else {
+    const MCInstrDesc &UnmaskedMCID = TII->get(I->UnmaskedPseudo);
+
+    // Check that we're dropping the merge operand, the mask operand, and any
+    // policy operand when we transform to this unmasked pseudo.
+    assert(!RISCVII::hasMergeOp(UnmaskedMCID.TSFlags) &&
+           RISCVII::hasDummyMaskOp(UnmaskedMCID.TSFlags) &&
+           !RISCVII::hasVecPolicyOp(UnmaskedMCID.TSFlags) &&
+           "Unexpected pseudo to transform to");
+    (void)UnmaskedMCID;
+  }
+  unsigned Opc = IsTU ? I->UnmaskedTuPseudo : I->UnmaskedPseudo;
+  unsigned Id = IsTU ? 0 : 1;
   SmallVector<SDValue, 8> Ops;
   // Skip the merge operand at index 0.
-  for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
+  for (unsigned E = N->getNumOperands(); Id != E; Id++) {
     // Skip the mask, the policy, and the Glue.
-    SDValue Op = N->getOperand(I);
-    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
+    SDValue Op = N->getOperand(Id);
+    if (Id == MaskOpIdx || Id == TailPolicyOpIdx ||
         Op.getValueType() == MVT::Glue)
       continue;
     Ops.push_back(Op);
@@ -2281,8 +2297,7 @@
   if (auto *TGlued = Glued->getGluedNode())
     Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));

-  SDNode *Result =
-      CurDAG->getMachineNode(I->UnmaskedPseudo, SDLoc(N), N->getVTList(), Ops);
+  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);

   ReplaceUses(N, Result);
   return true;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4770,11 +4770,12 @@
     SDValue VL = Op.getOperand(NumOperands - 1);
     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
+    unsigned PolicyImm = Op.getOperand(0).isUndef() ? RISCVII::TAIL_AGNOSTIC
+                                                    : RISCVII::TAIL_UNDISTURBED;
+    SDValue Policy = DAG.getTargetConstant(PolicyImm, DL, XLenVT);
     Ops.push_back(TrueMask);
     Ops.push_back(VL);
-    // Since unmasked intrinsics and pseudos have no policy operand,
-    // we use 0 here for pattern matching.
-    Ops.push_back(DAG.getConstant(0, DL, XLenVT)); // Policy
+    Ops.push_back(Policy);
   }
   return DAG.getNode(Opc, DL, VTs, Ops);
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -426,13 +426,14 @@
 class RISCVMaskedPseudo<bits<4> MaskIdx> {
   Pseudo MaskedPseudo = !cast<Pseudo>(NAME);
   Pseudo UnmaskedPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME));
+  Pseudo UnmaskedTuPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME # "_TU"));
   bits<4> MaskOpIdx = MaskIdx;
 }

 def RISCVMaskedPseudosTable : GenericTable {
   let FilterClass = "RISCVMaskedPseudo";
   let CppTypeName = "RISCVMaskedPseudoInfo";
-  let Fields = ["MaskedPseudo", "UnmaskedPseudo", "MaskOpIdx"];
+  let Fields = ["MaskedPseudo", "UnmaskedPseudo", "UnmaskedTuPseudo", "MaskOpIdx"];
   let PrimaryKey = ["MaskedPseudo"];
   let PrimaryKeyName = "getMaskedPseudoInfo";
 }
@@ -2656,7 +2657,8 @@
     def "_" # MInfo.MX : VPseudoUnaryNoMask;
     def "_" # MInfo.MX # "_TU": VPseudoUnaryNoMaskTU;
     def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA;
+                                                      Constraint>,
+                                   RISCVMaskedPseudo;
   }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -621,21 +621,6 @@
                    LMULInfo vlmul, VReg result_reg_class, VReg op2_reg_class> {
-  def : Pat<(result_type (vop (result_type undef),
-                              (op2_type op2_reg_class:$rs2),
-                              (mask_type true_mask),
-                              VLOpFrag, (XLenVT 0))),
-            (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
-                (op2_type op2_reg_class:$rs2),
-                GPR:$vl, sew)>;
-  def : Pat<(result_type (vop (result_type result_reg_class:$merge),
-                              (op2_type op2_reg_class:$rs2),
-                              (mask_type true_mask),
-                              VLOpFrag, (XLenVT 0))),
-            (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU")
-                (result_type result_reg_class:$merge),
-                (op2_type op2_reg_class:$rs2),
-                GPR:$vl, sew)>;
   def : Pat<(result_type (vop (result_type result_reg_class:$merge),
                               (op2_type op2_reg_class:$rs2),
                               (mask_type V0),
diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll
--- a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll @@ -61,6 +61,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f16.nxv2i16( , , @@ -119,6 +137,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f16.nxv4i16( , , @@ -177,6 +213,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f16.nxv8i16( , , @@ -235,6 +289,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv16f16.nxv16i16( , , @@ -293,6 +365,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv32f16.nxv32i16( , , @@ -351,6 +441,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv32i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32( , , @@ -409,6 +517,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, 
iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32( , , @@ -467,6 +593,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32( , , @@ -525,6 +669,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32( , , @@ -583,6 +745,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32( , , @@ -641,6 +821,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64( , , @@ -699,6 +897,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64( , , @@ -757,6 +973,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, 
ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64( , , @@ -815,6 +1049,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64( , , @@ -872,3 +1124,29 @@ ret %a } + +define @intrinsic_allone_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll @@ -61,6 +61,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f16.nxv2i16( , , @@ -119,6 +137,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f16.nxv4i16( , , @@ -177,6 +213,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + 
declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f16.nxv8i16( , , @@ -235,6 +289,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f16.nxv16i16( , , @@ -293,6 +365,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv32f16.nxv32i16( , , @@ -351,6 +441,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv32i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32( , , @@ -409,6 +517,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32( , , @@ -467,6 +593,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32( , , @@ -525,6 +669,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32( , , @@ -583,6 +745,24 @@ ret %a } +define 
@intrinsic_allone_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32( , , @@ -641,6 +821,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64( , , @@ -699,6 +897,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64( , , @@ -757,6 +973,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64( , , @@ -815,6 +1049,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64( , , @@ -872,3 +1124,29 @@ ret %a } + +define @intrinsic_allone_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare 
@llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll @@ -61,6 +61,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv2i16.nxv2f16( , , @@ -119,6 +137,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i16.nxv4f16( , , @@ -177,6 +213,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i16.nxv8f16( , , @@ -235,6 +289,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv16i16.nxv16f16( , , @@ -293,6 +365,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv32i16.nxv32f16( , , @@ -351,6 +441,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv32i1( + iXLen %1); + %a = call 
@llvm.riscv.strict.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32( , , @@ -409,6 +517,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32( , , @@ -467,6 +593,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32( , , @@ -525,6 +669,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32( , , @@ -583,6 +745,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32( , , @@ -641,6 +821,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64( , , @@ -699,6 +897,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare 
@llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64( , , @@ -757,6 +973,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64( , , @@ -815,6 +1049,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64( , , @@ -872,3 +1124,29 @@ ret %a } + +define @intrinsic_allone_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll @@ -61,6 +61,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i16.nxv2f16( , , @@ -119,6 +137,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i16.nxv4f16( , , @@ -177,6 +213,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i16.nxv8f16( , , @@ -235,6 +289,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i16.nxv16f16( , , @@ -293,6 +365,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv32i16.nxv32f16( , , @@ -351,6 +441,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv32i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32( , , @@ -409,6 +517,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32( , , @@ -467,6 +593,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32( , , @@ -525,6 +669,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + 
%allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32( , , @@ -583,6 +745,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32( , , @@ -641,6 +821,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64( , , @@ -699,6 +897,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64( , , @@ -757,6 +973,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64( , , @@ -815,6 +1049,24 @@ ret %a } +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64( , , @@ -872,3 +1124,29 @@ ret %a } + +define @intrinsic_allone_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( + 
undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.f.w.nxv2f16.nxv2f32( , , @@ -121,6 +140,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.f.w.nxv4f16.nxv4f32( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.f.w.nxv8f16.nxv8f32( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.f.w.nxv16f16.nxv16f32( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare 
@llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64( , , @@ -533,3 +685,30 @@ ret %a } + +define @intrinsic_allone_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.x.w.nxv2f16.nxv2i32( , , @@ -121,6 +140,25 @@ ret %a } +define 
@intrinsic_allone_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.x.w.nxv4f16.nxv4i32( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.x.w.nxv8f16.nxv8i32( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.x.w.nxv16f16.nxv16i32( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare 
@llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64( , , @@ -533,3 +685,30 @@ ret %a } + +define @intrinsic_allone_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f16.nxv2i32( , , @@ -121,6 +140,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f16.nxv4i32( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f16.nxv8i32( , , @@ -239,6 +296,25 @@ 
ret %a } +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv16f16.nxv16i32( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64( , , @@ -533,3 +685,30 @@ ret %a } + +define @intrinsic_allone_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret 
%a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv2i8.nxv2f16( , , @@ -121,6 +140,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv4i8.nxv4f16( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv8i8.nxv8f16( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv16i8.nxv16f16( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv32i8.nxv32f16( , , @@ -357,6 +452,25 @@ ret %a } +define 
@intrinsic_allone_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv32i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv1i16.nxv1f32( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv2i16.nxv2f32( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv4i16.nxv4f32( , , @@ -534,6 +686,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv8i16.nxv8f32( , , @@ -593,6 +764,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv16i16.nxv16f32( , , @@ -652,6 +842,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare 
@llvm.riscv.strict.vfncvt.x.f.w.nxv1i32.nxv1f64( , , @@ -711,6 +920,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv2i32.nxv2f64( , , @@ -770,6 +998,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv4i32.nxv4f64( , , @@ -829,6 +1076,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.x.f.w.nxv8i32.nxv8f64( , , @@ -887,3 +1153,30 @@ ret %a } + +define @intrinsic_allone_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i8.nxv2f16( , , @@ -121,6 +140,25 @@ ret %a } 
+define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i8.nxv4f16( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i8.nxv8f16( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i8.nxv16f16( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv32i8.nxv32f16( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv32i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i16.nxv1f32( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare 
@llvm.riscv.strict.vfncvt.xu.f.w.nxv2i16.nxv2f32( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i16.nxv4f32( , , @@ -534,6 +686,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i16.nxv8f32( , , @@ -593,6 +764,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i16.nxv16f32( , , @@ -652,6 +842,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i32.nxv1f64( , , @@ -711,6 +920,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i32.nxv2f64( , , @@ -770,6 +998,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call 
@llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i32.nxv4f64( , , @@ -829,6 +1076,25 @@ ret %a } +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i32.nxv8f64( , , @@ -887,3 +1153,30 @@ ret %a } + +define @intrinsic_allone_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f32.nxv2f16( , , @@ -121,6 +140,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f32.nxv4f16( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( + undef, + %0, + %allone, 
+ iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f32.nxv8f16( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv16f32.nxv16f16( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f64.nxv1f32( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f64.nxv2f32( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f64.nxv4f32( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f64.nxv8f32( , , @@ -533,3 +685,30 @@ ret %a } + +define @intrinsic_allone_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call 
@llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f16.nxv2i8( , , @@ -121,6 +140,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f16.nxv4i8( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f16.nxv8i8( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f16.nxv16i8( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret 
%a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv32f16.nxv32i8( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv32i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16( , , @@ -534,6 +686,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16( , , @@ -593,6 +764,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16( , , @@ -652,6 +842,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call 
@llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32( , , @@ -711,6 +920,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32( , , @@ -770,6 +998,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32( , , @@ -829,6 +1076,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32( , , @@ -887,3 +1153,30 @@ ret %a } + +define @intrinsic_allone_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( + undef, + %0, + %allone, + 
iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f16.nxv2i8( , , @@ -121,6 +140,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f16.nxv4i8( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f16.nxv8i8( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f16.nxv16i8( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv32f16.nxv32i8( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv32i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call 
@llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16( , , @@ -534,6 +686,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16( , , @@ -593,6 +764,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16( , , @@ -652,6 +842,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32( , , @@ -711,6 +920,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32( , , @@ -770,6 +998,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; 
CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32( , , @@ -829,6 +1076,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32( , , @@ -887,3 +1153,30 @@ ret %a } + +define @intrinsic_allone_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i32.nxv2f16( , , @@ -121,6 +140,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i32.nxv4f16( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); 
+ %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i32.nxv8f16( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv16i32.nxv16f16( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32( , , @@ -533,3 +685,30 @@ ret %a } + +define @intrinsic_allone_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret 
+entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll @@ -62,6 +62,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i32.nxv2f16( , , @@ -121,6 +140,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i32.nxv4f16( , , @@ -180,6 +218,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i32.nxv8f16( , , @@ -239,6 +296,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv16i32.nxv16f16( , , @@ -298,6 +374,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv16i1( + iXLen 
%1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32( , , @@ -357,6 +452,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv1i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32( , , @@ -416,6 +530,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv2i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32( , , @@ -475,6 +608,25 @@ ret %a } +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv4i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32( , , @@ -533,3 +685,30 @@ ret %a } + +define @intrinsic_allone_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_allone_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %allone = call @llvm.riscv.vmset.nxv8i1( + iXLen %1); + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( + undef, + %0, + %allone, + iXLen %1, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vmset.nxv1i1(iXLen); +declare @llvm.riscv.vmset.nxv2i1(iXLen); +declare @llvm.riscv.vmset.nxv4i1(iXLen); +declare @llvm.riscv.vmset.nxv8i1(iXLen); +declare @llvm.riscv.vmset.nxv16i1(iXLen); +declare @llvm.riscv.vmset.nxv32i1(iXLen); +declare @llvm.riscv.vmset.nxv64i1(iXLen);