diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -374,6 +374,39 @@
     // We must custom-lower SPLAT_VECTOR vXi64 on RV32
     if (!Subtarget.is64Bit())
       setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
+
+    ISD::CondCode VFPCCToExpand[] = {ISD::SETO, ISD::SETONE, ISD::SETUEQ,
+                                     ISD::SETUGT, ISD::SETUGE, ISD::SETULT,
+                                     ISD::SETULE, ISD::SETUO};
+
+    if (Subtarget.hasStdExtZfh()) {
+      for (auto VT : {RISCVVMVTs::vfloat16mf4_t, RISCVVMVTs::vfloat16mf2_t,
+                      RISCVVMVTs::vfloat16m1_t, RISCVVMVTs::vfloat16m2_t,
+                      RISCVVMVTs::vfloat16m4_t, RISCVVMVTs::vfloat16m8_t}) {
+        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+        for (auto CC : VFPCCToExpand)
+          setCondCodeAction(CC, VT, Expand);
+      }
+    }
+
+    if (Subtarget.hasStdExtF()) {
+      for (auto VT : {RISCVVMVTs::vfloat32mf2_t, RISCVVMVTs::vfloat32m1_t,
+                      RISCVVMVTs::vfloat32m2_t, RISCVVMVTs::vfloat32m4_t,
+                      RISCVVMVTs::vfloat32m8_t}) {
+        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+        for (auto CC : VFPCCToExpand)
+          setCondCodeAction(CC, VT, Expand);
+      }
+    }
+
+    if (Subtarget.hasStdExtD()) {
+      for (auto VT : {RISCVVMVTs::vfloat64m1_t, RISCVVMVTs::vfloat64m2_t,
+                      RISCVVMVTs::vfloat64m4_t, RISCVVMVTs::vfloat64m8_t}) {
+        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+        for (auto CC : VFPCCToExpand)
+          setCondCodeAction(CC, VT, Expand);
+      }
+    }
   }
 
   // Function alignments.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -197,6 +197,45 @@
                            SplatPat_simm5, simm5, swap>;
 }
 
+multiclass VPatFPSetCCSDNode_VV<CondCode cc, string instruction_name,
+                                bit swap = 0> {
+  foreach fvti = AllFloatVectors in {
+    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#fvti.LMul.MX);
+    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
+                                (fvti.Vector fvti.RegClass:$rs2),
+                                cc)),
+              SwapHelper<(instruction),
+                         (instruction fvti.RegClass:$rs1),
+                         (instruction fvti.RegClass:$rs2),
+                         (instruction VLMax, fvti.SEW),
+                         swap>.Value>;
+  }
+}
+
+multiclass VPatFPSetCCSDNode_VF<CondCode cc, string instruction_name,
+                                bit swap = 0> {
+  foreach fvti = AllFloatVectors in {
+    defvar instruction = !cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX);
+    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
+                                (fvti.Vector (splat_vector fvti.ScalarRegClass:$rs2)),
+                                cc)),
+              SwapHelper<(instruction),
+                         (instruction fvti.RegClass:$rs1),
+                         (instruction ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret),
+                         (instruction VLMax, fvti.SEW),
+                         swap>.Value>;
+  }
+}
+
+multiclass VPatFPSetCCSDNode_VV_VF<CondCode cc, string instruction_name,
+                                   bit swap = 0> {
+  defm : VPatFPSetCCSDNode_VV<cc, instruction_name, swap>;
+  defm : VPatFPSetCCSDNode_VF<cc, instruction_name, swap>;
+}
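For orientation, a rough sketch (an illustration, not part of the patch) of what one
instantiation of these multiclasses expands to. SwapHelper picks between the two
operand orders, which lets the greater-than predicates reuse the less-than instruction:

    // Hypothetical expansion of:
    //   defm "" : VPatFPSetCCSDNode_VV<SETOGT, "PseudoVMFLT", /*swap*/1>;
    // for fvti = VF16M2 (nxv8f16, LMUL=2, SEW=16), roughly:
    def : Pat<(nxv8i1 (setcc (nxv8f16 VRM2:$rs1), (nxv8f16 VRM2:$rs2), SETOGT)),
              (PseudoVMFLT_VV_M2 VRM2:$rs2, VRM2:$rs1, VLMax, 16)>;
    // i.e. 'a > b' is selected as vmflt.vv with the operands swapped,
    // matching the 'vmflt.vv v0, v18, v16' lines in the ogt tests below.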
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -304,6 +343,33 @@
 } // Predicates = [HasStdExtV]
 
+let Predicates = [HasStdExtV, HasStdExtF] in {
+
+// 14.11. Vector Floating-Point Compare Instructions
+defm "" : VPatFPSetCCSDNode_VV_VF<SETEQ, "PseudoVMFEQ">;
+defm "" : VPatFPSetCCSDNode_VV_VF<SETOEQ, "PseudoVMFEQ">;
+
+defm "" : VPatFPSetCCSDNode_VV_VF<SETNE, "PseudoVMFNE">;
+defm "" : VPatFPSetCCSDNode_VV_VF<SETUNE, "PseudoVMFNE">;
+
+defm "" : VPatFPSetCCSDNode_VV_VF<SETLT, "PseudoVMFLT">;
+defm "" : VPatFPSetCCSDNode_VV_VF<SETOLT, "PseudoVMFLT">;
+
+defm "" : VPatFPSetCCSDNode_VV_VF<SETLE, "PseudoVMFLE">;
+defm "" : VPatFPSetCCSDNode_VV_VF<SETOLE, "PseudoVMFLE">;
+
+defm "" : VPatFPSetCCSDNode_VV<SETGT, "PseudoVMFLT", /*swap*/1>;
+defm "" : VPatFPSetCCSDNode_VV<SETOGT, "PseudoVMFLT", /*swap*/1>;
+defm "" : VPatFPSetCCSDNode_VF<SETGT, "PseudoVMFGT">;
+defm "" : VPatFPSetCCSDNode_VF<SETOGT, "PseudoVMFGT">;
+
+defm "" : VPatFPSetCCSDNode_VV<SETGE, "PseudoVMFLE", /*swap*/1>;
+defm "" : VPatFPSetCCSDNode_VV<SETOGE, "PseudoVMFLE", /*swap*/1>;
+defm "" : VPatFPSetCCSDNode_VF<SETGE, "PseudoVMFGE">;
+defm "" : VPatFPSetCCSDNode_VF<SETOGE, "PseudoVMFGE">;
+
+} // Predicates = [HasStdExtV, HasStdExtF]
+
 //===----------------------------------------------------------------------===//
 // Vector Splats
 //===----------------------------------------------------------------------===//
@@ -338,3 +404,11 @@
       }
     }
 } // Predicates = [HasStdExtV, IsRV32]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+foreach fvti = AllFloatVectors in
+  def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)),
+            (!cast<Instruction>("PseudoVFMV_V_F_"#fvti.LMul.MX)
+              ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs1">.ret,
+              VLMax, fvti.SEW)>;
+} // Predicates = [HasStdExtV, HasStdExtF]
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
@@ -0,0 +1,2778 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('fv' tests) should swap operands and
+; condition codes accordingly in order to generate a 'vf' instruction.
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
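The `_nonans` functions reference an attribute group whose definition sits at the end
of the test file, outside the hunk shown here; presumably something like:

    attributes #0 = { "no-nans-fp-math"="true" }

Under that attribute the backend may treat ordered and unordered predicates as
interchangeable, which is why each `_nonans` body below selects down to a single
ordered compare instruction.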
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmflt.vv v0, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfle.vv v0, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_olt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmflt.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ole_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfle.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_one_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v16, v18
+; CHECK-NEXT:    vmfeq.vv v26, v18, v18
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_one_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v28, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v28
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_one_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vv v25, v26, v16
+; CHECK-NEXT:    vmfeq.vf v28, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v28, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_one_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_one_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
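SETONE and SETO are in VFPCCToExpand, so the `one` tests above and the `ord` tests
below exercise condcode expansion rather than a direct pattern: `vmfeq.vv vN, vX, vX`
is true exactly where vX is not NaN, and mask ops stitch the pieces together. As a
sketch in IR terms (hypothetical value names, not taken from the patch):

    %ord = and (fcmp oeq %a, %a), (fcmp oeq %b, %b)   ; neither operand is NaN
    %ne  = fcmp une %a, %b
    %one = and %ne, %ord                              ; ordered and not equal

which is the vmfne/vmfeq/vmand sequence visible in the generated code.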
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v18, v18
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ord_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v18, v18
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v16, v18
+; CHECK-NEXT:    vmfne.vv v26, v18, v18
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v28, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v28
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ueq_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vv v25, v26, v16
+; CHECK-NEXT:    vmfne.vf v28, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v28, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v16, v18
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ugt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfle.vv v25, v26, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
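The unordered relational predicates are likewise expanded: `a ugt b` is the complement
of `a ole b`, which is IEEE-correct because an ordered compare with a NaN operand is
false. The complement is materialized with an all-ones mask, as in the `ugt` tests
above and the `uge`/`ult`/`ule` tests that follow:

    vmfle.vv v25, v16, v18    # ordered a <= b
    vmset.m  v26              # all-ones mask
    vmxor.mm v0, v25, v26     # v0 = !(a ole b) == a ugt b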
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v16, v18
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uge_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmflt.vv v25, v26, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v18, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ult_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfle.vv v25, v16, v26
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v18, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ule_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmflt.vv v25, v16, v26
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v18, v18
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v18, v18
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmflt.vv v0, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfle.vv v0, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_olt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmflt.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ole_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfle.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_one_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v16, v20
+; CHECK-NEXT:    vmfeq.vv v26, v20, v20
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_one_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_one_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vv v25, v28, v16
+; CHECK-NEXT:    vmfeq.vf v26, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_one_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_one_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v20, v20
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ord_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v20, v20
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v16, v20
+; CHECK-NEXT:    vmfne.vv v26, v20, v20
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v28, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ueq_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vv v25, v28, v16
+; CHECK-NEXT:    vmfne.vf v26, v28, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v16, v20
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ugt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfle.vv v25, v28, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v16, v20
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uge_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmflt.vv v25, v28, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v20, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ult_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfle.vv v25, v16, v28
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v20, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ule_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmflt.vv v25, v16, v28
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v20, v20
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v20, v20
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
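In the nxv8f64 tests below each vector operand needs a full LMUL=8 register group,
so only %va fits in the vector argument registers starting at v16; the second vector
argument appears to be passed indirectly, and the code first loads it from the
pointer in a0 before comparing:

    vsetvli a1, zero, e64,m8,ta,mu
    vle64.v v8, (a0)          # load %vb into a v8 register group

The scalar vf/fv forms are unaffected since %b still arrives in fa0.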
@fcmp_oeq_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_oeq_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp oeq %va, %vb + ret %vc +} + +define @fcmp_oeq_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_oeq_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %va, %splat + ret %vc +} + +define @fcmp_oeq_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_oeq_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %splat, %va + ret %vc +} + +define @fcmp_oeq_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_oeq_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp oeq %va, %vb + ret %vc +} + +define @fcmp_oeq_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_oeq_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %va, %splat + ret %vc +} + +define @fcmp_ogt_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ogt_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = fcmp ogt %va, %vb + ret %vc +} + +define @fcmp_ogt_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ogt_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfgt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ogt %va, %splat + ret %vc +} + +define @fcmp_ogt_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ogt_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ogt %splat, %va + ret %vc +} + +define @fcmp_ogt_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ogt_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = fcmp ogt %va, %vb + ret %vc +} + +define @fcmp_ogt_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ogt_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfgt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ogt %va, %splat + ret %vc +} + +define @fcmp_oge_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_oge_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: 
ret + %vc = fcmp oge %va, %vb + ret %vc +} + +define @fcmp_oge_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_oge_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %va, %splat + ret %vc +} + +define @fcmp_oge_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_oge_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %splat, %va + ret %vc +} + +define @fcmp_oge_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_oge_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = fcmp oge %va, %vb + ret %vc +} + +define @fcmp_oge_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_oge_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %va, %splat + ret %vc +} + +define @fcmp_olt_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_olt_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp olt %va, %vb + ret %vc +} + +define @fcmp_olt_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_olt_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %va, %splat + ret %vc +} + +define @fcmp_olt_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_olt_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %splat, %va + ret %vc +} + +define @fcmp_olt_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_olt_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp olt %va, %vb + ret %vc +} + +define @fcmp_olt_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_olt_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %va, %splat + ret %vc +} + +define @fcmp_ole_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ole_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ole %va, %vb + ret %vc +} + +define @fcmp_ole_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ole_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfle.vf 
v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %va, %splat + ret %vc +} + +define @fcmp_ole_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ole_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %splat, %va + ret %vc +} + +define @fcmp_ole_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ole_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ole %va, %vb + ret %vc +} + +define @fcmp_ole_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ole_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %va, %splat + ret %vc +} + +define @fcmp_one_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_one_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfne.vv v25, v16, v8 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vmfeq.vv v27, v8, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v26, v27 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp one %va, %vb + ret %vc +} + +define @fcmp_one_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_one_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfne.vf v25, v16, fa0 +; CHECK-NEXT: vmfeq.vf v26, v8, fa0 +; CHECK-NEXT: vmfeq.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v27, v26 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %va, %splat + ret %vc +} + +define @fcmp_one_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_one_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfne.vv v25, v8, v16 +; CHECK-NEXT: vmfeq.vf v26, v8, fa0 +; CHECK-NEXT: vmfeq.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v26, v27 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %splat, %va + ret %vc +} + +define @fcmp_one_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_one_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfne.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp one %va, %vb + ret %vc +} + +define @fcmp_one_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_one_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %va, %splat + ret %vc +} + +define @fcmp_ord_vv_nxv8f64( %va, %vb) { +; 
CHECK-LABEL: fcmp_ord_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v25, v16, v16 +; CHECK-NEXT: vmfeq.vv v26, v8, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ord %va, %vb + ret %vc +} + +define @fcmp_ord_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ord_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vf v25, v8, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %va, %splat + ret %vc +} + +define @fcmp_ord_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ord_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vf v25, v8, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %splat, %va + ret %vc +} + +define @fcmp_ord_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ord_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v25, v16, v16 +; CHECK-NEXT: vmfeq.vv v26, v8, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ord %va, %vb + ret %vc +} + +define @fcmp_ord_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ord_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vf v25, v8, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %va, %splat + ret %vc +} + +define @fcmp_ueq_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ueq_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v25, v16, v8 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vmfne.vv v27, v8, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v26, v27 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ueq %va, %vb + ret %vc +} + +define @fcmp_ueq_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ueq_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vf v25, v16, fa0 +; CHECK-NEXT: vmfne.vf v26, v8, fa0 +; CHECK-NEXT: vmfne.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v27, v26 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %va, %splat + ret %vc +} + +define @fcmp_ueq_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ueq_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vv v25, v8, v16 +; 
CHECK-NEXT: vmfne.vf v26, v8, fa0 +; CHECK-NEXT: vmfne.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v26, v27 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %splat, %va + ret %vc +} + +define @fcmp_ueq_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ueq_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ueq %va, %vb + ret %vc +} + +define @fcmp_ueq_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ueq_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %va, %splat + ret %vc +} + +define @fcmp_ugt_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ugt_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v25, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ugt %va, %vb + ret %vc +} + +define @fcmp_ugt_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ugt_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfle.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %va, %splat + ret %vc +} + +define @fcmp_ugt_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ugt_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfle.vv v25, v8, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %splat, %va + ret %vc +} + +define @fcmp_ugt_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ugt_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = fcmp ugt %va, %vb + ret %vc +} + +define @fcmp_ugt_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ugt_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfgt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %va, %splat + ret %vc +} + +define @fcmp_uge_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_uge_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v25, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp uge %va, %vb + ret %vc +} + +define @fcmp_uge_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_uge_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; 
CHECK-NEXT: vmflt.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %va, %splat + ret %vc +} + +define @fcmp_uge_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_uge_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmflt.vv v25, v8, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %splat, %va + ret %vc +} + +define @fcmp_uge_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_uge_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = fcmp uge %va, %vb + ret %vc +} + +define @fcmp_uge_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_uge_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %va, %splat + ret %vc +} + +define @fcmp_ult_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ult_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v25, v8, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ult %va, %vb + ret %vc +} + +define @fcmp_ult_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ult_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfge.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %va, %splat + ret %vc +} + +define @fcmp_ult_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ult_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfle.vv v25, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %splat, %va + ret %vc +} + +define @fcmp_ult_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ult_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ult %va, %vb + ret %vc +} + +define @fcmp_ult_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ult_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %va, %splat + ret %vc +} + +define @fcmp_ule_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ule_vv_nxv8f64: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v25, v8, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ule %va, %vb + ret %vc +} + +define @fcmp_ule_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ule_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfgt.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %va, %splat + ret %vc +} + +define @fcmp_ule_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ule_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmflt.vv v25, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %splat, %va + ret %vc +} + +define @fcmp_ule_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ule_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ule %va, %vb + ret %vc +} + +define @fcmp_ule_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ule_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %va, %splat + ret %vc +} + +define @fcmp_une_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_une_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfne.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp une %va, %vb + ret %vc +} + +define @fcmp_une_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_une_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %va, %splat + ret %vc +} + +define @fcmp_une_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_une_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfne.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %splat, %va + ret %vc +} + +define @fcmp_une_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_une_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfne.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp une %va, %vb + ret %vc +} + +define @fcmp_une_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_une_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %va, 
%splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v25, v16, v16
+; CHECK-NEXT:    vmfne.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v25, v16, v16
+; CHECK-NEXT:    vmfne.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
@@ -0,0 +1,2778 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('fv' tests) should swap operands and
+; condition codes accordingly in order to generate a 'vf' instruction.
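As a concrete sketch of what that fix could look like in RISCVInstrInfoVSDPatterns.td, the hypothetical multiclass below (the name VPatFPSetCCSDNode_FV and its instantiation are illustrative, not part of this patch) matches the splat on the left-hand side and selects the 'vf' instruction for the swapped condition code, reusing the AllFloatVectors, ToFPR32 and VLMax machinery the existing _VV/_VF helpers already use:

multiclass VPatFPSetCCSDNode_FV<CondCode cc, string swapped_instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar instruction = !cast<Instruction>(swapped_instruction_name#"_VF_"#fvti.LMul.MX);
    // setcc (splat s), v, cc is equivalent to setcc v, (splat s), swap(cc),
    // so select the 'vf' form of the instruction for the swapped predicate.
    def : Pat<(fvti.Mask (setcc (fvti.Vector (splat_vector fvti.ScalarRegClass:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1),
                                cc)),
              (instruction fvti.RegClass:$rs1,
                           ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
                           VLMax, fvti.SEW)>;
  }
}

// For example, SETOGT with the splat on the left is SETOLT with the splat on
// the right, so it could map onto the vmflt.vf pseudo:
defm "" : VPatFPSetCCSDNode_FV<SETOGT, "PseudoVMFLT">;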
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmflt.vv v0, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
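To make the FIXME concrete with the tests just above: fcmp ogt with the splat on the left-hand side is fcmp olt with the splat on the right, so fcmp_ogt_fv_nxv8f16 could drop the vfmv.v.f plus vmflt.vv sequence in favor of the single compare the olt 'vf' tests below already check for. A hand-written sketch of the expected codegen after such a change (not autogenerated output):

  vsetvli a0, zero, e16,m2,ta,mu
  vmflt.vf v0, v16, fa0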
+define @fcmp_oge_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_oge_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = fcmp oge %va, %vb + ret %vc +} + +define @fcmp_oge_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_oge_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %va, %splat + ret %vc +} + +define @fcmp_oge_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_oge_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfle.vv v0, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %splat, %va + ret %vc +} + +define @fcmp_oge_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_oge_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = fcmp oge %va, %vb + ret %vc +} + +define @fcmp_oge_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_oge_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %va, %splat + ret %vc +} + +define @fcmp_olt_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_olt_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp olt %va, %vb + ret %vc +} + +define @fcmp_olt_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_olt_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %va, %splat + ret %vc +} + +define @fcmp_olt_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_olt_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmflt.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %splat, %va + ret %vc +} + +define @fcmp_olt_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_olt_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp olt %va, %vb + ret %vc +} + +define @fcmp_olt_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_olt_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %va, %splat + ret %vc +} + +define 
@fcmp_ole_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_ole_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp ole %va, %vb + ret %vc +} + +define @fcmp_ole_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ole_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %va, %splat + ret %vc +} + +define @fcmp_ole_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ole_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %splat, %va + ret %vc +} + +define @fcmp_ole_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ole_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp ole %va, %vb + ret %vc +} + +define @fcmp_ole_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_ole_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %va, %splat + ret %vc +} + +define @fcmp_one_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_one_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vv v25, v16, v18 +; CHECK-NEXT: vmfeq.vv v26, v18, v18 +; CHECK-NEXT: vmfeq.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v27, v26 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp one %va, %vb + ret %vc +} + +define @fcmp_one_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_one_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfne.vf v25, v16, fa0 +; CHECK-NEXT: vmfeq.vf v28, v26, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v26, v28 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %va, %splat + ret %vc +} + +define @fcmp_one_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_one_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfne.vv v25, v26, v16 +; CHECK-NEXT: vmfeq.vf v28, v26, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v28, v26 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %splat, %va + ret %vc +} + +define @fcmp_one_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: 
fcmp_one_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp one %va, %vb + ret %vc +} + +define @fcmp_one_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_one_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %va, %splat + ret %vc +} + +define @fcmp_ord_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_ord_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfeq.vv v25, v18, v18 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %vc = fcmp ord %va, %vb + ret %vc +} + +define @fcmp_ord_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ord_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfeq.vf v25, v26, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %va, %splat + ret %vc +} + +define @fcmp_ord_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ord_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfeq.vf v25, v26, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %splat, %va + ret %vc +} + +define @fcmp_ord_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ord_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfeq.vv v25, v18, v18 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %vc = fcmp ord %va, %vb + ret %vc +} + +define @fcmp_ord_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_ord_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfeq.vf v25, v26, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %va, %splat + ret %vc +} + +define @fcmp_ueq_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_ueq_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfeq.vv v25, v16, v18 +; CHECK-NEXT: vmfne.vv v26, v18, v18 +; CHECK-NEXT: vmfne.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v27, v26 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ueq %va, %vb + ret %vc +} + +define @fcmp_ueq_vf_nxv8f16( %va, half %b) { +; 
CHECK-LABEL: fcmp_ueq_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfeq.vf v25, v16, fa0 +; CHECK-NEXT: vmfne.vf v28, v26, fa0 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v26, v28 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %va, %splat + ret %vc +} + +define @fcmp_ueq_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ueq_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfeq.vv v25, v26, v16 +; CHECK-NEXT: vmfne.vf v28, v26, fa0 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v28, v26 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %splat, %va + ret %vc +} + +define @fcmp_ueq_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ueq_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfeq.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp ueq %va, %vb + ret %vc +} + +define @fcmp_ueq_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_ueq_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %va, %splat + ret %vc +} + +define @fcmp_ugt_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_ugt_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v25, v16, v18 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ugt %va, %vb + ret %vc +} + +define @fcmp_ugt_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ugt_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %va, %splat + ret %vc +} + +define @fcmp_ugt_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ugt_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfle.vv v25, v26, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %splat, %va + ret %vc +} + +define @fcmp_ugt_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ugt_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = 
fcmp ugt %va, %vb + ret %vc +} + +define @fcmp_ugt_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_ugt_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfgt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %va, %splat + ret %vc +} + +define @fcmp_uge_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_uge_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v25, v16, v18 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp uge %va, %vb + ret %vc +} + +define @fcmp_uge_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_uge_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %va, %splat + ret %vc +} + +define @fcmp_uge_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_uge_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmflt.vv v25, v26, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %splat, %va + ret %vc +} + +define @fcmp_uge_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_uge_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = fcmp uge %va, %vb + ret %vc +} + +define @fcmp_uge_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_uge_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %va, %splat + ret %vc +} + +define @fcmp_ult_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_ult_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v25, v18, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ult %va, %vb + ret %vc +} + +define @fcmp_ult_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ult_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfge.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %va, %splat + ret %vc +} + +define @fcmp_ult_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ult_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: 
def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfle.vv v25, v16, v26 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %splat, %va + ret %vc +} + +define @fcmp_ult_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ult_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp ult %va, %vb + ret %vc +} + +define @fcmp_ult_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_ult_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %va, %splat + ret %vc +} + +define @fcmp_ule_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_ule_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v25, v18, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ule %va, %vb + ret %vc +} + +define @fcmp_ule_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ule_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfgt.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %va, %splat + ret %vc +} + +define @fcmp_ule_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_ule_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmflt.vv v25, v16, v26 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %splat, %va + ret %vc +} + +define @fcmp_ule_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ule_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp ule %va, %vb + ret %vc +} + +define @fcmp_ule_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_ule_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %va, %splat + ret %vc +} + +define @fcmp_une_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_une_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp une %va, %vb + ret %vc +} + +define @fcmp_une_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_une_vf_nxv8f16: +; CHECK: # %bb.0: 
+; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %va, %splat + ret %vc +} + +define @fcmp_une_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_une_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfne.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %splat, %va + ret %vc +} + +define @fcmp_une_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_une_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = fcmp une %va, %vb + ret %vc +} + +define @fcmp_une_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_une_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %va, %splat + ret %vc +} + +define @fcmp_uno_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: fcmp_uno_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vv v25, v18, v18 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: ret + %vc = fcmp uno %va, %vb + ret %vc +} + +define @fcmp_uno_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_uno_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfne.vf v25, v26, fa0 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uno %va, %splat + ret %vc +} + +define @fcmp_uno_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: fcmp_uno_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vmfne.vf v25, v26, fa0 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uno %splat, %va + ret %vc +} + +define @fcmp_uno_vv_nxv8f16_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_uno_vv_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmfne.vv v25, v18, v18 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: ret + %vc = fcmp uno %va, %vb + ret %vc +} + +define @fcmp_uno_vf_nxv8f16_nonans( %va, half %b) #0 { +; CHECK-LABEL: fcmp_uno_vf_nxv8f16_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $f10_h killed $f10_h def $f10_f +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; 
CHECK-NEXT: vmfne.vf v25, v26, fa0 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, half %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uno %va, %splat + ret %vc +} + +define @fcmp_oeq_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_oeq_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp oeq %va, %vb + ret %vc +} + +define @fcmp_oeq_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_oeq_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %va, %splat + ret %vc +} + +define @fcmp_oeq_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_oeq_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfeq.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %splat, %va + ret %vc +} + +define @fcmp_oeq_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_oeq_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp oeq %va, %vb + ret %vc +} + +define @fcmp_oeq_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_oeq_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %va, %splat + ret %vc +} + +define @fcmp_ogt_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_ogt_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = fcmp ogt %va, %vb + ret %vc +} + +define @fcmp_ogt_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ogt_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfgt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ogt %va, %splat + ret %vc +} + +define @fcmp_ogt_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ogt_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmflt.vv v0, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ogt %splat, %va + ret %vc +} + +define @fcmp_ogt_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ogt_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = fcmp ogt %va, %vb + ret %vc +} + +define @fcmp_ogt_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_ogt_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfgt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ogt %va, %splat + ret %vc +} + +define @fcmp_oge_vv_nxv8f32( 
%va, %vb) { +; CHECK-LABEL: fcmp_oge_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = fcmp oge %va, %vb + ret %vc +} + +define @fcmp_oge_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_oge_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %va, %splat + ret %vc +} + +define @fcmp_oge_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_oge_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfle.vv v0, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %splat, %va + ret %vc +} + +define @fcmp_oge_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_oge_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = fcmp oge %va, %vb + ret %vc +} + +define @fcmp_oge_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_oge_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oge %va, %splat + ret %vc +} + +define @fcmp_olt_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_olt_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp olt %va, %vb + ret %vc +} + +define @fcmp_olt_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_olt_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %va, %splat + ret %vc +} + +define @fcmp_olt_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_olt_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmflt.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %splat, %va + ret %vc +} + +define @fcmp_olt_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_olt_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp olt %va, %vb + ret %vc +} + +define @fcmp_olt_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_olt_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %va, %splat + ret %vc +} + +define @fcmp_ole_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_ole_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp ole %va, %vb + ret %vc +} + +define @fcmp_ole_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ole_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, 
e32,m4,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %va, %splat + ret %vc +} + +define @fcmp_ole_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ole_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %splat, %va + ret %vc +} + +define @fcmp_ole_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ole_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp ole %va, %vb + ret %vc +} + +define @fcmp_ole_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_ole_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %va, %splat + ret %vc +} + +define @fcmp_one_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_one_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vv v25, v16, v20 +; CHECK-NEXT: vmfeq.vv v26, v20, v20 +; CHECK-NEXT: vmfeq.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v27, v26 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp one %va, %vb + ret %vc +} + +define @fcmp_one_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_one_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfne.vf v25, v16, fa0 +; CHECK-NEXT: vmfeq.vf v26, v28, fa0 +; CHECK-NEXT: vmfeq.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v27, v26 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %va, %splat + ret %vc +} + +define @fcmp_one_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_one_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfne.vv v25, v28, v16 +; CHECK-NEXT: vmfeq.vf v26, v28, fa0 +; CHECK-NEXT: vmfeq.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v26, v27 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %splat, %va + ret %vc +} + +define @fcmp_one_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_one_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp one %va, %vb + ret %vc +} + +define @fcmp_one_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_one_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %va, %splat + ret %vc +} + +define @fcmp_ord_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_ord_vv_nxv8f32: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vv v25, v20, v20 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %vc = fcmp ord %va, %vb + ret %vc +} + +define @fcmp_ord_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ord_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfeq.vf v25, v28, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %va, %splat + ret %vc +} + +define @fcmp_ord_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ord_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfeq.vf v25, v28, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %splat, %va + ret %vc +} + +define @fcmp_ord_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ord_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vv v25, v20, v20 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %vc = fcmp ord %va, %vb + ret %vc +} + +define @fcmp_ord_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_ord_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfeq.vf v25, v28, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %va, %splat + ret %vc +} + +define @fcmp_ueq_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_ueq_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vv v25, v16, v20 +; CHECK-NEXT: vmfne.vv v26, v20, v20 +; CHECK-NEXT: vmfne.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v27, v26 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ueq %va, %vb + ret %vc +} + +define @fcmp_ueq_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ueq_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfeq.vf v25, v16, fa0 +; CHECK-NEXT: vmfne.vf v26, v28, fa0 +; CHECK-NEXT: vmfne.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v27, v26 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %va, %splat + ret %vc +} + +define @fcmp_ueq_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ueq_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfeq.vv v25, v28, v16 +; CHECK-NEXT: vmfne.vf v26, v28, fa0 +; CHECK-NEXT: vmfne.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm 
v26, v26, v27 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %splat, %va + ret %vc +} + +define @fcmp_ueq_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ueq_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp ueq %va, %vb + ret %vc +} + +define @fcmp_ueq_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_ueq_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %va, %splat + ret %vc +} + +define @fcmp_ugt_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_ugt_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v25, v16, v20 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ugt %va, %vb + ret %vc +} + +define @fcmp_ugt_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ugt_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %va, %splat + ret %vc +} + +define @fcmp_ugt_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ugt_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfle.vv v25, v28, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %splat, %va + ret %vc +} + +define @fcmp_ugt_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ugt_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = fcmp ugt %va, %vb + ret %vc +} + +define @fcmp_ugt_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_ugt_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfgt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %va, %splat + ret %vc +} + +define @fcmp_uge_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_uge_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v25, v16, v20 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp uge %va, %vb + ret %vc +} + +define @fcmp_uge_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_uge_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp 
uge %va, %splat + ret %vc +} + +define @fcmp_uge_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_uge_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmflt.vv v25, v28, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %splat, %va + ret %vc +} + +define @fcmp_uge_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_uge_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = fcmp uge %va, %vb + ret %vc +} + +define @fcmp_uge_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_uge_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %va, %splat + ret %vc +} + +define @fcmp_ult_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_ult_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v25, v20, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ult %va, %vb + ret %vc +} + +define @fcmp_ult_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ult_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfge.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %va, %splat + ret %vc +} + +define @fcmp_ult_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ult_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfle.vv v25, v16, v28 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %splat, %va + ret %vc +} + +define @fcmp_ult_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ult_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp ult %va, %vb + ret %vc +} + +define @fcmp_ult_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_ult_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %va, %splat + ret %vc +} + +define @fcmp_ule_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_ule_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v25, v20, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ule %va, %vb + ret %vc +} + +define @fcmp_ule_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ule_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfgt.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %va, %splat + ret %vc +} + +define @fcmp_ule_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_ule_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmflt.vv v25, v16, v28 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %splat, %va + ret %vc +} + +define @fcmp_ule_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ule_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp ule %va, %vb + ret %vc +} + +define @fcmp_ule_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_ule_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %va, %splat + ret %vc +} + +define @fcmp_une_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_une_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp une %va, %vb + ret %vc +} + +define @fcmp_une_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_une_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %va, %splat + ret %vc +} + +define @fcmp_une_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_une_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfne.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %splat, %va + ret %vc +} + +define @fcmp_une_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_une_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = fcmp une %va, %vb + ret %vc +} + +define @fcmp_une_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_une_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp une %va, %splat + ret %vc +} + +define @fcmp_uno_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: fcmp_uno_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vv v25, v20, v20 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: ret + %vc = fcmp uno %va, %vb + ret %vc +} + +define @fcmp_uno_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_uno_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli 
a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfne.vf v25, v28, fa0 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uno %va, %splat + ret %vc +} + +define @fcmp_uno_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: fcmp_uno_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfne.vf v25, v28, fa0 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uno %splat, %va + ret %vc +} + +define @fcmp_uno_vv_nxv8f32_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_uno_vv_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmfne.vv v25, v20, v20 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: ret + %vc = fcmp uno %va, %vb + ret %vc +} + +define @fcmp_uno_vf_nxv8f32_nonans( %va, float %b) #0 { +; CHECK-LABEL: fcmp_uno_vf_nxv8f32_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vmfne.vf v25, v28, fa0 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, float %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uno %va, %splat + ret %vc +} + +define @fcmp_oeq_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_oeq_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp oeq %va, %vb + ret %vc +} + +define @fcmp_oeq_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_oeq_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %va, %splat + ret %vc +} + +define @fcmp_oeq_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_oeq_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %splat, %va + ret %vc +} + +define @fcmp_oeq_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_oeq_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp oeq %va, %vb + ret %vc +} + +define @fcmp_oeq_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_oeq_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp oeq %va, %splat + ret %vc +} + +define @fcmp_ogt_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ogt_vv_nxv8f64: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmflt.vv v0, v8, v16
+; CHECK-NEXT: ret
+  %vc = fcmp ogt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmfgt.vf v0, v16, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vmflt.vv v0, v16, v8
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f64_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmflt.vv v0, v8, v16
+; CHECK-NEXT: ret
+  %vc = fcmp ogt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f64_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmfgt.vf v0, v16, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfle.vv v0, v8, v16
+; CHECK-NEXT: ret
+  %vc = fcmp oge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmfge.vf v0, v16, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vmfle.vv v0, v16, v8
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f64_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfle.vv v0, v8, v16
+; CHECK-NEXT: ret
+  %vc = fcmp oge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f64_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmfge.vf v0, v16, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmflt.vv v0, v16, v8
+; CHECK-NEXT: ret
+  %vc = fcmp olt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %va, %splat + ret %vc +} + +define @fcmp_olt_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_olt_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %splat, %va + ret %vc +} + +define @fcmp_olt_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_olt_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp olt %va, %vb + ret %vc +} + +define @fcmp_olt_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_olt_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp olt %va, %splat + ret %vc +} + +define @fcmp_ole_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ole_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ole %va, %vb + ret %vc +} + +define @fcmp_ole_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ole_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %va, %splat + ret %vc +} + +define @fcmp_ole_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ole_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %splat, %va + ret %vc +} + +define @fcmp_ole_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ole_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ole %va, %vb + ret %vc +} + +define @fcmp_ole_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ole_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ole %va, %splat + ret %vc +} + +define @fcmp_one_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_one_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfne.vv v25, v16, v8 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vmfeq.vv v27, v8, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v26, v27 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp one %va, %vb + ret %vc +} + +define @fcmp_one_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: 
fcmp_one_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfne.vf v25, v16, fa0 +; CHECK-NEXT: vmfeq.vf v26, v8, fa0 +; CHECK-NEXT: vmfeq.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v27, v26 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %va, %splat + ret %vc +} + +define @fcmp_one_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_one_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfne.vv v25, v8, v16 +; CHECK-NEXT: vmfeq.vf v26, v8, fa0 +; CHECK-NEXT: vmfeq.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v26, v26, v27 +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %splat, %va + ret %vc +} + +define @fcmp_one_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_one_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfne.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp one %va, %vb + ret %vc +} + +define @fcmp_one_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_one_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfne.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp one %va, %splat + ret %vc +} + +define @fcmp_ord_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ord_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v25, v16, v16 +; CHECK-NEXT: vmfeq.vv v26, v8, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ord %va, %vb + ret %vc +} + +define @fcmp_ord_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ord_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vf v25, v8, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %va, %splat + ret %vc +} + +define @fcmp_ord_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ord_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vf v25, v8, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %splat, %va + ret %vc +} + +define @fcmp_ord_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ord_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v25, v16, v16 +; CHECK-NEXT: vmfeq.vv v26, v8, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ord %va, %vb + 
ret %vc +} + +define @fcmp_ord_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ord_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vf v25, v8, fa0 +; CHECK-NEXT: vmfeq.vv v26, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ord %va, %splat + ret %vc +} + +define @fcmp_ueq_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ueq_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v25, v16, v8 +; CHECK-NEXT: vmfne.vv v26, v16, v16 +; CHECK-NEXT: vmfne.vv v27, v8, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v26, v27 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ueq %va, %vb + ret %vc +} + +define @fcmp_ueq_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ueq_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vf v25, v16, fa0 +; CHECK-NEXT: vmfne.vf v26, v8, fa0 +; CHECK-NEXT: vmfne.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v27, v26 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %va, %splat + ret %vc +} + +define @fcmp_ueq_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ueq_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfeq.vv v25, v8, v16 +; CHECK-NEXT: vmfne.vf v26, v8, fa0 +; CHECK-NEXT: vmfne.vv v27, v16, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmor.mm v26, v26, v27 +; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %splat, %va + ret %vc +} + +define @fcmp_ueq_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ueq_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfeq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ueq %va, %vb + ret %vc +} + +define @fcmp_ueq_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ueq_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfeq.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ueq %va, %splat + ret %vc +} + +define @fcmp_ugt_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ugt_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v25, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ugt %va, %vb + ret %vc +} + +define @fcmp_ugt_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ugt_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfle.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double 
%b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %va, %splat + ret %vc +} + +define @fcmp_ugt_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ugt_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfle.vv v25, v8, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %splat, %va + ret %vc +} + +define @fcmp_ugt_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ugt_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = fcmp ugt %va, %vb + ret %vc +} + +define @fcmp_ugt_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ugt_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfgt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ugt %va, %splat + ret %vc +} + +define @fcmp_uge_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_uge_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v25, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp uge %va, %vb + ret %vc +} + +define @fcmp_uge_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_uge_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmflt.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %va, %splat + ret %vc +} + +define @fcmp_uge_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_uge_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmflt.vv v25, v8, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %splat, %va + ret %vc +} + +define @fcmp_uge_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_uge_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = fcmp uge %va, %vb + ret %vc +} + +define @fcmp_uge_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_uge_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfge.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp uge %va, %splat + ret %vc +} + +define @fcmp_ult_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ult_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v25, v8, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; 
CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ult %va, %vb + ret %vc +} + +define @fcmp_ult_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ult_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfge.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %va, %splat + ret %vc +} + +define @fcmp_ult_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ult_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmfle.vv v25, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %splat, %va + ret %vc +} + +define @fcmp_ult_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ult_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ult %va, %vb + ret %vc +} + +define @fcmp_ult_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ult_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmflt.vf v0, v16, fa0 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ult %va, %splat + ret %vc +} + +define @fcmp_ule_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: fcmp_ule_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmflt.vv v25, v8, v16 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %vc = fcmp ule %va, %vb + ret %vc +} + +define @fcmp_ule_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ule_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfgt.vf v25, v16, fa0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %va, %splat + ret %vc +} + +define @fcmp_ule_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: fcmp_ule_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmflt.vv v25, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v26 +; CHECK-NEXT: vmxor.mm v0, v25, v26 +; CHECK-NEXT: ret + %head = insertelement undef, double %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = fcmp ule %splat, %va + ret %vc +} + +define @fcmp_ule_vv_nxv8f64_nonans( %va, %vb) #0 { +; CHECK-LABEL: fcmp_ule_vv_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = fcmp ule %va, %vb + ret %vc +} + +define @fcmp_ule_vf_nxv8f64_nonans( %va, double %b) #0 { +; CHECK-LABEL: fcmp_ule_vf_nxv8f64_nonans: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmfle.vf v0, v16, fa0 +; 
CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v0, v16, v8
+; CHECK-NEXT: ret
+  %vc = fcmp une <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmfne.vf v0, v16, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vmfne.vv v0, v8, v16
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f64_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v0, v16, v8
+; CHECK-NEXT: ret
+  %vc = fcmp une <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f64_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmfne.vf v0, v16, fa0
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v25, v16, v16
+; CHECK-NEXT: vmfne.vv v26, v8, v8
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmor.mm v0, v25, v26
+; CHECK-NEXT: ret
+  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vmfne.vf v25, v8, fa0
+; CHECK-NEXT: vmfne.vv v26, v16, v16
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmor.mm v0, v26, v25
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vmfne.vf v25, v8, fa0
+; CHECK-NEXT: vmfne.vv v26, v16, v16
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmor.mm v0, v25, v26
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f64_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmfne.vv v25, v16, v16
+; CHECK-NEXT: vmfne.vv v26, v8, v8
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmor.mm v0, v25, v26
+; CHECK-NEXT: ret
+  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f64_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: vmfne.vf v25, v8, fa0
+; CHECK-NEXT: vmfne.vv v26, v16, v16
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmor.mm v0, v26, v25
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+experimental-zfh,+experimental-v -target-abi ilp32d -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefix=RV32V
+; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+experimental-zfh,+experimental-v -target-abi lp64d -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefix=RV64V
+
+define <vscale x 8 x half> @vsplat_nxv8f16(half %f) {
+; RV32V-LABEL: vsplat_nxv8f16:
+; RV32V: # %bb.0:
+; RV32V-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
+; RV32V-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; RV32V-NEXT: vfmv.v.f v16, fa0
+; RV32V-NEXT: ret
+;
+; RV64V-LABEL: vsplat_nxv8f16:
+; RV64V: # %bb.0:
+; RV64V-NEXT: # kill: def $f10_h killed $f10_h def $f10_f
+; RV64V-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; RV64V-NEXT: vfmv.v.f v16, fa0
+; RV64V-NEXT: ret
+  %head = insertelement <vscale x 8 x half> undef, half %f, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x half> %splat
+}
+
+define <vscale x 8 x float> @vsplat_nxv8f32(float %f) {
+; RV32V-LABEL: vsplat_nxv8f32:
+; RV32V: # %bb.0:
+; RV32V-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; RV32V-NEXT: vfmv.v.f v16, fa0
+; RV32V-NEXT: ret
+;
+; RV64V-LABEL: vsplat_nxv8f32:
+; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; RV64V-NEXT: vfmv.v.f v16, fa0
+; RV64V-NEXT: ret
+  %head = insertelement <vscale x 8 x float> undef, float %f, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x float> %splat
+}
+
+define <vscale x 8 x double> @vsplat_nxv8f64(double %f) {
+; RV32V-LABEL: vsplat_nxv8f64:
+; RV32V: # %bb.0:
+; RV32V-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT: vfmv.v.f v16, fa0
+; RV32V-NEXT: ret
+;
+; RV64V-LABEL: vsplat_nxv8f64:
+; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV64V-NEXT: vfmv.v.f v16, fa0
+; RV64V-NEXT: ret
+  %head = insertelement <vscale x 8 x double> undef, double %f, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x double> %splat
+}