diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1027,7 +1027,9 @@
   case ISD::FREEZE:
   case ISD::ARITH_FENCE:
   case ISD::FP_EXTEND:
+  case ISD::VP_FP_EXTEND:
   case ISD::FP_ROUND:
+  case ISD::VP_FP_ROUND:
   case ISD::FP_TO_SINT:
   case ISD::FP_TO_UINT:
   case ISD::FRINT:
@@ -2670,6 +2672,7 @@
     Res = SplitVecOp_TruncateHelper(N);
     break;
   case ISD::STRICT_FP_ROUND:
+  case ISD::VP_FP_ROUND:
   case ISD::FP_ROUND:           Res = SplitVecOp_FP_ROUND(N); break;
   case ISD::FCOPYSIGN:          Res = SplitVecOp_FCOPYSIGN(N); break;
   case ISD::STORE:
@@ -3546,6 +3549,13 @@
     SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                                    Lo.getValue(1), Hi.getValue(1));
     ReplaceValueWith(SDValue(N, 1), NewChain);
+  } else if (N->getOpcode() == ISD::VP_FP_ROUND) {
+    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
+    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
+    std::tie(EVLLo, EVLHi) =
+        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), DL);
+    Lo = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Lo, MaskLo, EVLLo);
+    Hi = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Hi, MaskHi, EVLHi);
   } else {
     Lo = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Lo, N->getOperand(1));
     Hi = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Hi, N->getOperand(1));
@@ -3768,8 +3778,10 @@
     break;
   case ISD::ANY_EXTEND:
+  case ISD::VP_FP_EXTEND:
   case ISD::FP_EXTEND:
   case ISD::FP_ROUND:
+  case ISD::VP_FP_ROUND:
   case ISD::FP_TO_SINT:
   case ISD::FP_TO_UINT:
   case ISD::SIGN_EXTEND:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
@@ -75,3 +75,48 @@
   %v = call <2 x double> @llvm.vp.fpext.v2f64.v2f32(<2 x float> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
   ret <2 x double> %v
 }
+
+declare <15 x double> @llvm.vp.fpext.v15f64.v15f32(<15 x float>, <15 x i1>, i32)
+
+define <15 x double> @vfpext_v15f32_v15f64(<15 x float> %a, <15 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfpext_v15f32_v15f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+  %v = call <15 x double> @llvm.vp.fpext.v15f64.v15f32(<15 x float> %a, <15 x i1> %m, i32 %vl)
+  ret <15 x double> %v
+}
+
+declare <32 x double> @llvm.vp.fpext.v32f64.v32f32(<32 x float>, <32 x i1>, i32)
+
+define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfpext_v32f32_v32f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT: addi a2, a0, -16
+; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: bltu a0, a2, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu
+; CHECK-NEXT: vslidedown.vi v24, v8, 16
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vfwcvt.f.f.v v16, v24, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB7_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: ret
+  %v = call <32 x double> @llvm.vp.fpext.v32f64.v32f32(<32 x float> %a, <32 x i1> %m, i32 %vl)
+  ret <32 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
@@ -75,3 +75,62 @@
   %v = call <2 x float> @llvm.vp.fptrunc.v2f64.v2f32(<2 x double> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
   ret <2 x float> %v
 }
+
+declare <15 x float> @llvm.vp.fptrunc.v15f64.v15f32(<15 x double>, <15 x i1>, i32)
+
+define <15 x float> @vfptrunc_v15f32_v15f64(<15 x double> %a, <15 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_v15f32_v15f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+  %v = call <15 x float> @llvm.vp.fptrunc.v15f64.v15f32(<15 x double> %a, <15 x i1> %m, i32 %vl)
+  ret <15 x float> %v
+}
+
+declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i32)
+
+define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_v32f32_v32f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT: addi a2, a0, -16
+; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: bltu a0, a2, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB7_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT: vslideup.vi v16, v8, 16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %v = call <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double> %a, <32 x i1> %m, i32 %vl)
+  ret <32 x float> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -75,3 +75,48 @@
   %v = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
   ret <vscale x 2 x double> %v
 }
+
+declare <vscale x 7 x double> @llvm.vp.fpext.nxv7f64.nxv7f32(<vscale x 7 x float>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfpext_nxv7f32_nxv7f64(<vscale x 7 x float> %a, <vscale x 7 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfpext_nxv7f32_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+  %v = call <vscale x 7 x double> @llvm.vp.fpext.nxv7f64.nxv7f32(<vscale x 7 x float> %a, <vscale x 7 x i1> %m, i32 %vl)
+  ret <vscale x 7 x double> %v
+}
+
+declare <vscale x 32 x float> @llvm.vp.fpext.nxv32f32.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfpext_nxv32f16_nxv32f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a4
+; CHECK-NEXT: bltu a0, a3, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB7_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: ret
+  %v = call <vscale x 32 x float> @llvm.vp.fpext.nxv32f32.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %m, i32 %vl)
+  ret <vscale x 32 x float> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -75,3 +75,158 @@
   %v = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f64.nxv2f32(<vscale x 2 x double> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
   ret <vscale x 2 x float> %v
 }
+
+declare <vscale x 7 x float> @llvm.vp.fptrunc.nxv7f64.nxv7f32(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x float> @vfptrunc_nxv7f32_nxv7f64(<vscale x 7 x double> %a, <vscale x 7 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_nxv7f32_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+  %v = call <vscale x 7 x float> @llvm.vp.fptrunc.nxv7f64.nxv7f32(<vscale x 7 x double> %a, <vscale x 7 x i1> %m, i32 %vl)
+  ret <vscale x 7 x float> %v
+}
+
+declare <vscale x 16 x float> @llvm.vp.fptrunc.nxv16f64.nxv16f32(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double> %a, <vscale x 16 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a4, a1, 3
+; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
+; CHECK-NEXT: sub a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a4
+; CHECK-NEXT: bltu a0, a3, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB7_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %v = call <vscale x 16 x float> @llvm.vp.fptrunc.nxv16f64.nxv16f32(<vscale x 16 x double> %a, <vscale x 16 x i1> %m, i32 %vl)
+  ret <vscale x 16 x float> %v
+}
+
+declare <vscale x 32 x float> @llvm.vp.fptrunc.nxv32f64.nxv32f32(<vscale x 32 x double>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_nxv32f32_nxv32f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a4, a1, 1
+; CHECK-NEXT: srli a3, a1, 3
+; CHECK-NEXT: mv a5, a2
+; CHECK-NEXT: bltu a2, a4, .LBB8_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: .LBB8_2:
+; CHECK-NEXT: li a6, 0
+; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, mu
+; CHECK-NEXT: sub a7, a5, a1
+; CHECK-NEXT: vslidedown.vx v0, v24, a3
+; CHECK-NEXT: bltu a5, a7, .LBB8_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a6, a7
+; CHECK-NEXT: .LBB8_4:
+; CHECK-NEXT: srli a7, a1, 2
+; CHECK-NEXT: slli t0, a1, 3
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; CHECK-NEXT: bltu a5, a1, .LBB8_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: mv a5, a1
+; CHECK-NEXT: .LBB8_6:
+; CHECK-NEXT: li a6, 0
+; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vslidedown.vx v1, v24, a7
+; CHECK-NEXT: add a7, a0, t0
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, mu
+; CHECK-NEXT: sub a4, a2, a4
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 3
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vl8re8.v v16, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; CHECK-NEXT: bltu a2, a4, .LBB8_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: mv a6, a4
+; CHECK-NEXT: .LBB8_8:
+; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vl8re64.v v16, (a7)
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: sub a4, a6, a1
+; CHECK-NEXT: vslidedown.vx v0, v1, a3
+; CHECK-NEXT: bltu a6, a4, .LBB8_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: mv a2, a4
+; CHECK-NEXT: .LBB8_10:
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v20, v24, v0.t
+; CHECK-NEXT: bltu a6, a1, .LBB8_12
+; CHECK-NEXT: # %bb.11:
+; CHECK-NEXT: mv a6, a1
+; CHECK-NEXT: .LBB8_12:
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %v = call <vscale x 32 x float> @llvm.vp.fptrunc.nxv32f64.nxv32f32(<vscale x 32 x double> %a, <vscale x 32 x i1> %m, i32 %vl)
+  ret <vscale x 32 x float> %v
+}
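
Note (reviewer context, not part of the patch): when the operand type of a VP_FP_ROUND has to be split, SplitVecOp_FP_ROUND now emits two half-width VP_FP_ROUND nodes, splitting the mask elementwise via SplitMask and the explicit vector length via DAG.SplitEVL. The low half receives umin(EVL, HalfNumElts) and the high half the saturating remainder, which is exactly the li/addi/bltu/mv scalar sequences visible in the CHECK lines above. A minimal standalone C++ sketch of that EVL split (splitEVL and HalfNumElts are illustrative names, not LLVM API):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Sketch: divide an explicit vector length (EVL) between the two halves of
// a split VP operation, mirroring the umin/usubsat behavior the generated
// RISC-V code above implements with bltu/mv.
static void splitEVL(uint64_t EVL, uint64_t HalfNumElts, uint64_t &EVLLo,
                     uint64_t &EVLHi) {
  EVLLo = std::min(EVL, HalfNumElts);                 // low half: umin
  EVLHi = EVL > HalfNumElts ? EVL - HalfNumElts : 0;  // high half: usubsat
}

int main() {
  uint64_t Lo, Hi;
  // e.g. v32f64 split into two v16f64 halves with EVL = 20:
  splitEVL(20, 16, Lo, Hi);
  std::printf("EVLLo=%llu EVLHi=%llu\n", (unsigned long long)Lo,
              (unsigned long long)Hi); // prints EVLLo=16 EVLHi=4
  return 0;
}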