diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1029,7 +1029,9 @@
   case ISD::FP_EXTEND:
   case ISD::FP_ROUND:
   case ISD::FP_TO_SINT:
+  case ISD::VP_FPTOSI:
   case ISD::FP_TO_UINT:
+  case ISD::VP_FPTOUI:
   case ISD::FRINT:
   case ISD::FROUND:
   case ISD::FROUNDEVEN:
@@ -1037,9 +1039,11 @@
   case ISD::FSQRT:
   case ISD::FTRUNC:
   case ISD::SINT_TO_FP:
+  case ISD::VP_SITOFP:
   case ISD::TRUNCATE:
   case ISD::VP_TRUNCATE:
   case ISD::UINT_TO_FP:
+  case ISD::VP_UITOFP:
   case ISD::FCANONICALIZE:
     SplitVecRes_UnaryOp(N, Lo, Hi);
     break;
@@ -3793,13 +3797,17 @@
   case ISD::FP_EXTEND:
   case ISD::FP_ROUND:
   case ISD::FP_TO_SINT:
+  case ISD::VP_FPTOSI:
   case ISD::FP_TO_UINT:
+  case ISD::VP_FPTOUI:
   case ISD::SIGN_EXTEND:
   case ISD::VP_SIGN_EXTEND:
   case ISD::SINT_TO_FP:
+  case ISD::VP_SITOFP:
   case ISD::VP_TRUNCATE:
   case ISD::TRUNCATE:
   case ISD::UINT_TO_FP:
+  case ISD::VP_UITOFP:
   case ISD::ZERO_EXTEND:
   case ISD::VP_ZERO_EXTEND:
     Res = WidenVecRes_Convert(N);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
@@ -311,3 +311,54 @@
   ret <4 x i64> %v
 }
 
+declare <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double>, <32 x i1>, i32)
+
+define <32 x i64> @vfptosi_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v32i64_v32f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    addi a2, a0, -16
+; CHECK-NEXT:    vslidedown.vi v0, v0, 2
+; CHECK-NEXT:    bltu a0, a2, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB25_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB25_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x i64> %v
+}
+
+define <32 x i64> @vfptosi_v32i64_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v32i64_v32f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, a0, -16
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  ret <32 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
@@ -311,3 +311,54 @@
   ret <4 x i64> %v
 }
 
+declare <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double>, <32 x i1>, i32)
+
+define <32 x i64> @vfptoui_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_v32i64_v32f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    addi a2, a0, -16
+; CHECK-NEXT:    vslidedown.vi v0, v0, 2
+; CHECK-NEXT:    bltu a0, a2, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB25_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB25_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x i64> %v
+}
+
+define <32 x i64> @vfptoui_v32i64_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_v32i64_v32f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, a0, -16
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  ret <32 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
@@ -302,3 +302,55 @@
   %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x double> %v
 }
+
+declare <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32)
+
+define <32 x double> @vsitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v32f64_v32i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    addi a2, a0, -16
+; CHECK-NEXT:    vslidedown.vi v0, v0, 2
+; CHECK-NEXT:    bltu a0, a2, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB25_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB25_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x double> %v
+}
+
+define <32 x double> @vsitofp_v32f64_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v32f64_v32i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, a0, -16
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  ret <32 x double> %v
+}
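The 32-element tests above all exercise the same new legalization path: the type legalizer splits the illegal VP conversion in half, clamps the EVL to 16 for the low half, carries the remainder (floored at zero) to the high half, and slides the upper mask bits down into v0. The IR below is a hand-split sketch of the masked fptosi case for illustration only; the function name and the use of llvm.umin/llvm.usub.sat to spell out the EVL arithmetic are assumptions of this sketch, not part of the patch.

declare <16 x i64> @llvm.vp.fptosi.v16i64.v16f64(<16 x double>, <16 x i1>, i32)
declare i32 @llvm.umin.i32(i32, i32)
declare i32 @llvm.usub.sat.i32(i32, i32)

define <32 x i64> @vfptosi_v32i64_v32f64_by_hand(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
  ; Split the operand and the mask into <16 x ...> halves.
  %va.lo = shufflevector <32 x double> %va, <32 x double> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %va.hi = shufflevector <32 x double> %va, <32 x double> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %m.lo = shufflevector <32 x i1> %m, <32 x i1> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %m.hi = shufflevector <32 x i1> %m, <32 x i1> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  ; The low half runs min(evl, 16) lanes; the high half gets the rest,
  ; clamped at zero. This is the li/addi/bltu/mv sequence in the CHECK
  ; lines above.
  %evl.lo = call i32 @llvm.umin.i32(i32 %evl, i32 16)
  %evl.hi = call i32 @llvm.usub.sat.i32(i32 %evl, i32 16)
  %lo = call <16 x i64> @llvm.vp.fptosi.v16i64.v16f64(<16 x double> %va.lo, <16 x i1> %m.lo, i32 %evl.lo)
  %hi = call <16 x i64> @llvm.vp.fptosi.v16i64.v16f64(<16 x double> %va.hi, <16 x i1> %m.hi, i32 %evl.hi)
  ; Reassemble the <32 x i64> result.
  %v = shufflevector <16 x i64> %lo, <16 x i64> %hi, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  ret <32 x i64> %v
}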
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
@@ -302,3 +302,55 @@
   %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x double> %v
 }
+
+declare <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32)
+
+define <32 x double> @vuitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_v32f64_v32i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    addi a2, a0, -16
+; CHECK-NEXT:    vslidedown.vi v0, v0, 2
+; CHECK-NEXT:    bltu a0, a2, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfcvt.f.xu.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB25_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB25_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x double> %v
+}
+
+define <32 x double> @vuitofp_v32f64_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_v32f64_v32i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, a0, -16
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfcvt.f.xu.v v16, v16
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  ret <32 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -308,3 +308,59 @@
   %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
+
+declare <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a4, a1, 2
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    sub a3, a0, a1
+; CHECK-NEXT:    vslidedown.vx v0, v0, a4
+; CHECK-NEXT:    bltu a0, a3, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a3
+; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB25_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a0, a1
+; CHECK-NEXT:  .LBB25_4:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i32> %v
+}
+
+define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32_unmasked(<vscale x 32 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    sub a1, a0, a1
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a3, a1
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 32 x i32> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -308,3 +308,59 @@
   %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
+
+declare <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a4, a1, 2
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    sub a3, a0, a1
+; CHECK-NEXT:    vslidedown.vx v0, v0, a4
+; CHECK-NEXT:    bltu a0, a3, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a3
+; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB25_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a0, a1
+; CHECK-NEXT:  .LBB25_4:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i32> %v
+}
+
+define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32_unmasked(<vscale x 32 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    sub a1, a0, a1
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a3, a1
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 32 x i32> %v
+}
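A note on the _unmasked variants throughout these tests: VP intrinsics have no mask-less form, so an all-true mask is built with the insertelement-plus-shufflevector constant-splat idiom that appears inline in the calls above. Spelled out as a sketch (the helper name is hypothetical):

define <vscale x 32 x i1> @all_ones_mask() {
  ; Splat i1 true across every lane; the backend recognizes the
  ; all-ones mask and selects the unmasked vfcvt forms, which is why
  ; the _unmasked CHECK lines carry no v0.t operand.
  %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
  %splat = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
  ret <vscale x 32 x i1> %splat
}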
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -300,3 +300,59 @@
   %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x double> %v
 }
+
+declare <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a4, a1, 2
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    sub a3, a0, a1
+; CHECK-NEXT:    vslidedown.vx v0, v0, a4
+; CHECK-NEXT:    bltu a0, a3, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a3
+; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB25_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a0, a1
+; CHECK-NEXT:  .LBB25_4:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x float> %v
+}
+
+define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    sub a1, a0, a1
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a3, a1
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 32 x float> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -300,3 +300,59 @@
   %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x double> %v
 }
+
+declare <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a4, a1, 2
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    sub a3, a0, a1
+; CHECK-NEXT:    vslidedown.vx v0, v0, a4
+; CHECK-NEXT:    bltu a0, a3, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a3
+; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    vfcvt.f.xu.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB25_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a0, a1
+; CHECK-NEXT:  .LBB25_4:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x float> %v
+}
+
+define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    sub a1, a0, a1
+; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a3, a1
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT:    vfcvt.f.xu.v v16, v16
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 32 x float> %v
+}
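For the scalable tests above, the split point is not a constant: each <vscale x 32 x ...> half holds vscale x 16 elements, which the generated code derives from vlenb (the csrr/slli pair), and the upper half of the mask starts vlenb/4 bytes into v0 (the srli/vslidedown.vx pair). The IR below is a hand-split sketch of the masked fptosi case; it assumes the llvm.vector.extract/llvm.vector.insert intrinsics (spelled llvm.experimental.vector.extract/insert in releases contemporary with this patch), and the function name and EVL arithmetic helpers are illustrative, not part of the patch.

declare <vscale x 16 x i32> @llvm.vp.fptosi.nxv16i32.nxv16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x float> @llvm.vector.extract.nxv16f32.nxv32f32(<vscale x 32 x float>, i64)
declare <vscale x 16 x i1> @llvm.vector.extract.nxv16i1.nxv32i1(<vscale x 32 x i1>, i64)
declare <vscale x 32 x i32> @llvm.vector.insert.nxv32i32.nxv16i32(<vscale x 32 x i32>, <vscale x 16 x i32>, i64)
declare i32 @llvm.vscale.i32()
declare i32 @llvm.umin.i32(i32, i32)
declare i32 @llvm.usub.sat.i32(i32, i32)

define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32_by_hand(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
  ; Each half holds vscale x 16 lanes, the quantity the CHECK lines
  ; compute as vlenb << 1.
  %vscale = call i32 @llvm.vscale.i32()
  %half = mul i32 %vscale, 16
  ; Low half runs min(evl, half) lanes; high half gets the remainder,
  ; clamped at zero.
  %evl.lo = call i32 @llvm.umin.i32(i32 %evl, i32 %half)
  %evl.hi = call i32 @llvm.usub.sat.i32(i32 %evl, i32 %half)
  ; Extract indices are in elements and scaled by vscale, so index 16
  ; names the start of the upper half.
  %va.lo = call <vscale x 16 x float> @llvm.vector.extract.nxv16f32.nxv32f32(<vscale x 32 x float> %va, i64 0)
  %va.hi = call <vscale x 16 x float> @llvm.vector.extract.nxv16f32.nxv32f32(<vscale x 32 x float> %va, i64 16)
  %m.lo = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1.nxv32i1(<vscale x 32 x i1> %m, i64 0)
  %m.hi = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1.nxv32i1(<vscale x 32 x i1> %m, i64 16)
  %lo = call <vscale x 16 x i32> @llvm.vp.fptosi.nxv16i32.nxv16f32(<vscale x 16 x float> %va.lo, <vscale x 16 x i1> %m.lo, i32 %evl.lo)
  %hi = call <vscale x 16 x i32> @llvm.vp.fptosi.nxv16i32.nxv16f32(<vscale x 16 x float> %va.hi, <vscale x 16 x i1> %m.hi, i32 %evl.hi)
  ; Reassemble the <vscale x 32 x i32> result.
  %r.lo = call <vscale x 32 x i32> @llvm.vector.insert.nxv32i32.nxv16i32(<vscale x 32 x i32> undef, <vscale x 16 x i32> %lo, i64 0)
  %r = call <vscale x 32 x i32> @llvm.vector.insert.nxv32i32.nxv16i32(<vscale x 32 x i32> %r.lo, <vscale x 16 x i32> %hi, i64 16)
  ret <vscale x 32 x i32> %r
}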