diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -1047,6 +1047,8 @@ case ISD::ANY_EXTEND: case ISD::SIGN_EXTEND: case ISD::ZERO_EXTEND: + case ISD::VP_SIGN_EXTEND: + case ISD::VP_ZERO_EXTEND: SplitVecRes_ExtendOp(N, Lo, Hi); break; @@ -3773,11 +3775,13 @@ case ISD::FP_TO_SINT: case ISD::FP_TO_UINT: case ISD::SIGN_EXTEND: + case ISD::VP_SIGN_EXTEND: case ISD::SINT_TO_FP: case ISD::VP_TRUNCATE: case ISD::TRUNCATE: case ISD::UINT_TO_FP: case ISD::ZERO_EXTEND: + case ISD::VP_ZERO_EXTEND: Res = WidenVecRes_Convert(N); break; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll @@ -147,3 +147,61 @@ %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) ret <4 x i64> %v } + +declare <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32) + +define <32 x i64> @vsext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsext_v32i64_v32i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: addi a2, a0, -16 +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: bltu a0, a2, .LBB12_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: .LBB12_2: +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vslidedown.vi v24, v8, 16 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: vsext.vf2 v16, v24, v0.t +; CHECK-NEXT: bltu a0, a1, .LBB12_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: li a0, 16 
+; CHECK-NEXT: .LBB12_4: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vsext.vf2 v24, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v24 +; CHECK-NEXT: ret + %v = call <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32> %va, <32 x i1> %m, i32 %evl) + ret <32 x i64> %v +} + +define <32 x i64> @vsext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsext_v32i64_v32i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a2, a0, -16 +; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: bltu a0, a2, .LBB13_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: .LBB13_2: +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vslidedown.vi v24, v8, 16 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: vsext.vf2 v16, v24 +; CHECK-NEXT: bltu a0, a1, .LBB13_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: li a0, 16 +; CHECK-NEXT: .LBB13_4: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf2 v24, v8 +; CHECK-NEXT: vmv.v.v v8, v24 +; CHECK-NEXT: ret + %v = call <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) + ret <32 x i64> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll @@ -147,3 +147,61 @@ %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) ret <4 x i64> %v } + +declare <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32) + +define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vzext_v32i64_v32i32: +; CHECK: # %bb.0: 
+; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: addi a2, a0, -16 +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: bltu a0, a2, .LBB12_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: .LBB12_2: +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vslidedown.vi v24, v8, 16 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: vzext.vf2 v16, v24, v0.t +; CHECK-NEXT: bltu a0, a1, .LBB12_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: li a0, 16 +; CHECK-NEXT: .LBB12_4: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vzext.vf2 v24, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v24 +; CHECK-NEXT: ret + %v = call <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32> %va, <32 x i1> %m, i32 %evl) + ret <32 x i64> %v +} + +define <32 x i64> @vzext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl) { +; CHECK-LABEL: vzext_v32i64_v32i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a2, a0, -16 +; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: bltu a0, a2, .LBB13_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: .LBB13_2: +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vslidedown.vi v24, v8, 16 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: vzext.vf2 v16, v24 +; CHECK-NEXT: bltu a0, a1, .LBB13_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: li a0, 16 +; CHECK-NEXT: .LBB13_4: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf2 v24, v8 +; CHECK-NEXT: vmv.v.v v8, v24 +; CHECK-NEXT: ret + %v = call <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) + ret <32 x i64> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
@@ -145,3 +145,61 @@
   %v = call <vscale x 2 x i64> @llvm.vp.sext.nxv2i64.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
   ret <vscale x 2 x i64> %v
 }
+
+declare <vscale x 32 x i32> @llvm.vp.sext.nxv32i32.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vsext_nxv32i8_nxv32i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a4
+; CHECK-NEXT: bltu a0, a3, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsext.vf4 v16, v10, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB12_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB12_4:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsext.vf4 v24, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v24
+; CHECK-NEXT: ret
+  %v = call <vscale x 32 x i32> @llvm.vp.sext.nxv32i32.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 %vl)
+  ret <vscale x 32 x i32> %v
+}
+
+define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32_unmasked(<vscale x 32 x i8> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vsext_nxv32i8_nxv32i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: bltu a0, a1, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: .LBB13_2:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: sub a1, a0, a1
+; CHECK-NEXT: vsext.vf4 v24, v8
+; CHECK-NEXT: bltu a0, a1, .LBB13_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a3, a1
+; CHECK-NEXT: .LBB13_4:
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsext.vf4 v16, v10
+; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: ret
+  %v = call <vscale x 32 x i32> @llvm.vp.sext.nxv32i32.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %vl)
+  ret <vscale x 32 x i32> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -145,3 +145,61 @@
   %v = call <vscale x 2 x i64> @llvm.vp.zext.nxv2i64.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
   ret <vscale x 2 x i64> %v
 }
+
+declare <vscale x 32 x i32> @llvm.vp.zext.nxv32i32.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vzext_nxv32i8_nxv32i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v12, v0
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a4
+; CHECK-NEXT: bltu a0, a3, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vzext.vf4 v16, v10, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB12_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB12_4:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vzext.vf4 v24, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v24
+; CHECK-NEXT: ret
+  %v = call <vscale x 32 x i32> @llvm.vp.zext.nxv32i32.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 %vl)
+  ret <vscale x 32 x i32> %v
+}
+
+define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32_unmasked(<vscale x 32 x i8> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vzext_nxv32i8_nxv32i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: bltu a0, a1, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: .LBB13_2:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: sub a1, a0, a1
+; CHECK-NEXT: vzext.vf4 v24, v8
+; CHECK-NEXT: bltu a0, a1, .LBB13_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a3, a1
+; CHECK-NEXT: .LBB13_4:
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vzext.vf4 v16, v10
+; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: ret
+  %v = call <vscale x 32 x i32> @llvm.vp.zext.nxv32i32.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i1> shufflevector (<vscale x 32 x i1> insertelement (<vscale x 32 x i1> undef, i1 true, i32 0), <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer), i32 %vl)
+  ret <vscale x 32 x i32> %v
+}