diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -791,6 +791,7 @@
         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
 
         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
@@ -157,24 +157,22 @@
 ;
 ; LMULMAX1-LABEL: fpround_v8f32_v8f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi sp, sp, -32
-; LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX1-NEXT:    addi a2, a0, 16
-; LMULMAX1-NEXT:    vle32.v v25, (a2)
+; LMULMAX1-NEXT:    vle32.v v25, (a0)
+; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v26, (a0)
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v27, v25
-; LMULMAX1-NEXT:    addi a0, sp, 24
-; LMULMAX1-NEXT:    vse16.v v27, (a0)
-; LMULMAX1-NEXT:    vfncvt.f.f.w v25, v26
-; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    vse16.v v25, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    vle16.v v25, (a0)
+; LMULMAX1-NEXT:    vmv.v.i v25, 0
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v25, v27, 0
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; LMULMAX1-NEXT:    vfncvt.f.f.w v27, v26
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v25, v27, 4
+; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
-; LMULMAX1-NEXT:    addi sp, sp, 32
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
   %d = fptrunc <8 x float> %a to <8 x half>
@@ -196,45 +194,42 @@
 ;
 ; LMULMAX1-LABEL: fpround_v8f64_v8f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi sp, sp, -32
-; LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
+; LMULMAX1-NEXT:    addi a2, a0, 48
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    vle64.v v25, (a2)
+; LMULMAX1-NEXT:    addi a2, a0, 32
 ; LMULMAX1-NEXT:    vle64.v v26, (a2)
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    vle64.v v27, (a2)
-; LMULMAX1-NEXT:    addi a0, a0, 32
+; LMULMAX1-NEXT:    vle64.v v27, (a0)
+; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v28, (a0)
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v29, v27
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v27, v29
-; LMULMAX1-NEXT:    addi a0, sp, 28
-; LMULMAX1-NEXT:    vse16.v v27, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX1-NEXT:    vmv.v.i v29, 0
+; LMULMAX1-NEXT:    vsetivli zero, 2, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v27, 0
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v27, v28
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v28, v27
-; LMULMAX1-NEXT:    addi a0, sp, 24
-; LMULMAX1-NEXT:    vse16.v v28, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v28, 2
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v27, v26
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v26, v27
-; LMULMAX1-NEXT:    addi a0, sp, 20
-; LMULMAX1-NEXT:    vse16.v v26, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v26, 4
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v26, v25
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v25, v26
-; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    vse16.v v25, (a0)
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    vle16.v v25, (a0)
-; LMULMAX1-NEXT:    vse16.v v25, (a1)
-; LMULMAX1-NEXT:    addi sp, sp, 32
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v25, 6
+; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-NEXT:    vse16.v v29, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
   %d = fptrunc <8 x double> %a to <8 x half>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -466,45 +466,42 @@
 ;
 ; LMULMAX1-LABEL: si2fp_v8i64_v8f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi sp, sp, -32
-; LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
+; LMULMAX1-NEXT:    addi a2, a0, 48
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    vle64.v v25, (a2)
+; LMULMAX1-NEXT:    addi a2, a0, 32
 ; LMULMAX1-NEXT:    vle64.v v26, (a2)
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    vle64.v v27, (a2)
-; LMULMAX1-NEXT:    addi a0, a0, 32
+; LMULMAX1-NEXT:    vle64.v v27, (a0)
+; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v28, (a0)
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.x.w v29, v27
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v27, v29
-; LMULMAX1-NEXT:    addi a0, sp, 28
-; LMULMAX1-NEXT:    vse16.v v27, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX1-NEXT:    vmv.v.i v29, 0
+; LMULMAX1-NEXT:    vsetivli zero, 2, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v27, 0
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.x.w v27, v28
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v28, v27
-; LMULMAX1-NEXT:    addi a0, sp, 24
-; LMULMAX1-NEXT:    vse16.v v28, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v28, 2
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.x.w v27, v26
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v26, v27
-; LMULMAX1-NEXT:    addi a0, sp, 20
-; LMULMAX1-NEXT:    vse16.v v26, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v26, 4
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.x.w v26, v25
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v25, v26
-; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    vse16.v v25, (a0)
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    vle16.v v25, (a0)
-; LMULMAX1-NEXT:    vse16.v v25, (a1)
-; LMULMAX1-NEXT:    addi sp, sp, 32
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v25, 6
+; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-NEXT:    vse16.v v29, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %d = sitofp <8 x i64> %a to <8 x half>
@@ -526,45 +523,42 @@
 ;
 ; LMULMAX1-LABEL: ui2fp_v8i64_v8f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi sp, sp, -32
-; LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
+; LMULMAX1-NEXT:    addi a2, a0, 48
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    vle64.v v25, (a2)
+; LMULMAX1-NEXT:    addi a2, a0, 32
 ; LMULMAX1-NEXT:    vle64.v v26, (a2)
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    vle64.v v27, (a2)
-; LMULMAX1-NEXT:    addi a0, a0, 32
+; LMULMAX1-NEXT:    vle64.v v27, (a0)
+; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v28, (a0)
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.xu.w v29, v27
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v27, v29
-; LMULMAX1-NEXT:    addi a0, sp, 28
-; LMULMAX1-NEXT:    vse16.v v27, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX1-NEXT:    vmv.v.i v29, 0
+; LMULMAX1-NEXT:    vsetivli zero, 2, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v27, 0
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.xu.w v27, v28
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v28, v27
-; LMULMAX1-NEXT:    addi a0, sp, 24
-; LMULMAX1-NEXT:    vse16.v v28, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v28, 2
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.xu.w v27, v26
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v26, v27
-; LMULMAX1-NEXT:    addi a0, sp, 20
-; LMULMAX1-NEXT:    vse16.v v26, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v26, 4
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.xu.w v26, v25
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v25, v26
-; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    vse16.v v25, (a0)
-; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    vle16.v v25, (a0)
-; LMULMAX1-NEXT:    vse16.v v25, (a1)
-; LMULMAX1-NEXT:    addi sp, sp, 32
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT:    vslideup.vi v29, v25, 6
+; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-NEXT:    vse16.v v29, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %d = uitofp <8 x i64> %a to <8 x half>
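
Note: the hunks above include only the setOperationAction registration and the regenerated CHECK lines; the LowerOperation hunk itself is not shown. A minimal sketch of a custom CONCAT_VECTORS lowering consistent with this output follows; the function name and its placement are assumptions for illustration, not lines from this patch. The idea is to split the concat into a chain of ISD::INSERT_SUBVECTOR nodes, which the existing custom INSERT_SUBVECTOR lowering then selects to vslideup.vi at the right element offset, rather than letting the default expansion round-trip every piece through the stack.

// Hypothetical sketch, assuming the usual SelectionDAG helpers from
// llvm/CodeGen/SelectionDAG.h and enumerate() from llvm/ADT/STLExtras.h.
static SDValue lowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  // All operands of a CONCAT_VECTORS have the same type, so operand i lands
  // at element offset i * NumOpElts of the result.
  unsigned NumOpElts =
      Op.getOperand(0).getSimpleValueType().getVectorNumElements();
  SDValue Vec = DAG.getUNDEF(VT);
  for (const auto &OpIdx : enumerate(Op->ops()))
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
                      DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
  return Vec;
}

This shape matches the new CHECK lines: one vmv.v.i to materialize the destination, then one vslideup.vi per piece (offsets 0/2/4/6 in the v8f64 and v8i64 cases), replacing the old addi sp/vse16.v/vle16.v stack traffic and the frame setup/teardown.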