diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -579,3 +579,84 @@
   %ext = extractelement <vscale x 4 x float> %bo, i32 0
   ret float %ext
 }
+
+define double @extractelt_nxv16f64_0(<vscale x 16 x double> %v) {
+; CHECK-LABEL: extractelt_nxv16f64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, mu
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x double> %v, i32 0
+  ret double %r
+}
+
+define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
+; CHECK-LABEL: extractelt_nxv16f64_neg1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    addi a0, sp, 64
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 3
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v16, (a2)
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    fld fa0, -8(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x double> %v, i32 -1
+  ret double %r
+}
+
+define double @extractelt_nxv16f64_imm(<vscale x 16 x double> %v) {
+; CHECK-LABEL: extractelt_nxv16f64_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x double> %v, i32 2
+  ret double %r
+}
+
+define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 signext %idx) {
+; CHECK-LABEL: extractelt_nxv16f64_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 1
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    bltu a0, a2, .LBB54_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:  .LBB54_2:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 4
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    addi a2, sp, 64
+; CHECK-NEXT:    add a0, a2, a0
+; CHECK-NEXT:    vs8r.v v8, (a2)
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, a2, a1
+; CHECK-NEXT:    vs8r.v v16, (a1)
+; CHECK-NEXT:    fld fa0, 0(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x double> %v, i32 %idx
+  ret double %r
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -127,3 +127,45 @@
   %c = extractelement <vscale x 64 x i1> %b, i64 %idx
   ret i1 %c
 }
+
+define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv128i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 4
+; CHECK-NEXT:    addi a3, a3, -1
+; CHECK-NEXT:    bltu a1, a3, .LBB7_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a3
+; CHECK-NEXT:  .LBB7_2:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    csrr a3, vlenb
+; CHECK-NEXT:    slli a3, a3, 4
+; CHECK-NEXT:    sub sp, sp, a3
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    addi a3, sp, 64
+; CHECK-NEXT:    slli a2, a2, 3
+; CHECK-NEXT:    add a4, a0, a2
+; CHECK-NEXT:    vl8r.v v16, (a4)
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    add a0, a3, a1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vmseq.vi v8, v16, 0
+; CHECK-NEXT:    vmseq.vi v0, v24, 0
+; CHECK-NEXT:    vmv.v.i v16, 0
+; CHECK-NEXT:    vmerge.vim v24, v16, 1, v0
+; CHECK-NEXT:    vs8r.v v24, (a3)
+; CHECK-NEXT:    add a1, a3, a2
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT:    vs8r.v v8, (a1)
+; CHECK-NEXT:    lb a0, 0(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %a = load <vscale x 128 x i8>, <vscale x 128 x i8>* %x
+  %b = icmp eq <vscale x 128 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 128 x i1> %b, i64 %idx
+  ret i1 %c
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -859,3 +859,84 @@
   %ext = extractelement <vscale x 4 x i32> %bo, i32 0
   ret i32 %ext
 }
+
+define i32 @extractelt_nxv32i32_0(<vscale x 32 x i32> %v) {
+; CHECK-LABEL: extractelt_nxv32i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e32, m8, ta, mu
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 32 x i32> %v, i32 0
+  ret i32 %r
+}
+
+define i32 @extractelt_nxv32i32_neg1(<vscale x 32 x i32> %v) {
+; CHECK-LABEL: extractelt_nxv32i32_neg1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    addi a0, sp, 64
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 3
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v16, (a2)
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    lw a0, -4(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 32 x i32> %v, i32 -1
+  ret i32 %r
+}
+
+define i32 @extractelt_nxv32i32_imm(<vscale x 32 x i32> %v) {
+; CHECK-LABEL: extractelt_nxv32i32_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 32 x i32> %v, i32 2
+  ret i32 %r
+}
+
+define i32 @extractelt_nxv32i32_idx(<vscale x 32 x i32> %v, i32 %idx) {
+; CHECK-LABEL: extractelt_nxv32i32_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 2
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    bltu a0, a2, .LBB74_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:  .LBB74_2:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 4
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    addi a2, sp, 64
+; CHECK-NEXT:    add a0, a2, a0
+; CHECK-NEXT:    vs8r.v v8, (a2)
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, a2, a1
+; CHECK-NEXT:    vs8r.v v16, (a1)
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 32 x i32> %v, i32 %idx
+  ret i32 %r
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -790,3 +790,84 @@
   %ext = extractelement <vscale x 4 x i32> %bo, i32 0
   ret i32 %ext
 }
+
+define i64 @extractelt_nxv16i64_0(<vscale x 16 x i64> %v) {
+; CHECK-LABEL: extractelt_nxv16i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, mu
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x i64> %v, i32 0
+  ret i64 %r
+}
+
+define i64 @extractelt_nxv16i64_neg1(<vscale x 16 x i64> %v) {
+; CHECK-LABEL: extractelt_nxv16i64_neg1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    addi a0, sp, 64
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 3
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v16, (a2)
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    ld a0, -8(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x i64> %v, i32 -1
+  ret i64 %r
+}
+
+define i64 @extractelt_nxv16i64_imm(<vscale x 16 x i64> %v) {
+; CHECK-LABEL: extractelt_nxv16i64_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x i64> %v, i32 2
+  ret i64 %r
+}
+
+define i64 @extractelt_nxv16i64_idx(<vscale x 16 x i64> %v, i32 signext %idx) {
+; CHECK-LABEL: extractelt_nxv16i64_idx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a2, a1, 1
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    bltu a0, a2, .LBB74_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a0, a2
+; CHECK-NEXT:  .LBB74_2:
+; CHECK-NEXT:    addi sp, sp, -64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    .cfi_def_cfa s0, 0
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 4
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    andi sp, sp, -64
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    addi a2, sp, 64
+; CHECK-NEXT:    add a0, a2, a0
+; CHECK-NEXT:    vs8r.v v8, (a2)
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, a2, a1
+; CHECK-NEXT:    vs8r.v v16, (a1)
+; CHECK-NEXT:    ld a0, 0(a0)
+; CHECK-NEXT:    addi sp, s0, -64
+; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    ret
+  %r = extractelement <vscale x 16 x i64> %v, i32 %idx
+  ret i64 %r
+}