llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
… (1,716 lines not shown) …
;
; RV64-LABEL: vpscatter_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT: addi a1, a0, 128
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v24, (a1)
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: li a1, 16
; RV64-NEXT: mv a0, a2
… (52 lines not shown) …
; RV64-LABEL: vpscatter_baseidx_v32i32_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a3, vlenb
; RV64-NEXT: li a4, 10
; RV64-NEXT: mul a3, a3, a4
; RV64-NEXT: sub sp, sp, a3
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x0a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 10 * vlenb
; RV64-NEXT: li a3, 32
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle32.v v24, (a1)
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
… (68 lines not shown) …
; RV64-LABEL: vpscatter_baseidx_sext_v32i32_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a3, vlenb
; RV64-NEXT: li a4, 10
; RV64-NEXT: mul a3, a3, a4
; RV64-NEXT: sub sp, sp, a3
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x0a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 10 * vlenb
; RV64-NEXT: li a3, 32
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle32.v v24, (a1)
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
… (70 lines not shown) …
; RV64-LABEL: vpscatter_baseidx_zext_v32i32_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a3, vlenb
; RV64-NEXT: li a4, 10
; RV64-NEXT: mul a3, a3, a4
; RV64-NEXT: sub sp, sp, a3
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x0a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 10 * vlenb
; RV64-NEXT: li a3, 32
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle32.v v24, (a1)
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
… (remaining 42 lines not shown) …
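
Note on the .cfi_escape lines in the hunks above: each byte string encodes a DWARF DW_CFA_def_cfa_expression, and the trailing comment (sp + 16 + 8 * vlenb, or sp + 16 + 10 * vlenb) states the value it computes. The following is a minimal decoding sketch, not part of this changeset, written under the assumptions that the standard DWARF opcode values apply and that RISC-V maps CSRs to DWARF register numbers 4096 + csr, so vlenb (CSR 0xC22) is DWARF register 7202.

# Sketch (assumption: standard DWARF 5 opcodes; RISC-V DWARF register 7202 = vlenb CSR).
# Decodes the .cfi_escape operands from the first hunk into the CFA expression.

def uleb128(data, i):
    """Decode an unsigned LEB128 value starting at index i; return (value, next index)."""
    result, shift = 0, 0
    while True:
        byte = data[i]
        i += 1
        result |= (byte & 0x7F) << shift
        shift += 7
        if not (byte & 0x80):
            return result, i

escape = [0x0F, 0x0D, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08,
          0x92, 0xA2, 0x38, 0x00, 0x1E, 0x22]

assert escape[0] == 0x0F           # DW_CFA_def_cfa_expression
length, i = uleb128(escape, 1)     # expression length in bytes (0x0D = 13)
expr = escape[i:i + length]

stack = []                         # evaluate the expression symbolically
j = 0
while j < len(expr):
    op = expr[j]; j += 1
    if op == 0x72:                 # DW_OP_breg2: sp (x2) plus an SLEB128 offset
        off, j = uleb128(expr, j)  # offset is 0 here, so ULEB and SLEB agree
        stack.append(f"sp + {off}" if off else "sp")
    elif op == 0x11:               # DW_OP_consts: SLEB128 constant (small positive here)
        val, j = uleb128(expr, j)
        stack.append(str(val))
    elif op == 0x92:               # DW_OP_bregx: ULEB128 register + SLEB128 offset
        reg, j = uleb128(expr, j)
        off, j = uleb128(expr, j)
        name = "vlenb" if reg == 7202 else f"r{reg}"
        stack.append(name if off == 0 else f"{name} + {off}")
    elif op == 0x22:               # DW_OP_plus
        b, a = stack.pop(), stack.pop()
        stack.append(f"({a} + {b})")
    elif op == 0x1E:               # DW_OP_mul
        b, a = stack.pop(), stack.pop()
        stack.append(f"({a} * {b})")
    else:
        raise ValueError(f"unhandled opcode {op:#x}")

print("CFA =", stack[0])           # prints: CFA = ((sp + 16) + (8 * vlenb))

Run as written, it prints CFA = ((sp + 16) + (8 * vlenb)), matching the comment on the first hunk; the other hunks differ only in the constant byte (0x0a instead of 0x08), giving sp + 16 + 10 * vlenb.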