llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
[... 1,586 lines elided ...]
define i64 @vwreduce_add_v64i64(<64 x i32>* %x) {
; RV32-LABEL: vwreduce_add_v64i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a1, a1, 5
; RV32-NEXT:    sub sp, sp, a1
; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; RV32-NEXT:    addi a1, a0, 128
; RV32-NEXT:    li a2, 32
; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    vle32.v v16, (a1)
; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
[... 52 lines elided ...]
;
; RV64-LABEL: vwreduce_add_v64i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 5
; RV64-NEXT:    sub sp, sp, a1
; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; RV64-NEXT:    addi a1, a0, 128
; RV64-NEXT:    li a2, 32
; RV64-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    vle32.v v16, (a1)
; RV64-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
[... 55 lines elided ...]
define i64 @vwreduce_uadd_v64i64(<64 x i32>* %x) {
; RV32-LABEL: vwreduce_uadd_v64i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a1, a1, 5
; RV32-NEXT:    sub sp, sp, a1
; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; RV32-NEXT:    addi a1, a0, 128
; RV32-NEXT:    li a2, 32
; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    vle32.v v16, (a1)
; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
[... 52 lines elided ...]
;
; RV64-LABEL: vwreduce_uadd_v64i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 5
; RV64-NEXT:    sub sp, sp, a1
; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; RV64-NEXT:    addi a1, a0, 128
; RV64-NEXT:    li a2, 32
; RV64-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    vle32.v v16, (a1)
; RV64-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
[... 5,759 lines elided ...]
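Note on these hunks: the only new check lines in each function are the .cfi_escape directives emitted right after the `sub sp, sp, a1` that reserves 32 * vlenb bytes of scalable stack (vlenb is read with `csrr`, shifted left by 5, then subtracted from sp). Because the frame size depends on the runtime value of vlenb, the CFA can no longer be described as a fixed register-plus-offset rule and is instead emitted as a raw DWARF expression. Below is a minimal decoding sketch, assuming DWARF 5 opcode values and that register number 7202 (0x1c22) is the RISC-V DWARF encoding of the vlenb CSR (4096 + CSR 0xc22); it is illustrative only, not LLVM's own emission path.

# Minimal sketch: decode the .cfi_escape bytes added by this change and
# print them as a symbolic expression. Opcode values per the DWARF 5 spec.
def leb128(data, i, signed=False):
    # Decode one (S|U)LEB128 value starting at data[i]; return (value, next i).
    result, shift = 0, 0
    while True:
        byte = data[i]
        i += 1
        result |= (byte & 0x7F) << shift
        shift += 7
        if not byte & 0x80:
            if signed and byte & 0x40:
                result -= 1 << shift  # sign-extend
            return result, i

raw = bytes([0x0F, 0x0D, 0x72, 0x00, 0x11, 0x10, 0x22,
             0x11, 0x20, 0x92, 0xA2, 0x38, 0x00, 0x1E, 0x22])

assert raw[0] == 0x0F  # DW_CFA_def_cfa_expression
assert raw[1] == 13    # ULEB128 length of the expression that follows
expr, i, stack = raw[2:], 0, []
while i < len(expr):
    op = expr[i]
    i += 1
    if op == 0x72:                       # DW_OP_breg2: reg x2 (sp) + SLEB128 offset
        off, i = leb128(expr, i, signed=True)
        stack.append(f"sp + {off}" if off else "sp")
    elif op == 0x11:                     # DW_OP_consts: SLEB128 constant
        val, i = leb128(expr, i, signed=True)
        stack.append(str(val))
    elif op == 0x92:                     # DW_OP_bregx: ULEB128 reg + SLEB128 offset
        reg, i = leb128(expr, i)
        off, i = leb128(expr, i, signed=True)
        name = "vlenb" if reg == 7202 else f"r{reg}"  # assumed RISC-V CSR mapping
        stack.append(f"{name} + {off}" if off else name)
    elif op == 0x22:                     # DW_OP_plus
        b, a = stack.pop(), stack.pop()
        stack.append(f"({a} + {b})")
    elif op == 0x1E:                     # DW_OP_mul
        b, a = stack.pop(), stack.pop()
        stack.append(f"({a} * {b})")

print("CFA =", stack[0])

Running the sketch prints `CFA = ((sp + 16) + (32 * vlenb))`, which matches the `# sp + 16 + 32 * vlenb` annotation on each added directive.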