llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
- This file was added.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
; Check that we don't try to merge uunpklo/uzp1 with a load or store if we
; would end up creating a predicate that is not guaranteed to fit the minimum VL.
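;
; The ptrue immediates used below are SVE predicate patterns: 9 = VL16,
; 10 = VL32, 11 = VL64, 12 = VL128 and 31 = ALL. With vscale_range(8,0) the
; minimum SVE register size is 8 x 128 = 1024 bits and, judging by the pairs
; below, the fold only fires when the pattern's element count at the doubled
; element width is still guaranteed to fit in that minimum: for example,
; 64 x i16 = 1024 bits folds, while 128 x i16 = 2048 bits does not.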

; UUNPKLO + Load
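;
; uunpklo zero-extends the low half of its source vector, so a masked load of
; the narrow type feeding uunpklo can instead be a single widening load of the
; wide type (e.g. "ld1b { z0.h }"), provided the rewritten wider-element ptrue
; pattern is guaranteed valid at the minimum VL.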

define <vscale x 8 x i16> @uunpklo_i8_valid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i8_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %b, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  %uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8> %load)
  ret <vscale x 8 x i16> %uzp
}

define <vscale x 8 x i16> @uunpklo_i8_invalid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i8_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl128
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.h, z0.b
; CHECK-NEXT:    ret
  %mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 12)
  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %b, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  %uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8> %load)
  ret <vscale x 8 x i16> %uzp
}

define <vscale x 4 x i32> @uunpklo_i16_valid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i16_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %b, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16> %load)
  ret <vscale x 4 x i32> %uzp
}

define <vscale x 4 x i32> @uunpklo_i16_invalid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i16_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.s, z0.h
; CHECK-NEXT:    ret
  %mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 11)
  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %b, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16> %load)
  ret <vscale x 4 x i32> %uzp
}

define <vscale x 2 x i64> @uunpklo_i32_valid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i32_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl16
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 9)
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
  ret <vscale x 2 x i64> %uzp
}

define <vscale x 2 x i64> @uunpklo_i32_invalid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i32_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z0.s
; CHECK-NEXT:    ret
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
  ret <vscale x 2 x i64> %uzp
}

define <vscale x 2 x i64> @uunpklo_invalid_all(ptr %b) #0 {
; CHECK-LABEL: uunpklo_invalid_all:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z0.s
; CHECK-NEXT:    ret
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
  ret <vscale x 2 x i64> %uzp
}

; UZP1 + Store
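;
; The mirror of the load case: uzp1 with both operands equal keeps the
; even-indexed narrow elements, i.e. it truncates each wide element, so a
; uzp1 feeding a masked store of the narrow type can instead be a single
; truncating store of the wide type (e.g. "st1b { z0.h }"), under the same
; minimum-VL constraint on the rewritten predicate.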

define void @uzp1_i8_valid(<vscale x 8 x i16> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i8_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    st1b { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 8 x i16> %a to <vscale x 16 x i8>
  %uzp = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8> %a.bc, <vscale x 16 x i8> %a.bc)
  %mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
  call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %uzp, ptr %b, i32 2, <vscale x 16 x i1> %mask)
  ret void
}

define void @uzp1_i8_invalid(<vscale x 8 x i16> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i8_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl128
; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 8 x i16> %a to <vscale x 16 x i8>
  %uzp = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8> %a.bc, <vscale x 16 x i8> %a.bc)
  %mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 12)
  call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %uzp, ptr %b, i32 2, <vscale x 16 x i1> %mask)
  ret void
}

define void @uzp1_i16_valid(<vscale x 4 x i32> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i16_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 4 x i32> %a to <vscale x 8 x i16>
  %uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16> %a.bc, <vscale x 8 x i16> %a.bc)
  %mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
  call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %uzp, ptr %b, i32 2, <vscale x 8 x i1> %mask)
  ret void
}

define void @uzp1_i16_invalid(<vscale x 4 x i32> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i16_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 4 x i32> %a to <vscale x 8 x i16>
  %uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16> %a.bc, <vscale x 8 x i16> %a.bc)
  %mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 11)
  call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %uzp, ptr %b, i32 2, <vscale x 8 x i1> %mask)
  ret void
}

define void @uzp1_i32_valid(<vscale x 2 x i64> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i32_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl16
; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 2 x i64> %a to <vscale x 4 x i32>
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %a.bc, <vscale x 4 x i32> %a.bc)
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 9)
  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %uzp, ptr %b, i32 2, <vscale x 4 x i1> %mask)
  ret void
}

define void @uzp1_i32_invalid(<vscale x 2 x i64> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i32_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 2 x i64> %a to <vscale x 4 x i32>
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %a.bc, <vscale x 4 x i32> %a.bc)
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %uzp, ptr %b, i32 2, <vscale x 4 x i1> %mask)
  ret void
}

define void @uzp1_invalid_all(<vscale x 2 x i64> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_invalid_all:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 2 x i64> %a to <vscale x 4 x i32>
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %a.bc, <vscale x 4 x i32> %a.bc)
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %uzp, ptr %b, i32 2, <vscale x 4 x i1> %mask)
  ret void
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 %pattern)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 %pattern)

declare <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)

declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)

declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)
declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)
declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)

attributes #0 = { "target-features"="+sve" vscale_range(8,0) }