diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-addressing-modes.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-addressing-modes.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-addressing-modes.ll @@ -0,0 +1,1632 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; masked_gather + +; i8 +define void @masked_gather_base_plus_stride_v2i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: st1b { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr i8, ptr %src, <2 x i64> + %data = tail call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> %ptrs, i32 2, <2 x i1> , <2 x i8> undef) + store <2 x i8> %data, ptr %dst, align 1 + ret void +} + +define void @masked_gather_base_plus_stride_v4i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: st1b { z0.h }, p0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = getelementptr i8, ptr %src, <4 x i64> + %data = tail call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> , <4 x i8> undef) + store <4 x i8> %data, ptr %dst, align 1 + ret void +} + +define void @masked_gather_base_plus_stride_v8i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1b { z1.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: add z0.s, z0.s, #28 // =0x1c +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #22] +; CHECK-NEXT: strh w10, [sp, #20] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: ldp d0, d2, [sp, #8] +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: mov z3.h, z2.h[1] +; CHECK-NEXT: strb w8, [sp, #24] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: fmov w10, s0 +; 
CHECK-NEXT: strb w9, [sp, #27] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z0.h, z2.h[3] +; CHECK-NEXT: mov z1.h, z2.h[2] +; CHECK-NEXT: strb w8, [sp, #26] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strb w10, [sp, #25] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strb w9, [sp, #28] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w8, [sp, #31] +; CHECK-NEXT: strb w10, [sp, #30] +; CHECK-NEXT: strb w9, [sp, #29] +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %ptrs = getelementptr i8, ptr %src, <8 x i64> + %data = tail call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> , <8 x i8> undef) + store <8 x i8> %data, ptr %dst, align 1 + ret void +} + +define void @masked_gather_base_plus_stride_v16i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1b { z1.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.d, z0.d +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #-48]! +; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w9, [sp, #6] +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: add z2.s, z2.s, #84 // =0x54 +; CHECK-NEXT: strh w8, [sp, #2] +; CHECK-NEXT: ld1b { z1.s }, p0/z, [x1, z2.s, sxtw] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.d, z0.d +; CHECK-NEXT: add z0.s, z0.s, #28 // =0x1c +; CHECK-NEXT: strh w9, [sp, #30] +; CHECK-NEXT: add z2.s, z2.s, #56 // =0x38 +; CHECK-NEXT: strh w10, [sp, #28] +; CHECK-NEXT: strh w8, [sp, #26] +; CHECK-NEXT: ld1b { z1.s }, p0/z, [x1, z2.s, sxtw] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w9, [sp, #22] +; CHECK-NEXT: strh w10, [sp, #20] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr d0, [sp] +; CHECK-NEXT: ldp d2, d1, [sp, #16] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z3.h, z0.h[3] +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: mov z4.h, z0.h[2] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: strb w8, [sp, #32] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z5.h, z1.h[3] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: strb w9, [sp, #35] +; CHECK-NEXT: strb w8, [sp, #44] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: mov z6.h, z1.h[2] +; CHECK-NEXT: mov z1.h, z1.h[1] +; CHECK-NEXT: mov z7.h, z2.h[3] +; CHECK-NEXT: strb w8, [sp, #40] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: strb w10, [sp, #34] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: strb w9, [sp, #47] +; CHECK-NEXT: strb w8, [sp, 
#33] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: mov z16.h, z2.h[2] +; CHECK-NEXT: mov z1.h, z2.h[1] +; CHECK-NEXT: strb w10, [sp, #46] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: strb w8, [sp, #45] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strb w9, [sp, #43] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: mov z2.h, z0.h[2] +; CHECK-NEXT: mov z3.h, z0.h[1] +; CHECK-NEXT: strb w10, [sp, #42] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strb w8, [sp, #41] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strb w9, [sp, #36] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w10, [sp, #39] +; CHECK-NEXT: strb w8, [sp, #38] +; CHECK-NEXT: strb w9, [sp, #37] +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret + %ptrs = getelementptr i8, ptr %src, <16 x i64> + %data = tail call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 2, <16 x i1> , <16 x i8> undef) + store <16 x i8> %data, ptr %dst, align 1 + ret void +} + +define void @masked_gather_base_plus_stride_v32i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #96 +; CHECK-NEXT: .cfi_def_cfa_offset 96 +; CHECK-NEXT: mov z0.s, #-84 // =0xffffffffffffffac +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: add z0.s, z1.s, z0.s +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: mov z2.s, z0.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z3.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: fmov w11, s0 +; CHECK-NEXT: mov z0.s, #-56 // =0xffffffffffffffc8 +; CHECK-NEXT: strh w8, [sp, #56] +; CHECK-NEXT: add z0.s, z1.s, z0.s +; CHECK-NEXT: strh w9, [sp, #62] +; CHECK-NEXT: strh w10, [sp, #60] +; CHECK-NEXT: strh w11, [sp, #58] +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: mov z2.s, z0.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z3.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: fmov w11, s0 +; CHECK-NEXT: mov z0.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: strh w8, [sp, #48] +; CHECK-NEXT: add z0.s, z1.s, z0.s +; CHECK-NEXT: strh w9, [sp, #54] +; CHECK-NEXT: adrp x9, .LCPI4_0 +; CHECK-NEXT: strh w10, [sp, #52] +; CHECK-NEXT: strh w11, [sp, #50] +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w11, s2 +; CHECK-NEXT: strh w8, [sp, #40] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr q0, [x9, :lo12:.LCPI4_0] +; CHECK-NEXT: strh w10, [sp, #46] +; CHECK-NEXT: strh w11, [sp, #44] +; CHECK-NEXT: strh w8, [sp, #42] +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #32] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: strh w9, [sp, #38] +; CHECK-NEXT: strh w10, [sp, #36] +; CHECK-NEXT: strh w8, [sp, #34] +; CHECK-NEXT: ld1b { z1.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, 
[sp] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.d, z0.d +; CHECK-NEXT: strh w9, [sp, #6] +; CHECK-NEXT: add z2.s, z2.s, #84 // =0x54 +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: strh w8, [sp, #2] +; CHECK-NEXT: ld1b { z1.s }, p0/z, [x1, z2.s, sxtw] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.d, z0.d +; CHECK-NEXT: add z0.s, z0.s, #28 // =0x1c +; CHECK-NEXT: strh w9, [sp, #30] +; CHECK-NEXT: add z2.s, z2.s, #56 // =0x38 +; CHECK-NEXT: strh w10, [sp, #28] +; CHECK-NEXT: strh w8, [sp, #26] +; CHECK-NEXT: ld1b { z1.s }, p0/z, [x1, z2.s, sxtw] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w9, [sp, #22] +; CHECK-NEXT: strh w10, [sp, #20] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x1, z0.s, sxtw] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldp d1, d0, [sp, #48] +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: mov z4.h, z1.h[3] +; CHECK-NEXT: mov z5.h, z1.h[2] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.h, z0.h[3] +; CHECK-NEXT: mov z3.h, z0.h[2] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strb w8, [sp, #92] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z1.h[1] +; CHECK-NEXT: strb w9, [sp, #95] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strb w8, [sp, #88] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr d0, [sp, #40] +; CHECK-NEXT: strb w10, [sp, #94] +; CHECK-NEXT: strb w9, [sp, #91] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strb w8, [sp, #93] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.h, z0.h[2] +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: mov z3.h, z0.h[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strb w8, [sp, #89] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr d0, [sp, #32] +; CHECK-NEXT: strb w10, [sp, #90] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strb w9, [sp, #84] +; CHECK-NEXT: strb w8, [sp, #86] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: mov z2.h, z0.h[2] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: strb w10, [sp, #87] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strb w8, [sp, #80] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr d0, [sp] +; CHECK-NEXT: strb w9, [sp, #85] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strb w10, [sp, #82] +; CHECK-NEXT: ldp d2, d1, [sp, #16] +; CHECK-NEXT: strb w8, [sp, #81] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z3.h, z0.h[3] +; CHECK-NEXT: strb w9, [sp, #83] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: mov z4.h, z0.h[2] +; CHECK-NEXT: strb w8, [sp, #64] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z5.h, z1.h[3] +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: strb w9, [sp, #67] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: mov z6.h, z1.h[2] +; CHECK-NEXT: strb w8, [sp, #76] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z1.h, z1.h[1] +; CHECK-NEXT: mov z7.h, 
z2.h[3] +; CHECK-NEXT: strb w10, [sp, #66] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: strb w8, [sp, #72] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: strb w9, [sp, #79] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: mov z16.h, z2.h[2] +; CHECK-NEXT: strb w8, [sp, #65] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z2.h[1] +; CHECK-NEXT: strb w10, [sp, #78] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: strb w9, [sp, #75] +; CHECK-NEXT: strb w8, [sp, #77] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: mov z2.h, z0.h[2] +; CHECK-NEXT: mov z3.h, z0.h[1] +; CHECK-NEXT: strb w10, [sp, #74] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strb w8, [sp, #73] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strb w9, [sp, #68] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w10, [sp, #71] +; CHECK-NEXT: strb w8, [sp, #70] +; CHECK-NEXT: strb w9, [sp, #69] +; CHECK-NEXT: ldp q0, q1, [sp, #64] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #96 +; CHECK-NEXT: ret + %ptrs = getelementptr i8, ptr %src, <32 x i64> + %data = tail call <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr> %ptrs, i32 2, <32 x i1> , <32 x i8> undef) + store <32 x i8> %data, ptr %dst, align 1 + ret void +} + +; i16 +define void @masked_gather_base_plus_stride_v2i16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: st1h { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr i16, ptr %src, <2 x i64> + %data = tail call <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr> %ptrs, i32 2, <2 x i1> , <2 x i16> undef) + store <2 x i16> %data, ptr %dst, align 2 + ret void +} + +define void @masked_gather_base_plus_stride_v4i16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = getelementptr i16, ptr %src, <4 x i64> + %data = tail call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> , <4 x i16> undef) + store <4 x i16> %data, ptr %dst, align 2 + ret void +} + +define void @masked_gather_base_plus_stride_v8i16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: add z0.s, z0.s, #28 // =0x1c +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: strh w8, [sp, #-16]! 
+; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z3.s, z0.s[1] +; CHECK-NEXT: strh w9, [sp, #4] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = getelementptr i16, ptr %src, <8 x i64> + %data = tail call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> , <8 x i16> undef) + store <8 x i16> %data, ptr %dst, align 2 + ret void +} + +define void @masked_gather_base_plus_stride_v16i16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: mov z2.d, z0.d +; CHECK-NEXT: add z2.s, z2.s, #28 // =0x1c +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w8, [sp, #-32]! +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: mov z3.s, z1.s[3] +; CHECK-NEXT: ld1h { z2.s }, p0/z, [x1, z2.s, sxtw #1] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z1.s, z2.s[3] +; CHECK-NEXT: mov z3.s, z2.s[2] +; CHECK-NEXT: mov z4.s, z2.s[1] +; CHECK-NEXT: strh w9, [sp, #4] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: mov z1.d, z0.d +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: add z1.s, z1.s, #84 // =0x54 +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: add z0.s, z0.s, #56 // =0x38 +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x1, z1.s, sxtw #1] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z3.s, z0.s[1] +; CHECK-NEXT: strh w9, [sp, #28] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #26] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #22] +; CHECK-NEXT: strh w10, [sp, #20] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %ptrs = getelementptr i16, ptr %src, <16 x i64> + %data = tail call <16 x i16> @llvm.masked.gather.v16i16.v16p0(<16 x ptr> %ptrs, i32 2, <16 x i1> , <16 x i16> undef) + store <16 x i16> %data, ptr %dst, align 2 + ret void +} + +; i32 +define void @masked_gather_base_plus_stride_v2i32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v2i32: +; CHECK: // 
%bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x1, z0.s, sxtw #2] +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr i32, ptr %src, <2 x i64> + %data = tail call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> , <2 x i32> undef) + store <2 x i32> %data, ptr %dst, align 4 + ret void +} + +define void @masked_gather_base_plus_stride_v4i32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x1, z0.s, sxtw #2] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr i32, ptr %src, <4 x i64> + %data = tail call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> , <4 x i32> undef) + store <4 x i32> %data, ptr %dst, align 4 + ret void +} + +define void @masked_gather_base_plus_stride_v8i32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: mov z1.d, z0.d +; CHECK-NEXT: add z1.s, z1.s, #28 // =0x1c +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1, z1.s, sxtw #2] +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x1, z0.s, sxtw #2] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr i32, ptr %src, <8 x i64> + %data = tail call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> , <8 x i32> undef) + store <8 x i32> %data, ptr %dst, align 4 + ret void +} + +; i64 +define void @masked_gather_base_plus_stride_v2i64(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov x8, #-32 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: index z0.d, #-2, x8 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x1, z0.d, lsl #3] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr i64, ptr %src, <2 x i64> + %data = tail call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> %ptrs, i32 8, <2 x i1> , <2 x i64> undef) + store <2 x i64> %data, ptr %dst, align 8 + ret void +} + +define void @masked_gather_base_plus_stride_v4i64(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov x8, #-32 +; CHECK-NEXT: mov z0.d, #-66 // =0xffffffffffffffbe +; CHECK-NEXT: mov z1.d, #-2 // =0xfffffffffffffffe +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: index z2.d, #0, x8 +; CHECK-NEXT: add z0.d, z2.d, z0.d +; CHECK-NEXT: add z1.d, z2.d, z1.d +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1, z1.d, lsl #3] +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x1, z0.d, lsl #3] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr i64, ptr %src, <4 x i64> + %data = tail call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> , <4 x i64> undef) + store <4 x i64> %data, ptr %dst, align 8 + ret void +} + +; f16 +define void @masked_gather_base_plus_stride_v2f16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI14_1 +; CHECK-NEXT: adrp x9, .LCPI14_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI14_1] +; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI14_0] +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x1, z1.s, sxtw #1] +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w8, 
s0 +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: strh w9, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: str w8, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = getelementptr half, ptr %src, <2 x i64> + %data = tail call <2 x half> @llvm.masked.gather.v2f16.v2p0(<2 x ptr> %ptrs, i32 2, <2 x i1> , <2 x half> undef) + store <2 x half> %data, ptr %dst, align 2 + ret void +} + +define void @masked_gather_base_plus_stride_v4f16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = getelementptr half, ptr %src, <4 x i64> + %data = tail call <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> , <4 x half> undef) + store <4 x half> %data, ptr %dst, align 2 + ret void +} + +define void @masked_gather_base_plus_stride_v8f16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: add z0.s, z0.s, #28 // =0x1c +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: strh w8, [sp, #-16]! +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z3.s, z0.s[1] +; CHECK-NEXT: strh w9, [sp, #4] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = getelementptr half, ptr %src, <8 x i64> + %data = tail call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> , <8 x half> undef) + store <8 x half> %data, ptr %dst, align 2 + ret void +} + +define void @masked_gather_base_plus_stride_v16f16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: mov z2.d, z0.d +; CHECK-NEXT: add z2.s, z2.s, #28 // =0x1c +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w8, [sp, #-32]! 
+; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: mov z3.s, z1.s[3] +; CHECK-NEXT: ld1h { z2.s }, p0/z, [x1, z2.s, sxtw #1] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z1.s, z2.s[3] +; CHECK-NEXT: mov z3.s, z2.s[2] +; CHECK-NEXT: mov z4.s, z2.s[1] +; CHECK-NEXT: strh w9, [sp, #4] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: mov z1.d, z0.d +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: add z1.s, z1.s, #84 // =0x54 +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: add z0.s, z0.s, #56 // =0x38 +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x1, z1.s, sxtw #1] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x1, z0.s, sxtw #1] +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z3.s, z0.s[1] +; CHECK-NEXT: strh w9, [sp, #28] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #26] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #22] +; CHECK-NEXT: strh w10, [sp, #20] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %ptrs = getelementptr half, ptr %src, <16 x i64> + %data = tail call <16 x half> @llvm.masked.gather.v16f16.v16p0(<16 x ptr> %ptrs, i32 2, <16 x i1> , <16 x half> undef) + store <16 x half> %data, ptr %dst, align 2 + ret void +} + +; f32 +define void @masked_gather_base_plus_stride_v2f32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x1, z0.s, sxtw #2] +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr float, ptr %src, <2 x i64> + %data = tail call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> , <2 x float> undef) + store <2 x float> %data, ptr %dst, align 4 + ret void +} + +define void @masked_gather_base_plus_stride_v4f32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x1, z0.s, sxtw #2] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr float, ptr %src, <4 x i64> + %data = tail call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> , <4 x float> undef) + store <4 x float> %data, ptr %dst, align 4 + ret void +} + +define void @masked_gather_base_plus_stride_v8f32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: mov z1.d, z0.d +; CHECK-NEXT: add z1.s, z1.s, #28 // =0x1c +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1, z1.s, sxtw #2] +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x1, z0.s, sxtw #2] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %ptrs = 
getelementptr float, ptr %src, <8 x i64> + %data = tail call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> , <8 x float> undef) + store <8 x float> %data, ptr %dst, align 4 + ret void +} + +; f64 +define void @masked_gather_base_plus_stride_v2f64(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov x8, #-32 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: index z0.d, #-2, x8 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x1, z0.d, lsl #3] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr double, ptr %src, <2 x i64> + %data = tail call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 8, <2 x i1> , <2 x double> undef) + store <2 x double> %data, ptr %dst, align 8 + ret void +} + +define void @masked_gather_base_plus_stride_v4f64(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_gather_base_plus_stride_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov x8, #-32 +; CHECK-NEXT: mov z0.d, #-66 // =0xffffffffffffffbe +; CHECK-NEXT: mov z1.d, #-2 // =0xfffffffffffffffe +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: index z2.d, #0, x8 +; CHECK-NEXT: add z0.d, z2.d, z0.d +; CHECK-NEXT: add z1.d, z2.d, z1.d +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1, z1.d, lsl #3] +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x1, z0.d, lsl #3] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %ptrs = getelementptr double, ptr %src, <4 x i64> + %data = tail call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> , <4 x double> undef) + store <4 x double> %data, ptr %dst, align 8 + ret void +} + +; masked_scatter + +; i8 +define void @masked_scatter_base_plus_stride_v2i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrb w8, [x1, #1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: str w8, [sp, #12] +; CHECK-NEXT: ldrb w8, [x1] +; CHECK-NEXT: str w8, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: st1b { z0.s }, p0, [x0, z1.s, sxtw] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %data = load <2 x i8>, ptr %src, align 2 + %ptrs = getelementptr i8, ptr %dst, <2 x i64> + tail call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> %data, <2 x ptr> %ptrs, i32 1, <2 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v4i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr s1, [x1] +; CHECK-NEXT: index z0.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: uunpklo z1.h, z1.b +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1b { z1.s }, p0, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %data = load <4 x i8>, ptr %src, align 2 + %ptrs = getelementptr i8, ptr %dst, <4 x i64> + tail call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %data, <4 x ptr> %ptrs, i32 1, <4 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v8i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: index z0.s, #0, #-7 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z2.b, z1.b[3] +; CHECK-NEXT: mov z3.b, z1.b[2] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z1.b[1] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #-16]! 
+; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strh w9, [sp, #6] +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: mov z2.b, z1.b[7] +; CHECK-NEXT: mov z3.b, z1.b[6] +; CHECK-NEXT: mov z5.b, z1.b[5] +; CHECK-NEXT: strh w8, [sp, #2] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr d4, [sp] +; CHECK-NEXT: mov z1.b, z1.b[4] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: uunpklo z2.s, z4.h +; CHECK-NEXT: st1b { z2.s }, p0, [x0, z0.s, sxtw] +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: mov z2.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: add z0.s, z0.s, z2.s +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: ldr d1, [sp, #8] +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1b { z1.s }, p0, [x0, z0.s, sxtw] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %data = load <8 x i8>, ptr %src, align 2 + %ptrs = getelementptr i8, ptr %dst, <8 x i64> + tail call void @llvm.masked.scatter.v8i8.v8p0(<8 x i8> %data, <8 x ptr> %ptrs, i32 1, <8 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v16i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.b, z0.b[3] +; CHECK-NEXT: mov z3.b, z0.b[2] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z0.b[1] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strh w9, [sp, #22] +; CHECK-NEXT: mov z2.b, z0.b[7] +; CHECK-NEXT: strh w10, [sp, #20] +; CHECK-NEXT: mov z4.b, z0.b[6] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr d3, [sp, #16] +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z6.b, z0.b[4] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: uunpklo z2.s, z3.h +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: st1b { z2.s }, p0, [x0, z1.s, sxtw] +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: strh w9, [sp, #28] +; CHECK-NEXT: mov z2.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: strh w10, [sp, #26] +; CHECK-NEXT: add z1.s, z1.s, z2.s +; CHECK-NEXT: mov z4.b, z0.b[2] +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr d2, [sp, #24] +; CHECK-NEXT: mov z5.b, z0.b[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: index z3.s, #0, #7 +; CHECK-NEXT: mov z6.b, z0.b[5] +; CHECK-NEXT: uunpklo z2.s, z2.h +; CHECK-NEXT: st1b { z2.s }, p0, [x0, z1.s, sxtw] +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: strh w9, [sp, #6] +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: mov z1.b, z0.b[7] +; CHECK-NEXT: mov z5.d, z3.d +; CHECK-NEXT: mov z2.b, z0.b[6] +; CHECK-NEXT: strh w8, [sp, #2] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: ldr d4, [sp] +; CHECK-NEXT: mov z0.b, z0.b[4] +; CHECK-NEXT: add z5.s, z5.s, #7 // =0x7 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: add z3.s, z3.s, #35 // =0x23 +; CHECK-NEXT: uunpklo z1.s, z4.h +; CHECK-NEXT: st1b { z1.s }, p0, [x0, z5.s, sxtw] +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: ldr d0, 
[sp, #8] +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: st1b { z0.s }, p0, [x0, z3.s, sxtw] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %data = load <16 x i8>, ptr %src, align 2 + %ptrs = getelementptr i8, ptr %dst, <16 x i64> + tail call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %data, <16 x ptr> %ptrs, i32 1, <16 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v32i8(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: ldp q2, q0, [x1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z3.b, z2.b[3] +; CHECK-NEXT: mov z4.b, z2.b[2] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: mov z3.b, z2.b[1] +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #22] +; CHECK-NEXT: mov z3.b, z2.b[7] +; CHECK-NEXT: strh w10, [sp, #20] +; CHECK-NEXT: mov z5.b, z2.b[6] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: ldr d4, [sp, #16] +; CHECK-NEXT: mov z6.b, z2.b[5] +; CHECK-NEXT: mov z7.b, z2.b[4] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 +; CHECK-NEXT: uunpklo z3.s, z4.h +; CHECK-NEXT: mov z5.b, z2.b[3] +; CHECK-NEXT: st1b { z3.s }, p0, [x0, z1.s, sxtw] +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: strh w9, [sp, #28] +; CHECK-NEXT: strh w10, [sp, #26] +; CHECK-NEXT: mov z3.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: add z3.s, z1.s, z3.s +; CHECK-NEXT: mov z6.b, z2.b[2] +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr d4, [sp, #24] +; CHECK-NEXT: mov z7.b, z2.b[1] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: mov z5.b, z2.b[5] +; CHECK-NEXT: uunpklo z4.s, z4.h +; CHECK-NEXT: st1b { z4.s }, p0, [x0, z3.s, sxtw] +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: strh w9, [sp, #6] +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: mov z3.b, z2.b[7] +; CHECK-NEXT: mov z7.s, #-56 // =0xffffffffffffffc8 +; CHECK-NEXT: mov z4.b, z2.b[6] +; CHECK-NEXT: strh w8, [sp, #2] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: ldr d6, [sp] +; CHECK-NEXT: mov z2.b, z2.b[4] +; CHECK-NEXT: add z3.s, z1.s, z7.s +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: uunpklo z6.s, z6.h +; CHECK-NEXT: st1b { z6.s }, p0, [x0, z3.s, sxtw] +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: mov z3.s, #-84 // =0xffffffffffffffac +; CHECK-NEXT: add z1.s, z1.s, z3.s +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uunpklo z2.s, z2.h +; CHECK-NEXT: st1b { z2.s }, p0, [x0, z1.s, sxtw] +; CHECK-NEXT: mov z1.b, z0.b[3] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.b, z0.b[2] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: strh w8, [sp, #48] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w9, [sp, #54] +; CHECK-NEXT: adrp x9, .LCPI27_0 +; CHECK-NEXT: strh w10, [sp, #52] +; CHECK-NEXT: strh w8, [sp, #50] +; CHECK-NEXT: ldr d1, [sp, #48] +; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI27_0] +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1b { z1.s }, p0, [x0, z2.s, sxtw] +; CHECK-NEXT: mov z1.b, z0.b[7] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.b, z0.b[6] +; CHECK-NEXT: fmov w9, s1 +; 
CHECK-NEXT: mov z1.b, z0.b[5] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: mov z1.b, z0.b[4] +; CHECK-NEXT: strh w8, [sp, #62] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w9, [sp, #60] +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: strh w10, [sp, #58] +; CHECK-NEXT: index z2.s, #0, #7 +; CHECK-NEXT: strh w8, [sp, #56] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr d1, [sp, #56] +; CHECK-NEXT: mov z3.d, z2.d +; CHECK-NEXT: mov z4.b, z0.b[3] +; CHECK-NEXT: mov z5.b, z0.b[2] +; CHECK-NEXT: mov z6.b, z0.b[1] +; CHECK-NEXT: add z3.s, z3.s, #28 // =0x1c +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: st1b { z1.s }, p0, [x0, z3.s, sxtw] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strh w8, [sp, #32] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: strh w9, [sp, #38] +; CHECK-NEXT: mov z1.b, z0.b[7] +; CHECK-NEXT: strh w10, [sp, #36] +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: strh w8, [sp, #34] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: ldr d4, [sp, #32] +; CHECK-NEXT: mov z3.b, z0.b[6] +; CHECK-NEXT: mov z6.b, z0.b[5] +; CHECK-NEXT: mov z0.b, z0.b[4] +; CHECK-NEXT: add z5.s, z5.s, #56 // =0x38 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: uunpklo z1.s, z4.h +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: st1b { z1.s }, p0, [x0, z5.s, sxtw] +; CHECK-NEXT: strh w8, [sp, #46] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #44] +; CHECK-NEXT: strh w10, [sp, #42] +; CHECK-NEXT: add z2.s, z2.s, #84 // =0x54 +; CHECK-NEXT: strh w8, [sp, #40] +; CHECK-NEXT: ldr d0, [sp, #40] +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: st1b { z0.s }, p0, [x0, z2.s, sxtw] +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: ret + %data = load <32 x i8>, ptr %src, align 2 + %ptrs = getelementptr i8, ptr %dst, <32 x i64> + tail call void @llvm.masked.scatter.v32i8.v32p0(<32 x i8> %data, <32 x ptr> %ptrs, i32 1, <32 x i1> ) + ret void +} + +; i16 +define void @masked_scatter_base_plus_stride_v2i16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrh w8, [x1, #2] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: str w8, [sp, #12] +; CHECK-NEXT: ldrh w8, [x1] +; CHECK-NEXT: str w8, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: st1h { z0.s }, p0, [x0, z1.s, sxtw #1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %data = load <2 x i16>, ptr %src, align 2 + %ptrs = getelementptr i16, ptr %dst, <2 x i64> + tail call void @llvm.masked.scatter.v2i16.v2p0(<2 x i16> %data, <2 x ptr> %ptrs, i32 2, <2 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v4i16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: index z0.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1h { z1.s }, p0, [x0, z0.s, sxtw #1] +; CHECK-NEXT: ret + %data = load <4 x i16>, ptr %src, align 2 + %ptrs = getelementptr i16, ptr %dst, <4 x i64> + tail call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %data, <4 x ptr> %ptrs, i32 2, <4 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v8i16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: mov z3.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: add z3.s, z1.s, z3.s +; CHECK-NEXT: uunpklo z2.s, 
z0.h +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: st1h { z2.s }, p0, [x0, z1.s, sxtw #1] +; CHECK-NEXT: st1h { z0.s }, p0, [x0, z3.s, sxtw #1] +; CHECK-NEXT: ret + %data = load <8 x i16>, ptr %src, align 2 + %ptrs = getelementptr i16, ptr %dst, <8 x i64> + tail call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %data, <8 x ptr> %ptrs, i32 2, <8 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v16i16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x1] +; CHECK-NEXT: index z0.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: index z3.s, #0, #7 +; CHECK-NEXT: mov z5.d, z3.d +; CHECK-NEXT: add z3.s, z3.s, #35 // =0x23 +; CHECK-NEXT: uunpklo z4.s, z1.h +; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8 +; CHECK-NEXT: st1h { z4.s }, p0, [x0, z0.s, sxtw #1] +; CHECK-NEXT: mov z4.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: add z0.s, z0.s, z4.s +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1h { z1.s }, p0, [x0, z0.s, sxtw #1] +; CHECK-NEXT: uunpklo z0.s, z2.h +; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 +; CHECK-NEXT: add z5.s, z5.s, #7 // =0x7 +; CHECK-NEXT: uunpklo z1.s, z2.h +; CHECK-NEXT: st1h { z0.s }, p0, [x0, z5.s, sxtw #1] +; CHECK-NEXT: st1h { z1.s }, p0, [x0, z3.s, sxtw #1] +; CHECK-NEXT: ret + %data = load <16 x i16>, ptr %src, align 2 + %ptrs = getelementptr i16, ptr %dst, <16 x i64> + tail call void @llvm.masked.scatter.v16i16.v16p0(<16 x i16> %data, <16 x ptr> %ptrs, i32 2, <16 x i1> ) + ret void +} + +; i32 +define void @masked_scatter_base_plus_stride_v2i32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: st1w { z0.s }, p0, [x0, z1.s, sxtw #2] +; CHECK-NEXT: ret + %data = load <2 x i32>, ptr %src, align 4 + %ptrs = getelementptr i32, ptr %dst, <2 x i64> + tail call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> %data, <2 x ptr> %ptrs, i32 4, <2 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v4i32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: st1w { z0.s }, p0, [x0, z1.s, sxtw #2] +; CHECK-NEXT: ret + %data = load <4 x i32>, ptr %src, align 4 + %ptrs = getelementptr i32, ptr %dst, <4 x i64> + tail call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data, <4 x ptr> %ptrs, i32 4, <4 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v8i32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: index z2.s, #0, #-7 +; CHECK-NEXT: mov z3.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: add z3.s, z2.s, z3.s +; CHECK-NEXT: st1w { z1.s }, p0, [x0, z2.s, sxtw #2] +; CHECK-NEXT: st1w { z0.s }, p0, [x0, z3.s, sxtw #2] +; CHECK-NEXT: ret + %data = load <8 x i32>, ptr %src, align 4 + %ptrs = getelementptr i32, ptr %dst, <8 x i64> + tail call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %data, <8 x ptr> %ptrs, i32 4, <8 x i1> ) + ret void +} + +; i64 +define void @masked_scatter_base_plus_stride_v2i64(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: index z1.d, #-2, #3 +; CHECK-NEXT: 
ptrue p0.d, vl2 +; CHECK-NEXT: st1d { z0.d }, p0, [x0, z1.d, lsl #3] +; CHECK-NEXT: ret + %data = load <2 x i64>, ptr %src, align 8 + %ptrs = getelementptr i64, ptr %dst, <2 x i64> + tail call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %data, <2 x ptr> %ptrs, i32 8, <2 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v4i64(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q2, q1, [x1] +; CHECK-NEXT: mov z0.d, #-2 // =0xfffffffffffffffe +; CHECK-NEXT: index z3.d, #0, #3 +; CHECK-NEXT: add z0.d, z3.d, z0.d +; CHECK-NEXT: add z3.d, z3.d, #4 // =0x4 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: st1d { z2.d }, p0, [x0, z0.d, lsl #3] +; CHECK-NEXT: st1d { z1.d }, p0, [x0, z3.d, lsl #3] +; CHECK-NEXT: ret + %data = load <4 x i64>, ptr %src, align 8 + %ptrs = getelementptr i64, ptr %dst, <4 x i64> + tail call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> %data, <4 x ptr> %ptrs, i32 8, <4 x i1> ) + ret void +} + +; f16 +define void @masked_scatter_base_plus_stride_v2f16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI37_1 +; CHECK-NEXT: adrp x9, .LCPI37_0 +; CHECK-NEXT: ldr s0, [x1] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI37_1] +; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI37_0] +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0 +; CHECK-NEXT: st1h { z0.s }, p0, [x0, z2.s, sxtw #1] +; CHECK-NEXT: ret + %data = load <2 x half>, ptr %src, align 2 + %ptrs = getelementptr half, ptr %dst, <2 x i64> + tail call void @llvm.masked.scatter.v2f16.v2p0(<2 x half> %data, <2 x ptr> %ptrs, i32 2, <2 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v4f16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: index z0.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1h { z1.s }, p0, [x0, z0.s, sxtw #1] +; CHECK-NEXT: ret + %data = load <4 x half>, ptr %src, align 2 + %ptrs = getelementptr half, ptr %dst, <4 x i64> + tail call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %data, <4 x ptr> %ptrs, i32 2, <4 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v8f16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: mov z3.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: add z3.s, z1.s, z3.s +; CHECK-NEXT: uunpklo z2.s, z0.h +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: st1h { z2.s }, p0, [x0, z1.s, sxtw #1] +; CHECK-NEXT: st1h { z0.s }, p0, [x0, z3.s, sxtw #1] +; CHECK-NEXT: ret + %data = load <8 x half>, ptr %src, align 2 + %ptrs = getelementptr half, ptr %dst, <8 x i64> + tail call void @llvm.masked.scatter.v8f16.v8p0(<8 x half> %data, <8 x ptr> %ptrs, i32 2, <8 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v16f16(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x1] +; CHECK-NEXT: index z0.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: index z3.s, #0, #7 +; CHECK-NEXT: mov z5.d, z3.d +; CHECK-NEXT: add z3.s, z3.s, #35 // =0x23 +; CHECK-NEXT: uunpklo z4.s, z1.h +; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8 +; CHECK-NEXT: st1h { 
z4.s }, p0, [x0, z0.s, sxtw #1] +; CHECK-NEXT: mov z4.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: add z0.s, z0.s, z4.s +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1h { z1.s }, p0, [x0, z0.s, sxtw #1] +; CHECK-NEXT: uunpklo z0.s, z2.h +; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 +; CHECK-NEXT: add z5.s, z5.s, #7 // =0x7 +; CHECK-NEXT: uunpklo z1.s, z2.h +; CHECK-NEXT: st1h { z0.s }, p0, [x0, z5.s, sxtw #1] +; CHECK-NEXT: st1h { z1.s }, p0, [x0, z3.s, sxtw #1] +; CHECK-NEXT: ret + %data = load <16 x half>, ptr %src, align 2 + %ptrs = getelementptr half, ptr %dst, <16 x i64> + tail call void @llvm.masked.scatter.v16f16.v16p0(<16 x half> %data, <16 x ptr> %ptrs, i32 2, <16 x i1> ) + ret void +} + +; f32 +define void @masked_scatter_base_plus_stride_v2f32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: st1w { z0.s }, p0, [x0, z1.s, sxtw #2] +; CHECK-NEXT: ret + %data = load <2 x float>, ptr %src, align 4 + %ptrs = getelementptr float, ptr %dst, <2 x i64> + tail call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> %data, <2 x ptr> %ptrs, i32 4, <2 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v4f32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: index z1.s, #0, #-7 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: st1w { z0.s }, p0, [x0, z1.s, sxtw #2] +; CHECK-NEXT: ret + %data = load <4 x float>, ptr %src, align 4 + %ptrs = getelementptr float, ptr %dst, <4 x i64> + tail call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %data, <4 x ptr> %ptrs, i32 4, <4 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v8f32(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: index z2.s, #0, #-7 +; CHECK-NEXT: mov z3.s, #-28 // =0xffffffffffffffe4 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: add z3.s, z2.s, z3.s +; CHECK-NEXT: st1w { z1.s }, p0, [x0, z2.s, sxtw #2] +; CHECK-NEXT: st1w { z0.s }, p0, [x0, z3.s, sxtw #2] +; CHECK-NEXT: ret + %data = load <8 x float>, ptr %src, align 4 + %ptrs = getelementptr float, ptr %dst, <8 x i64> + tail call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> %data, <8 x ptr> %ptrs, i32 4, <8 x i1> ) + ret void +} + +; f64 +define void @masked_scatter_base_plus_stride_v2f64(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: index z1.d, #-2, #3 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: st1d { z0.d }, p0, [x0, z1.d, lsl #3] +; CHECK-NEXT: ret + %data = load <2 x double>, ptr %src, align 8 + %ptrs = getelementptr double, ptr %dst, <2 x i64> + tail call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %data, <2 x ptr> %ptrs, i32 8, <2 x i1> ) + ret void +} + +define void @masked_scatter_base_plus_stride_v4f64(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: masked_scatter_base_plus_stride_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q2, q1, [x1] +; CHECK-NEXT: mov z0.d, #-2 // =0xfffffffffffffffe +; CHECK-NEXT: index z3.d, #0, #3 +; CHECK-NEXT: add z0.d, z3.d, z0.d +; CHECK-NEXT: add z3.d, z3.d, #4 // =0x4 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: st1d { z2.d }, p0, [x0, z0.d, lsl #3] +; CHECK-NEXT: st1d { z1.d }, p0, [x0, z3.d, lsl #3] +; CHECK-NEXT: ret + %data = load <4 x double>, ptr %src, 
align 8 + %ptrs = getelementptr double, ptr %dst, <4 x i64> + tail call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %data, <4 x ptr> %ptrs, i32 8, <4 x i1> ) + ret void +} + +declare <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x i8>) +declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32 immarg, <4 x i1>, <4 x i8>) +declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i8>) +declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x i8>) +declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 x i8>) +declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x i16>) +declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32 immarg, <4 x i1>, <4 x i16>) +declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i16>) +declare <16 x i16> @llvm.masked.gather.v16i16.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x i16>) +declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x i32>) +declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32 immarg, <4 x i1>, <4 x i32>) +declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i32>) +declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x i64>) +declare <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr>, i32 immarg, <4 x i1>, <4 x i64>) +declare <2 x half> @llvm.masked.gather.v2f16.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x half>) +declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32 immarg, <4 x i1>, <4 x half>) +declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x half>) +declare <16 x half> @llvm.masked.gather.v16f16.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x half>) +declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x float>) +declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32 immarg, <4 x i1>, <4 x float>) +declare <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x float>) +declare <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x double>) +declare <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr>, i32 immarg, <4 x i1>, <4 x double>) + +declare void @llvm.masked.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, i32 immarg, <2 x i1>) +declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32 immarg, <4 x i1>) +declare void @llvm.masked.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, i32 immarg, <8 x i1>) +declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32 immarg, <16 x i1>) +declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32 immarg, <32 x i1>) +declare void @llvm.masked.scatter.v2i16.v2p0(<2 x i16>, <2 x ptr>, i32 immarg, <2 x i1>) +declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32 immarg, <4 x i1>) +declare void @llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32 immarg, <8 x i1>) +declare void @llvm.masked.scatter.v16i16.v16p0(<16 x i16>, <16 x ptr>, i32 immarg, <16 x i1>) +declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32 immarg, <2 x i1>) +declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32 immarg, <4 x i1>) +declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32 immarg, <8 x i1>) +declare void @llvm.masked.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, i32 immarg, <2 x 
i1>) +declare void @llvm.masked.scatter.v4i64.v4p0(<4 x i64>, <4 x ptr>, i32 immarg, <4 x i1>) +declare void @llvm.masked.scatter.v2f16.v2p0(<2 x half>, <2 x ptr>, i32 immarg, <2 x i1>) +declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32 immarg, <4 x i1>) +declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32 immarg, <8 x i1>) +declare void @llvm.masked.scatter.v16f16.v16p0(<16 x half>, <16 x ptr>, i32 immarg, <16 x i1>) +declare void @llvm.masked.scatter.v2f32.v2p0(<2 x float>, <2 x ptr>, i32 immarg, <2 x i1>) +declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32 immarg, <4 x i1>) +declare void @llvm.masked.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, i32 immarg, <8 x i1>) +declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32 immarg, <2 x i1>) +declare void @llvm.masked.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, i32 immarg, <4 x i1>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll @@ -0,0 +1,191 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; i8 +define <4 x i8> @vls_sve_and_4xi8(<4 x i8> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_4xi8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %c = and <4 x i8> %b, + ret <4 x i8> %c +} + +define <8 x i8> @vls_sve_and_8xi8(<8 x i8> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_8xi8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI1_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI1_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %c = and <8 x i8> %b, + ret <8 x i8> %c +} + +define <16 x i8> @vls_sve_and_16xi8(<16 x i8> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_16xi8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI2_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %c = and <16 x i8> %b, + ret <16 x i8> %c +} + +define <32 x i8> @vls_sve_and_32xi8(<32 x i8> %ap) nounwind #0 { +; CHECK-LABEL: vls_sve_and_32xi8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %b = and <32 x i8> %ap, + ret <32 x i8> %b +} + +; i16 +define <2 x i16> @vls_sve_and_2xi16(<2 x i16> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_2xi16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: stp wzr, w8, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, 
#8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %c = and <2 x i16> %b, + ret <2 x i16> %c +} + +define <4 x i16> @vls_sve_and_4xi16(<4 x i16> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_4xi16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %c = and <4 x i16> %b, + ret <4 x i16> %c +} + +define <8 x i16> @vls_sve_and_8xi16(<8 x i16> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_8xi16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI6_0 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI6_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %c = and <8 x i16> %b, + ret <8 x i16> %c +} + +define <16 x i16> @vls_sve_and_16xi16(<16 x i16> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_16xi16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI7_0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI7_0] +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %c = and <16 x i16> %b, + ret <16 x i16> %c +} + +; i32 +define <2 x i32> @vls_sve_and_2xi32(<2 x i32> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_2xi32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: index z1.s, #0, #-1 +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %c = and <2 x i32> %b, + ret <2 x i32> %c +} + +define <4 x i32> @vls_sve_and_4xi32(<4 x i32> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_4xi32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI9_0 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI9_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %c = and <4 x i32> %b, + ret <4 x i32> %c +} + +define <8 x i32> @vls_sve_and_8xi32(<8 x i32> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_8xi32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI10_0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI10_0] +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %c = and <8 x i32> %b, + ret <8 x i32> %c +} + +; i64 +define <2 x i64> @vls_sve_and_2xi64(<2 x i64> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_2xi64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: index z1.d, #0, #-1 +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %c = and <2 x i64> %b, + ret <2 x i64> %c +} + +define <4 x i64> @vls_sve_and_4xi64(<4 x i64> %b) nounwind #0 { +; CHECK-LABEL: vls_sve_and_4xi64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: index z2.d, #0, #-1 +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; 
CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %c = and <4 x i64> %b, + ret <4 x i64> %c +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll @@ -0,0 +1,198 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define void @bitcast_v4i8(<4 x i8> *%a, <4 x i8>* %b) #0 { +; CHECK-LABEL: bitcast_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: st1b { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %load = load volatile <4 x i8>, <4 x i8>* %a + %cast = bitcast <4 x i8> %load to <4 x i8> + store volatile <4 x i8> %cast, <4 x i8>* %b + ret void +} + +define void @bitcast_v8i8(<8 x i8> *%a, <8 x i8>* %b) #0 { +; CHECK-LABEL: bitcast_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %load = load volatile <8 x i8>, <8 x i8>* %a + %cast = bitcast <8 x i8> %load to <8 x i8> + store volatile <8 x i8> %cast, <8 x i8>* %b + ret void +} + +define void @bitcast_v16i8(<16 x i8> *%a, <16 x i8>* %b) #0 { +; CHECK-LABEL: bitcast_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %load = load volatile <16 x i8>, <16 x i8>* %a + %cast = bitcast <16 x i8> %load to <16 x i8> + store volatile <16 x i8> %cast, <16 x i8>* %b + ret void +} + +define void @bitcast_v32i8(<32 x i8> *%a, <32 x i8>* %b) #0 { +; CHECK-LABEL: bitcast_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x0, #16] +; CHECK-NEXT: str q1, [x1, #16] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %load = load volatile <32 x i8>, <32 x i8>* %a + %cast = bitcast <32 x i8> %load to <32 x i8> + store volatile <32 x i8> %cast, <32 x i8>* %b + ret void +} + +define void @bitcast_v2i16(<2 x i16> *%a, <2 x half>* %b) #0 { +; CHECK-LABEL: bitcast_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrh w8, [x0, #2] +; CHECK-NEXT: str w8, [sp, #4] +; CHECK-NEXT: ldrh w8, [x0] +; CHECK-NEXT: str w8, [sp] +; CHECK-NEXT: ldr d0, [sp] +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: strh w9, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: str w8, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %load = load volatile <2 x i16>, <2 x i16>* %a + %cast = bitcast <2 x i16> %load to <2 x half> + store volatile <2 x half> %cast, <2 x half>* %b + ret void +} + +define void @bitcast_v4i16(<4 x i16> *%a, <4 x half>* %b) #0 { +; CHECK-LABEL: bitcast_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %load = load volatile <4 x i16>, <4 x i16>* %a + %cast = bitcast <4 x i16> %load to <4 x half> + store volatile <4 x half> %cast, <4 x half>* %b + ret void +} + +define void @bitcast_v8i16(<8 x i16> *%a, <8 x half>* %b) #0 { +; CHECK-LABEL: bitcast_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %load = load volatile <8 x i16>, <8 x i16>* %a + %cast = bitcast 
<8 x i16> %load to <8 x half> + store volatile <8 x half> %cast, <8 x half>* %b + ret void +} + +define void @bitcast_v16i16(<16 x i16> *%a, <16 x half>* %b) #0 { +; CHECK-LABEL: bitcast_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x0, #16] +; CHECK-NEXT: str q1, [x1, #16] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %load = load volatile <16 x i16>, <16 x i16>* %a + %cast = bitcast <16 x i16> %load to <16 x half> + store volatile <16 x half> %cast, <16 x half>* %b + ret void +} + +define void @bitcast_v2i32(<2 x i32> *%a, <2 x float>* %b) #0 { +; CHECK-LABEL: bitcast_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %load = load volatile <2 x i32>, <2 x i32>* %a + %cast = bitcast <2 x i32> %load to <2 x float> + store volatile <2 x float> %cast, <2 x float>* %b + ret void +} + +define void @bitcast_v4i32(<4 x i32> *%a, <4 x float>* %b) #0 { +; CHECK-LABEL: bitcast_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %load = load volatile <4 x i32>, <4 x i32>* %a + %cast = bitcast <4 x i32> %load to <4 x float> + store volatile <4 x float> %cast, <4 x float>* %b + ret void +} + +define void @bitcast_v8i32(<8 x i32> *%a, <8 x float>* %b) #0 { +; CHECK-LABEL: bitcast_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x0, #16] +; CHECK-NEXT: str q1, [x1, #16] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %load = load volatile <8 x i32>, <8 x i32>* %a + %cast = bitcast <8 x i32> %load to <8 x float> + store volatile <8 x float> %cast, <8 x float>* %b + ret void +} + +define void @bitcast_v1i64(<1 x i64> *%a, <1 x double>* %b) #0 { +; CHECK-LABEL: bitcast_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %load = load volatile <1 x i64>, <1 x i64>* %a + %cast = bitcast <1 x i64> %load to <1 x double> + store volatile <1 x double> %cast, <1 x double>* %b + ret void +} + +define void @bitcast_v2i64(<2 x i64> *%a, <2 x double>* %b) #0 { +; CHECK-LABEL: bitcast_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %load = load volatile <2 x i64>, <2 x i64>* %a + %cast = bitcast <2 x i64> %load to <2 x double> + store volatile <2 x double> %cast, <2 x double>* %b + ret void +} + +define void @bitcast_v4i64(<4 x i64> *%a, <4 x double>* %b) #0 { +; CHECK-LABEL: bitcast_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x0, #16] +; CHECK-NEXT: str q1, [x1, #16] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %load = load volatile <4 x i64>, <4 x i64>* %a + %cast = bitcast <4 x i64> %load to <4 x double> + store volatile <4 x double> %cast, <4 x double>* %b + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll @@ -0,0 +1,464 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; ICMP EQ +; + +define <4 x i8> @icmp_eq_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 
def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i8> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i8> + ret <4 x i8> %sext +} + +define <8 x i8> @icmp_eq_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <8 x i8> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i8> + ret <8 x i8> %sext +} + +define <16 x i8> @icmp_eq_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <16 x i8> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} + +define void @icmp_eq_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: icmp_eq_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z2.b +; CHECK-NEXT: mov z0.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.b, p0/z, z1.b, z3.b +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %cmp = icmp eq <32 x i8> %op1, %op2 + %sext = sext <32 x i1> %cmp to <32 x i8> + store <32 x i8> %sext, <32 x i8>* %a + ret void +} + +define <2 x i16> @icmp_eq_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i16> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i16> + ret <2 x i16> %sext +} + +define <4 x i16> @icmp_eq_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i16> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i16> + ret <4 x i16> %sext +} + +define <8 x i16> @icmp_eq_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v8i16: +; 
CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <8 x i16> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} + +define void @icmp_eq_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_eq_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp eq <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +define <2 x i32> @icmp_eq_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i32> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i32> + ret <2 x i32> %sext +} + +define <4 x i32> @icmp_eq_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i32> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} + +define void @icmp_eq_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_eq_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z2.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp eq <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +define <1 x i64> @icmp_eq_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +; CHECK-LABEL: icmp_eq_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <1 x i64> %op1, %op2 + %sext = sext <1 x i1> %cmp to <1 x i64> + ret <1 x i64> %sext +} + +define <2 x i64> @icmp_eq_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, 
vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i64> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i64> + ret <2 x i64> %sext +} + +define void @icmp_eq_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_eq_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp eq <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP NE +; + +define void @icmp_ne_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: icmp_ne_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpne p1.b, p0/z, z0.b, z2.b +; CHECK-NEXT: mov z0.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpne p0.b, p0/z, z1.b, z3.b +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %cmp = icmp ne <32 x i8> %op1, %op2 + %sext = sext <32 x i1> %cmp to <32 x i8> + store <32 x i8> %sext, <32 x i8>* %a + ret void +} + +; +; ICMP SGE +; + +define void @icmp_sge_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_sge_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpge p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpge p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp sge <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +; +; ICMP SGT +; + +define void @icmp_sgt_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_sgt_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpgt p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpgt p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp sgt <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +; +; ICMP SLE +; + +define void @icmp_sle_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_sle_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpge p1.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpge p0.s, p0/z, 
z3.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp sle <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +; +; ICMP SLT +; + +define void @icmp_slt_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_slt_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp slt <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +; +; ICMP UGE +; + +define void @icmp_uge_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_uge_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphs p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphs p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp uge <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP UGT +; + +define void @icmp_ugt_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ugt_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphi p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphi p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ugt <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP ULE +; + +define void @icmp_ule_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ule_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphs p1.d, p0/z, z2.d, z0.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphs p0.d, p0/z, z3.d, z1.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ule <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP ULT +; + +define void @icmp_ult_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ult_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z0.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphi p0.d, p0/z, z3.d, z1.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // 
=0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ult <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll @@ -0,0 +1,39 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; == Matching first N elements == + +define <4 x i1> @reshuffle_v4i1_nxv4i1( %a) #0 { +; CHECK-LABEL: reshuffle_v4i1_nxv4i1: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s0 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: strh w11, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %el0 = extractelement %a, i32 0 + %el1 = extractelement %a, i32 1 + %el2 = extractelement %a, i32 2 + %el3 = extractelement %a, i32 3 + %v0 = insertelement <4 x i1> undef, i1 %el0, i32 0 + %v1 = insertelement <4 x i1> %v0, i1 %el1, i32 1 + %v2 = insertelement <4 x i1> %v1, i1 %el2, i32 2 + %v3 = insertelement <4 x i1> %v2, i1 %el3, i32 3 + ret <4 x i1> %v3 +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll @@ -0,0 +1,618 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; RBIT +; + +define <4 x i8> @bitreverse_v4i8(<4 x i8> %op) #0 { +; CHECK-LABEL: bitreverse_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i8> @llvm.bitreverse.v4i8(<4 x i8> %op) + ret <4 x i8> %res +} + +define <8 x i8> @bitreverse_v8i8(<8 x i8> %op) #0 { +; CHECK-LABEL: bitreverse_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %op) + ret <8 x i8> %res +} + +define <16 x i8> @bitreverse_v16i8(<16 x i8> %op) #0 { +; CHECK-LABEL: bitreverse_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; 
CHECK-NEXT: ret + %res = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %op) + ret <16 x i8> %res +} + +define void @bitreverse_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: bitreverse_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: rbit z1.b, p0/m, z1.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %op) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @bitreverse_v2i16(<2 x i16> %op) #0 { +; CHECK-LABEL: bitreverse_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %op) + ret <2 x i16> %res +} + +define <4 x i16> @bitreverse_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: bitreverse_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.bitreverse.v4i16(<4 x i16> %op) + ret <4 x i16> %res +} + +define <8 x i16> @bitreverse_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: bitreverse_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %op) + ret <8 x i16> %res +} + +define void @bitreverse_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: bitreverse_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: rbit z1.h, p0/m, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %op) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @bitreverse_v2i32(<2 x i32> %op) #0 { +; CHECK-LABEL: bitreverse_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %op) + ret <2 x i32> %res +} + +define <4 x i32> @bitreverse_v4i32(<4 x i32> %op) #0 { +; CHECK-LABEL: bitreverse_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %op) + ret <4 x i32> %res +} + +define void @bitreverse_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: bitreverse_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: rbit z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %op) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> 
@bitreverse_v1i64(<1 x i64> %op) #0 { +; CHECK-LABEL: bitreverse_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.bitreverse.v1i64(<1 x i64> %op) + ret <1 x i64> %res +} + +define <2 x i64> @bitreverse_v2i64(<2 x i64> %op) #0 { +; CHECK-LABEL: bitreverse_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %op) + ret <2 x i64> %res +} + +define void @bitreverse_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: bitreverse_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: rbit z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %op) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; REVB +; + +define <2 x i16> @bswap_v2i16(<2 x i16> %op) #0 { +; CHECK-LABEL: bswap_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: adrp x9, .LCPI14_1 +; CHECK-NEXT: adrp x10, .LCPI14_2 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: adrp x8, .LCPI14_3 +; CHECK-NEXT: ldr d2, [x9, :lo12:.LCPI14_1] +; CHECK-NEXT: movprfx z5, z0 +; CHECK-NEXT: lsr z5.s, p0/m, z5.s, z1.s +; CHECK-NEXT: ldr d3, [x10, :lo12:.LCPI14_2] +; CHECK-NEXT: movprfx z6, z0 +; CHECK-NEXT: lsr z6.s, p0/m, z6.s, z2.s +; CHECK-NEXT: ldr d4, [x8, :lo12:.LCPI14_3] +; CHECK-NEXT: adrp x8, .LCPI14_4 +; CHECK-NEXT: lslr z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: and z2.d, z6.d, z3.d +; CHECK-NEXT: and z0.d, z0.d, z4.d +; CHECK-NEXT: ldr d3, [x8, :lo12:.LCPI14_4] +; CHECK-NEXT: orr z2.d, z2.d, z5.d +; CHECK-NEXT: orr z0.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z3.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %op) + ret <2 x i16> %res +} + +define <4 x i16> @bswap_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: bswap_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI15_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI15_0] +; CHECK-NEXT: movprfx z2, z0 +; CHECK-NEXT: lsr z2.h, p0/m, z2.h, z1.h +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %op) + ret <4 x i16> %res +} + +define <8 x i16> @bswap_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: bswap_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI16_0 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] +; CHECK-NEXT: movprfx z2, z0 +; CHECK-NEXT: lsr z2.h, p0/m, z2.h, z1.h +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %op) + ret <8 x i16> %res +} + +define 
void @bswap_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: bswap_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI17_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q0, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] +; CHECK-NEXT: movprfx z3, z0 +; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z1.h +; CHECK-NEXT: movprfx z4, z2 +; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z1.h +; CHECK-NEXT: lsl z2.h, p0/m, z2.h, z1.h +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: orr z1.d, z2.d, z4.d +; CHECK-NEXT: orr z0.d, z0.d, z3.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %op) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @bswap_v2i32(<2 x i32> %op) #0 { +; CHECK-LABEL: bswap_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI18_1 +; CHECK-NEXT: adrp x9, .LCPI18_2 +; CHECK-NEXT: adrp x10, .LCPI18_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI18_1] +; CHECK-NEXT: adrp x8, .LCPI18_3 +; CHECK-NEXT: ldr d2, [x9, :lo12:.LCPI18_2] +; CHECK-NEXT: movprfx z5, z0 +; CHECK-NEXT: lsr z5.s, p0/m, z5.s, z1.s +; CHECK-NEXT: ldr d3, [x10, :lo12:.LCPI18_0] +; CHECK-NEXT: ldr d4, [x8, :lo12:.LCPI18_3] +; CHECK-NEXT: and z2.d, z5.d, z2.d +; CHECK-NEXT: movprfx z5, z0 +; CHECK-NEXT: lsr z5.s, p0/m, z5.s, z3.s +; CHECK-NEXT: lslr z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z3.s +; CHECK-NEXT: and z1.d, z1.d, z4.d +; CHECK-NEXT: orr z2.d, z2.d, z5.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %op) + ret <2 x i32> %res +} + +define <4 x i32> @bswap_v4i32(<4 x i32> %op) #0 { +; CHECK-LABEL: bswap_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI19_1 +; CHECK-NEXT: adrp x9, .LCPI19_2 +; CHECK-NEXT: adrp x10, .LCPI19_0 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI19_1] +; CHECK-NEXT: adrp x8, .LCPI19_3 +; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI19_2] +; CHECK-NEXT: movprfx z5, z0 +; CHECK-NEXT: lsr z5.s, p0/m, z5.s, z1.s +; CHECK-NEXT: ldr q3, [x10, :lo12:.LCPI19_0] +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI19_3] +; CHECK-NEXT: and z2.d, z5.d, z2.d +; CHECK-NEXT: movprfx z5, z0 +; CHECK-NEXT: lsr z5.s, p0/m, z5.s, z3.s +; CHECK-NEXT: lslr z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z3.s +; CHECK-NEXT: and z1.d, z1.d, z4.d +; CHECK-NEXT: orr z2.d, z2.d, z5.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %op) + ret <4 x i32> %res +} + +define void @bswap_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: bswap_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI20_0 +; CHECK-NEXT: adrp x9, .LCPI20_1 +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI20_0] +; CHECK-NEXT: adrp x8, .LCPI20_2 +; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI20_1] +; CHECK-NEXT: movprfx z5, z2 +; CHECK-NEXT: lsr z5.s, p0/m, z5.s, z0.s +; CHECK-NEXT: movprfx z6, z2 +; CHECK-NEXT: lsr z6.s, p0/m, z6.s, z1.s +; CHECK-NEXT: movprfx z7, z2 +; CHECK-NEXT: lsl z7.s, p0/m, z7.s, z0.s +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI20_2] +; CHECK-NEXT: adrp x8, .LCPI20_3 +; CHECK-NEXT: lsl z2.s, p0/m, z2.s, 
z1.s +; CHECK-NEXT: and z6.d, z6.d, z4.d +; CHECK-NEXT: ldr q16, [x8, :lo12:.LCPI20_3] +; CHECK-NEXT: orr z5.d, z6.d, z5.d +; CHECK-NEXT: movprfx z6, z3 +; CHECK-NEXT: lsr z6.s, p0/m, z6.s, z1.s +; CHECK-NEXT: and z4.d, z6.d, z4.d +; CHECK-NEXT: movprfx z6, z3 +; CHECK-NEXT: lsr z6.s, p0/m, z6.s, z0.s +; CHECK-NEXT: lslr z0.s, p0/m, z0.s, z3.s +; CHECK-NEXT: lslr z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: and z2.d, z2.d, z16.d +; CHECK-NEXT: and z1.d, z1.d, z16.d +; CHECK-NEXT: orr z3.d, z4.d, z6.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: orr z1.d, z7.d, z2.d +; CHECK-NEXT: orr z0.d, z0.d, z3.d +; CHECK-NEXT: orr z1.d, z1.d, z5.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %op) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @bswap_v1i64(<1 x i64> %op) #0 { +; CHECK-LABEL: bswap_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #56 +; CHECK-NEXT: mov w9, #40 +; CHECK-NEXT: mov w10, #65280 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: fmov d1, x8 +; CHECK-NEXT: mov w8, #24 +; CHECK-NEXT: fmov d2, x9 +; CHECK-NEXT: mov w9, #16711680 +; CHECK-NEXT: fmov d3, x10 +; CHECK-NEXT: mov w10, #8 +; CHECK-NEXT: fmov d4, x8 +; CHECK-NEXT: mov w8, #-16777216 +; CHECK-NEXT: fmov d5, x9 +; CHECK-NEXT: mov x9, #1095216660480 +; CHECK-NEXT: movprfx z16, z0 +; CHECK-NEXT: lsr z16.d, p0/m, z16.d, z2.d +; CHECK-NEXT: and z3.d, z16.d, z3.d +; CHECK-NEXT: fmov d7, x8 +; CHECK-NEXT: mov x8, #280375465082880 +; CHECK-NEXT: movprfx z16, z0 +; CHECK-NEXT: lsr z16.d, p0/m, z16.d, z4.d +; CHECK-NEXT: fmov d6, x10 +; CHECK-NEXT: and z5.d, z16.d, z5.d +; CHECK-NEXT: movprfx z16, z0 +; CHECK-NEXT: lsr z16.d, p0/m, z16.d, z6.d +; CHECK-NEXT: fmov d18, x8 +; CHECK-NEXT: mov x8, #71776119061217280 +; CHECK-NEXT: and z7.d, z16.d, z7.d +; CHECK-NEXT: fmov d17, x9 +; CHECK-NEXT: orr z5.d, z7.d, z5.d +; CHECK-NEXT: movprfx z16, z0 +; CHECK-NEXT: lsr z16.d, p0/m, z16.d, z1.d +; CHECK-NEXT: fmov d7, x8 +; CHECK-NEXT: lslr z6.d, p0/m, z6.d, z0.d +; CHECK-NEXT: lslr z4.d, p0/m, z4.d, z0.d +; CHECK-NEXT: lslr z2.d, p0/m, z2.d, z0.d +; CHECK-NEXT: and z6.d, z6.d, z17.d +; CHECK-NEXT: and z4.d, z4.d, z18.d +; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: and z1.d, z2.d, z7.d +; CHECK-NEXT: orr z3.d, z3.d, z16.d +; CHECK-NEXT: orr z2.d, z4.d, z6.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: orr z1.d, z5.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.bswap.v1i64(<1 x i64> %op) + ret <1 x i64> %res +} + +define <2 x i64> @bswap_v2i64(<2 x i64> %op) #0 { +; CHECK-LABEL: bswap_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI22_0 +; CHECK-NEXT: adrp x9, .LCPI22_1 +; CHECK-NEXT: adrp x10, .LCPI22_2 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI22_0] +; CHECK-NEXT: adrp x8, .LCPI22_3 +; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI22_1] +; CHECK-NEXT: adrp x9, .LCPI22_4 +; CHECK-NEXT: ldr q3, [x10, :lo12:.LCPI22_2] +; CHECK-NEXT: adrp x10, .LCPI22_5 +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI22_3] +; CHECK-NEXT: adrp x8, .LCPI22_6 +; CHECK-NEXT: ldr q5, [x9, :lo12:.LCPI22_4] +; CHECK-NEXT: adrp x9, .LCPI22_7 +; CHECK-NEXT: movprfx z16, z0 +; CHECK-NEXT: lsr z16.d, p0/m, z16.d, z2.d +; CHECK-NEXT: and z3.d, z16.d, z3.d +; CHECK-NEXT: ldr q7, [x8, 
:lo12:.LCPI22_6] +; CHECK-NEXT: adrp x8, .LCPI22_8 +; CHECK-NEXT: movprfx z16, z0 +; CHECK-NEXT: lsr z16.d, p0/m, z16.d, z4.d +; CHECK-NEXT: ldr q6, [x10, :lo12:.LCPI22_5] +; CHECK-NEXT: and z5.d, z16.d, z5.d +; CHECK-NEXT: movprfx z16, z0 +; CHECK-NEXT: lsr z16.d, p0/m, z16.d, z6.d +; CHECK-NEXT: ldr q18, [x8, :lo12:.LCPI22_8] +; CHECK-NEXT: adrp x8, .LCPI22_9 +; CHECK-NEXT: and z7.d, z16.d, z7.d +; CHECK-NEXT: ldr q17, [x9, :lo12:.LCPI22_7] +; CHECK-NEXT: orr z5.d, z7.d, z5.d +; CHECK-NEXT: movprfx z16, z0 +; CHECK-NEXT: lsr z16.d, p0/m, z16.d, z1.d +; CHECK-NEXT: ldr q7, [x8, :lo12:.LCPI22_9] +; CHECK-NEXT: lslr z6.d, p0/m, z6.d, z0.d +; CHECK-NEXT: lslr z4.d, p0/m, z4.d, z0.d +; CHECK-NEXT: lslr z2.d, p0/m, z2.d, z0.d +; CHECK-NEXT: and z6.d, z6.d, z17.d +; CHECK-NEXT: and z4.d, z4.d, z18.d +; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: and z1.d, z2.d, z7.d +; CHECK-NEXT: orr z3.d, z3.d, z16.d +; CHECK-NEXT: orr z2.d, z4.d, z6.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: orr z1.d, z5.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %op) + ret <2 x i64> %res +} + +define void @bswap_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: bswap_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI23_0 +; CHECK-NEXT: adrp x9, .LCPI23_1 +; CHECK-NEXT: adrp x10, .LCPI23_2 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI23_0] +; CHECK-NEXT: adrp x8, .LCPI23_4 +; CHECK-NEXT: ldr q2, [x9, :lo12:.LCPI23_1] +; CHECK-NEXT: adrp x9, .LCPI23_3 +; CHECK-NEXT: ldr q4, [x10, :lo12:.LCPI23_2] +; CHECK-NEXT: adrp x10, .LCPI23_5 +; CHECK-NEXT: ldr q7, [x8, :lo12:.LCPI23_4] +; CHECK-NEXT: adrp x8, .LCPI23_6 +; CHECK-NEXT: ldr q5, [x9, :lo12:.LCPI23_3] +; CHECK-NEXT: adrp x9, .LCPI23_7 +; CHECK-NEXT: movprfx z6, z1 +; CHECK-NEXT: lsr z6.d, p0/m, z6.d, z2.d +; CHECK-NEXT: movprfx z17, z1 +; CHECK-NEXT: lsr z17.d, p0/m, z17.d, z3.d +; CHECK-NEXT: ldr q18, [x8, :lo12:.LCPI23_6] +; CHECK-NEXT: adrp x8, .LCPI23_8 +; CHECK-NEXT: and z6.d, z6.d, z4.d +; CHECK-NEXT: ldr q16, [x10, :lo12:.LCPI23_5] +; CHECK-NEXT: orr z6.d, z6.d, z17.d +; CHECK-NEXT: ldr q17, [x9, :lo12:.LCPI23_7] +; CHECK-NEXT: ldr q21, [x8, :lo12:.LCPI23_8] +; CHECK-NEXT: adrp x8, .LCPI23_9 +; CHECK-NEXT: movprfx z19, z1 +; CHECK-NEXT: lsr z19.d, p0/m, z19.d, z5.d +; CHECK-NEXT: movprfx z20, z1 +; CHECK-NEXT: lsr z20.d, p0/m, z20.d, z16.d +; CHECK-NEXT: and z19.d, z19.d, z7.d +; CHECK-NEXT: and z20.d, z20.d, z18.d +; CHECK-NEXT: orr z19.d, z20.d, z19.d +; CHECK-NEXT: movprfx z20, z1 +; CHECK-NEXT: lsl z20.d, p0/m, z20.d, z16.d +; CHECK-NEXT: movprfx z22, z1 +; CHECK-NEXT: lsl z22.d, p0/m, z22.d, z5.d +; CHECK-NEXT: ldr q23, [x8, :lo12:.LCPI23_9] +; CHECK-NEXT: and z20.d, z20.d, z17.d +; CHECK-NEXT: and z22.d, z22.d, z21.d +; CHECK-NEXT: orr z6.d, z19.d, z6.d +; CHECK-NEXT: orr z19.d, z22.d, z20.d +; CHECK-NEXT: movprfx z20, z1 +; CHECK-NEXT: lsl z20.d, p0/m, z20.d, z3.d +; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z2.d +; CHECK-NEXT: movprfx z22, z0 +; CHECK-NEXT: lsr z22.d, p0/m, z22.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z23.d +; CHECK-NEXT: and z4.d, z22.d, z4.d +; CHECK-NEXT: movprfx z22, z0 +; CHECK-NEXT: lsr z22.d, p0/m, z22.d, z3.d +; CHECK-NEXT: orr z1.d, z20.d, z1.d +; CHECK-NEXT: orr z4.d, z4.d, z22.d +; CHECK-NEXT: movprfx z20, z0 +; CHECK-NEXT: lsr z20.d, p0/m, z20.d, z5.d +; CHECK-NEXT: movprfx z22, z0 +; CHECK-NEXT: lsr z22.d, 
p0/m, z22.d, z16.d +; CHECK-NEXT: lslr z16.d, p0/m, z16.d, z0.d +; CHECK-NEXT: lslr z5.d, p0/m, z5.d, z0.d +; CHECK-NEXT: lslr z2.d, p0/m, z2.d, z0.d +; CHECK-NEXT: and z7.d, z20.d, z7.d +; CHECK-NEXT: and z18.d, z22.d, z18.d +; CHECK-NEXT: and z16.d, z16.d, z17.d +; CHECK-NEXT: and z5.d, z5.d, z21.d +; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z3.d +; CHECK-NEXT: and z2.d, z2.d, z23.d +; CHECK-NEXT: orr z7.d, z18.d, z7.d +; CHECK-NEXT: orr z3.d, z5.d, z16.d +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: orr z2.d, z7.d, z4.d +; CHECK-NEXT: orr z0.d, z0.d, z3.d +; CHECK-NEXT: orr z1.d, z1.d, z19.d +; CHECK-NEXT: orr z0.d, z0.d, z2.d +; CHECK-NEXT: orr z1.d, z1.d, z6.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %op) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } + +declare <4 x i8> @llvm.bitreverse.v4i8(<4 x i8>) +declare <8 x i8> @llvm.bitreverse.v8i8(<8 x i8>) +declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>) +declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>) +declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>) +declare <4 x i16> @llvm.bitreverse.v4i16(<4 x i16>) +declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>) +declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>) +declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>) +declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) +declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>) +declare <1 x i64> @llvm.bitreverse.v1i64(<1 x i64>) +declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) +declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) + +declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>) +declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>) +declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>) +declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>) +declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>) +declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) +declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>) +declare <1 x i64> @llvm.bswap.v1i64(<1 x i64>) +declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) +declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll @@ -0,0 +1,195 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define <4 x i8> @sdiv_v4i8(<4 x i8> %op1) #0 { +; CHECK-LABEL: sdiv_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #5 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <4 x i8> %op1, shufflevector (<4 x i8> insertelement (<4 x i8> poison, i8 32, i32 0), <4 x i8> poison, <4 x i32> zeroinitializer) + ret <4 x i8> %res +} + +define <8 x i8> @sdiv_v8i8(<8 x i8> %op1) #0 { +; CHECK-LABEL: sdiv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #5 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <8 x 
i8> %op1, shufflevector (<8 x i8> insertelement (<8 x i8> poison, i8 32, i32 0), <8 x i8> poison, <8 x i32> zeroinitializer) + ret <8 x i8> %res +} + +define <16 x i8> @sdiv_v16i8(<16 x i8> %op1) #0 { +; CHECK-LABEL: sdiv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #5 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <16 x i8> %op1, shufflevector (<16 x i8> insertelement (<16 x i8> poison, i8 32, i32 0), <16 x i8> poison, <16 x i32> zeroinitializer) + ret <16 x i8> %res +} + +define void @sdiv_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: sdiv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #5 +; CHECK-NEXT: asrd z1.b, p0/m, z1.b, #5 +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %res = sdiv <32 x i8> %op1, shufflevector (<32 x i8> insertelement (<32 x i8> poison, i8 32, i32 0), <32 x i8> poison, <32 x i32> zeroinitializer) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @sdiv_v2i16(<2 x i16> %op1) #0 { +; CHECK-LABEL: sdiv_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #5 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <2 x i16> %op1, shufflevector (<2 x i16> insertelement (<2 x i16> poison, i16 32, i32 0), <2 x i16> poison, <2 x i32> zeroinitializer) + ret <2 x i16> %res +} + +define <4 x i16> @sdiv_v4i16(<4 x i16> %op1) #0 { +; CHECK-LABEL: sdiv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #5 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <4 x i16> %op1, shufflevector (<4 x i16> insertelement (<4 x i16> poison, i16 32, i32 0), <4 x i16> poison, <4 x i32> zeroinitializer) + ret <4 x i16> %res +} + +define <8 x i16> @sdiv_v8i16(<8 x i16> %op1) #0 { +; CHECK-LABEL: sdiv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #5 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <8 x i16> %op1, shufflevector (<8 x i16> insertelement (<8 x i16> poison, i16 32, i32 0), <8 x i16> poison, <8 x i32> zeroinitializer) + ret <8 x i16> %res +} + +define void @sdiv_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: sdiv_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #5 +; CHECK-NEXT: asrd z1.h, p0/m, z1.h, #5 +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %res = sdiv <16 x i16> %op1, shufflevector (<16 x i16> insertelement (<16 x i16> poison, i16 32, i32 0), <16 x i16> poison, <16 x i32> zeroinitializer) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @sdiv_v2i32(<2 x i32> %op1) #0 { +; CHECK-LABEL: sdiv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #5 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; 
CHECK-NEXT: ret + %res = sdiv <2 x i32> %op1, shufflevector (<2 x i32> insertelement (<2 x i32> poison, i32 32, i32 0), <2 x i32> poison, <2 x i32> zeroinitializer) + ret <2 x i32> %res +} + +define <4 x i32> @sdiv_v4i32(<4 x i32> %op1) #0 { +; CHECK-LABEL: sdiv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #5 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <4 x i32> %op1, shufflevector (<4 x i32> insertelement (<4 x i32> poison, i32 32, i32 0), <4 x i32> poison, <4 x i32> zeroinitializer) + ret <4 x i32> %res +} + +define void @sdiv_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: sdiv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #5 +; CHECK-NEXT: asrd z1.s, p0/m, z1.s, #5 +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %res = sdiv <8 x i32> %op1, shufflevector (<8 x i32> insertelement (<8 x i32> poison, i32 32, i32 0), <8 x i32> poison, <8 x i32> zeroinitializer) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @sdiv_v1i64(<1 x i64> %op1) #0 { +; CHECK-LABEL: sdiv_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #5 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <1 x i64> %op1, shufflevector (<1 x i64> insertelement (<1 x i64> poison, i64 32, i32 0), <1 x i64> poison, <1 x i32> zeroinitializer) + ret <1 x i64> %res +} + +; Vector i64 sdiv are not legal for NEON so use SVE when available. +define <2 x i64> @sdiv_v2i64(<2 x i64> %op1) #0 { +; CHECK-LABEL: sdiv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #5 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = sdiv <2 x i64> %op1, shufflevector (<2 x i64> insertelement (<2 x i64> poison, i64 32, i32 0), <2 x i64> poison, <2 x i32> zeroinitializer) + ret <2 x i64> %res +} + +define void @sdiv_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: sdiv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #5 +; CHECK-NEXT: asrd z1.d, p0/m, z1.d, #5 +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %res = sdiv <4 x i64> %op1, shufflevector (<4 x i64> insertelement (<4 x i64> poison, i64 32, i32 0), <4 x i64> poison, <4 x i32> zeroinitializer) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll @@ -0,0 +1,523 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + + +target triple = "aarch64-unknown-linux-gnu" + +; +; DUP (integer) +; + +define <4 x i8> @splat_v4i8(i8 %a) #0 { +; CHECK-LABEL: splat_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: strh w0, [sp, #14] +; CHECK-NEXT: strh w0, [sp, #12] +; CHECK-NEXT: strh w0, [sp, #10] 
+; CHECK-NEXT: strh w0, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <4 x i8> undef, i8 %a, i64 0 + %splat = shufflevector <4 x i8> %insert, <4 x i8> undef, <4 x i32> zeroinitializer + ret <4 x i8> %splat +} + +define <8 x i8> @splat_v8i8(i8 %a) #0 { +; CHECK-LABEL: splat_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: strb w0, [sp, #15] +; CHECK-NEXT: strb w0, [sp, #14] +; CHECK-NEXT: strb w0, [sp, #13] +; CHECK-NEXT: strb w0, [sp, #12] +; CHECK-NEXT: strb w0, [sp, #11] +; CHECK-NEXT: strb w0, [sp, #10] +; CHECK-NEXT: strb w0, [sp, #9] +; CHECK-NEXT: strb w0, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <8 x i8> undef, i8 %a, i64 0 + %splat = shufflevector <8 x i8> %insert, <8 x i8> undef, <8 x i32> zeroinitializer + ret <8 x i8> %splat +} + +define <16 x i8> @splat_v16i8(i8 %a) #0 { +; CHECK-LABEL: splat_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: strb w0, [sp, #15] +; CHECK-NEXT: strb w0, [sp, #14] +; CHECK-NEXT: strb w0, [sp, #13] +; CHECK-NEXT: strb w0, [sp, #12] +; CHECK-NEXT: strb w0, [sp, #11] +; CHECK-NEXT: strb w0, [sp, #10] +; CHECK-NEXT: strb w0, [sp, #9] +; CHECK-NEXT: strb w0, [sp, #8] +; CHECK-NEXT: strb w0, [sp, #7] +; CHECK-NEXT: strb w0, [sp, #6] +; CHECK-NEXT: strb w0, [sp, #5] +; CHECK-NEXT: strb w0, [sp, #4] +; CHECK-NEXT: strb w0, [sp, #3] +; CHECK-NEXT: strb w0, [sp, #2] +; CHECK-NEXT: strb w0, [sp, #1] +; CHECK-NEXT: strb w0, [sp] +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %insert = insertelement <16 x i8> undef, i8 %a, i64 0 + %splat = shufflevector <16 x i8> %insert, <16 x i8> undef, <16 x i32> zeroinitializer + ret <16 x i8> %splat +} + +define void @splat_v32i8(i8 %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: splat_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: strb w0, [sp, #15] +; CHECK-NEXT: strb w0, [sp, #14] +; CHECK-NEXT: strb w0, [sp, #13] +; CHECK-NEXT: strb w0, [sp, #12] +; CHECK-NEXT: strb w0, [sp, #11] +; CHECK-NEXT: strb w0, [sp, #10] +; CHECK-NEXT: strb w0, [sp, #9] +; CHECK-NEXT: strb w0, [sp, #8] +; CHECK-NEXT: strb w0, [sp, #7] +; CHECK-NEXT: strb w0, [sp, #6] +; CHECK-NEXT: strb w0, [sp, #5] +; CHECK-NEXT: strb w0, [sp, #4] +; CHECK-NEXT: strb w0, [sp, #3] +; CHECK-NEXT: strb w0, [sp, #2] +; CHECK-NEXT: strb w0, [sp, #1] +; CHECK-NEXT: strb w0, [sp] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: stp q0, q0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <32 x i8> undef, i8 %a, i64 0 + %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer + store <32 x i8> %splat, <32 x i8>* %b + ret void +} + +define <2 x i16> @splat_v2i16(i16 %a) #0 { +; CHECK-LABEL: splat_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: stp w0, w0, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <2 x i16> undef, i16 %a, i64 0 + %splat = shufflevector <2 x i16> %insert, <2 x i16> undef, <2 x i32> zeroinitializer + ret <2 x i16> %splat +} + +define <4 x i16> @splat_v4i16(i16 %a) #0 { +; CHECK-LABEL: splat_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: strh w0, [sp, #14] +; CHECK-NEXT: strh 
w0, [sp, #12] +; CHECK-NEXT: strh w0, [sp, #10] +; CHECK-NEXT: strh w0, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <4 x i16> undef, i16 %a, i64 0 + %splat = shufflevector <4 x i16> %insert, <4 x i16> undef, <4 x i32> zeroinitializer + ret <4 x i16> %splat +} + +define <8 x i16> @splat_v8i16(i16 %a) #0 { +; CHECK-LABEL: splat_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: strh w0, [sp, #14] +; CHECK-NEXT: strh w0, [sp, #12] +; CHECK-NEXT: strh w0, [sp, #10] +; CHECK-NEXT: strh w0, [sp, #8] +; CHECK-NEXT: strh w0, [sp, #6] +; CHECK-NEXT: strh w0, [sp, #4] +; CHECK-NEXT: strh w0, [sp, #2] +; CHECK-NEXT: strh w0, [sp] +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %insert = insertelement <8 x i16> undef, i16 %a, i64 0 + %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer + ret <8 x i16> %splat +} + +define void @splat_v16i16(i16 %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: splat_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: strh w0, [sp, #14] +; CHECK-NEXT: strh w0, [sp, #12] +; CHECK-NEXT: strh w0, [sp, #10] +; CHECK-NEXT: strh w0, [sp, #8] +; CHECK-NEXT: strh w0, [sp, #6] +; CHECK-NEXT: strh w0, [sp, #4] +; CHECK-NEXT: strh w0, [sp, #2] +; CHECK-NEXT: strh w0, [sp] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: stp q0, q0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <16 x i16> undef, i16 %a, i64 0 + %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer + store <16 x i16> %splat, <16 x i16>* %b + ret void +} + +define <2 x i32> @splat_v2i32(i32 %a) #0 { +; CHECK-LABEL: splat_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: stp w0, w0, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <2 x i32> undef, i32 %a, i64 0 + %splat = shufflevector <2 x i32> %insert, <2 x i32> undef, <2 x i32> zeroinitializer + ret <2 x i32> %splat +} + +define <4 x i32> @splat_v4i32(i32 %a) #0 { +; CHECK-LABEL: splat_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: stp w0, w0, [sp, #8] +; CHECK-NEXT: stp w0, w0, [sp] +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %insert = insertelement <4 x i32> undef, i32 %a, i64 0 + %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer + ret <4 x i32> %splat +} + +define void @splat_v8i32(i32 %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: splat_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: stp w0, w0, [sp, #8] +; CHECK-NEXT: stp w0, w0, [sp] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: stp q0, q0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <8 x i32> undef, i32 %a, i64 0 + %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer + store <8 x i32> %splat, <8 x i32>* %b + ret void +} + +define <1 x i64> @splat_v1i64(i64 %a) #0 { +; CHECK-LABEL: splat_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, x0 +; CHECK-NEXT: ret + %insert = insertelement <1 x i64> undef, i64 %a, i64 0 + %splat = shufflevector <1 x i64> %insert, <1 x i64> undef, <1 x i32> zeroinitializer + ret <1 x i64> %splat +} + +define <2 x i64> @splat_v2i64(i64 %a) #0 { +; 
CHECK-LABEL: splat_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x0, x0, [sp, #-16]! +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %insert = insertelement <2 x i64> undef, i64 %a, i64 0 + %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer + ret <2 x i64> %splat +} + +define void @splat_v4i64(i64 %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: splat_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x0, x0, [sp, #-16]! +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: stp q0, q0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <4 x i64> undef, i64 %a, i64 0 + %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer + store <4 x i64> %splat, <4 x i64>* %b + ret void +} + +; +; DUP (floating-point) +; + +define <2 x half> @splat_v2f16(half %a) #0 { +; CHECK-LABEL: splat_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: str h0, [sp, #10] +; CHECK-NEXT: str h0, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <2 x half> undef, half %a, i64 0 + %splat = shufflevector <2 x half> %insert, <2 x half> undef, <2 x i32> zeroinitializer + ret <2 x half> %splat +} + +define <4 x half> @splat_v4f16(half %a) #0 { +; CHECK-LABEL: splat_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: str h0, [sp, #14] +; CHECK-NEXT: str h0, [sp, #12] +; CHECK-NEXT: str h0, [sp, #10] +; CHECK-NEXT: str h0, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <4 x half> undef, half %a, i64 0 + %splat = shufflevector <4 x half> %insert, <4 x half> undef, <4 x i32> zeroinitializer + ret <4 x half> %splat +} + +define <8 x half> @splat_v8f16(half %a) #0 { +; CHECK-LABEL: splat_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: str h0, [sp, #14] +; CHECK-NEXT: str h0, [sp, #12] +; CHECK-NEXT: str h0, [sp, #10] +; CHECK-NEXT: str h0, [sp, #8] +; CHECK-NEXT: str h0, [sp, #6] +; CHECK-NEXT: str h0, [sp, #4] +; CHECK-NEXT: str h0, [sp, #2] +; CHECK-NEXT: str h0, [sp] +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %insert = insertelement <8 x half> undef, half %a, i64 0 + %splat = shufflevector <8 x half> %insert, <8 x half> undef, <8 x i32> zeroinitializer + ret <8 x half> %splat +} + +define void @splat_v16f16(half %a, <16 x half>* %b) #0 { +; CHECK-LABEL: splat_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: str h0, [sp, #14] +; CHECK-NEXT: str h0, [sp, #12] +; CHECK-NEXT: str h0, [sp, #10] +; CHECK-NEXT: str h0, [sp, #8] +; CHECK-NEXT: str h0, [sp, #6] +; CHECK-NEXT: str h0, [sp, #4] +; CHECK-NEXT: str h0, [sp, #2] +; CHECK-NEXT: str h0, [sp] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <16 x half> undef, half %a, i64 0 + %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer + store <16 x half> %splat, <16 x half>* %b + ret void +} + +define <2 x float> @splat_v2f32(float %a, <2 x float> %op2) #0 { +; CHECK-LABEL: splat_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: stp s0, s0, [sp, #8] +; CHECK-NEXT: ldr 
d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <2 x float> undef, float %a, i64 0 + %splat = shufflevector <2 x float> %insert, <2 x float> undef, <2 x i32> zeroinitializer + ret <2 x float> %splat +} + +define <4 x float> @splat_v4f32(float %a, <4 x float> %op2) #0 { +; CHECK-LABEL: splat_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: stp s0, s0, [sp, #8] +; CHECK-NEXT: stp s0, s0, [sp] +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %insert = insertelement <4 x float> undef, float %a, i64 0 + %splat = shufflevector <4 x float> %insert, <4 x float> undef, <4 x i32> zeroinitializer + ret <4 x float> %splat +} + +define void @splat_v8f32(float %a, <8 x float>* %b) #0 { +; CHECK-LABEL: splat_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: stp s0, s0, [sp, #8] +; CHECK-NEXT: stp s0, s0, [sp] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <8 x float> undef, float %a, i64 0 + %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer + store <8 x float> %splat, <8 x float>* %b + ret void +} + +define <1 x double> @splat_v1f64(double %a, <1 x double> %op2) #0 { +; CHECK-LABEL: splat_v1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ret + %insert = insertelement <1 x double> undef, double %a, i64 0 + %splat = shufflevector <1 x double> %insert, <1 x double> undef, <1 x i32> zeroinitializer + ret <1 x double> %splat +} + +define <2 x double> @splat_v2f64(double %a, <2 x double> %op2) #0 { +; CHECK-LABEL: splat_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: stp d0, d0, [sp, #-16]! +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %insert = insertelement <2 x double> undef, double %a, i64 0 + %splat = shufflevector <2 x double> %insert, <2 x double> undef, <2 x i32> zeroinitializer + ret <2 x double> %splat +} + +define void @splat_v4f64(double %a, <4 x double>* %b) #0 { +; CHECK-LABEL: splat_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: stp d0, d0, [sp, #-16]! 
+; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %insert = insertelement <4 x double> undef, double %a, i64 0 + %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer + store <4 x double> %splat, <4 x double>* %b + ret void +} + +; +; DUP (integer immediate) +; + +define void @splat_imm_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: splat_imm_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI24_0 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI24_0] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %insert = insertelement <32 x i8> undef, i8 1, i64 0 + %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer + store <32 x i8> %splat, <32 x i8>* %a + ret void +} + +define void @splat_imm_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: splat_imm_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI25_0 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI25_0] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %insert = insertelement <16 x i16> undef, i16 2, i64 0 + %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer + store <16 x i16> %splat, <16 x i16>* %a + ret void +} + +define void @splat_imm_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: splat_imm_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI26_0 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI26_0] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %insert = insertelement <8 x i32> undef, i32 3, i64 0 + %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer + store <8 x i32> %splat, <8 x i32>* %a + ret void +} + +define void @splat_imm_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: splat_imm_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI27_0 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI27_0] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %insert = insertelement <4 x i64> undef, i64 4, i64 0 + %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer + store <4 x i64> %splat, <4 x i64>* %a + ret void +} + +; +; DUP (floating-point immediate) +; + +define void @splat_imm_v16f16(<16 x half>* %a) #0 { +; CHECK-LABEL: splat_imm_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI28_0 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI28_0] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %insert = insertelement <16 x half> undef, half 5.0, i64 0 + %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer + store <16 x half> %splat, <16 x half>* %a + ret void +} + +define void @splat_imm_v8f32(<8 x float>* %a) #0 { +; CHECK-LABEL: splat_imm_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI29_0 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI29_0] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %insert = insertelement <8 x float> undef, float 6.0, i64 0 + %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer + store <8 x float> %splat, <8 x float>* %a + ret void +} + +define void @splat_imm_v4f64(<4 x double>* %a) #0 { +; CHECK-LABEL: splat_imm_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI30_0 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI30_0] +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %insert = insertelement <4 x double> undef, double 7.0, i64 0 + %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer + store <4 x double> %splat, <4 x double>* %a + ret void +} + +attributes #0 = { 
"target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll @@ -0,0 +1,663 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; truncate i16 -> i8 +; + +define <16 x i8> @trunc_v16i16_v16i8(<16 x i16>* %in) vscale_range(2,0) #0 { +; CHECK-LABEL: trunc_v16i16_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %a = load <16 x i16>, <16 x i16>* %in + %b = trunc <16 x i16> %a to <16 x i8> + ret <16 x i8> %b +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v32i16_v32i8(<32 x i16>* %in, <32 x i8>* %out) #0 { +; CHECK-LABEL: trunc_v32i16_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldp q1, q0, [x0, #32] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z17.h, z1.h[6] +; CHECK-NEXT: mov z18.h, z1.h[5] +; CHECK-NEXT: mov z19.h, z1.h[4] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.h, z0.h[7] +; CHECK-NEXT: mov z3.h, z0.h[6] +; CHECK-NEXT: mov z4.h, z0.h[5] +; CHECK-NEXT: ldp q22, q23, [x0] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strb w8, [sp, #24] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strb w9, [sp, #16] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z5.h, z0.h[4] +; CHECK-NEXT: mov z6.h, z0.h[3] +; CHECK-NEXT: mov z7.h, z0.h[2] +; CHECK-NEXT: strb w10, [sp, #31] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strb w8, [sp, #30] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: strb w9, [sp, #29] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: mov z16.h, z0.h[1] +; CHECK-NEXT: mov z0.h, z1.h[7] +; CHECK-NEXT: strb w10, [sp, #28] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: strb w8, [sp, #27] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strb w9, [sp, #26] +; CHECK-NEXT: fmov w9, s17 +; CHECK-NEXT: mov z20.h, z1.h[3] +; CHECK-NEXT: strb w10, [sp, #25] +; CHECK-NEXT: fmov w10, s18 +; CHECK-NEXT: strb w8, [sp, #23] +; CHECK-NEXT: fmov w8, s19 +; CHECK-NEXT: strb w9, [sp, #22] +; CHECK-NEXT: fmov w9, s20 +; CHECK-NEXT: mov z21.h, z1.h[2] +; CHECK-NEXT: mov z0.h, z1.h[1] +; CHECK-NEXT: strb w10, [sp, #21] +; CHECK-NEXT: fmov w10, s21 +; CHECK-NEXT: strb w8, [sp, #20] +; CHECK-NEXT: strb w9, [sp, #19] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov w9, s23 +; CHECK-NEXT: mov z0.h, z23.h[7] +; CHECK-NEXT: mov z1.h, z23.h[6] +; CHECK-NEXT: strb w10, [sp, #18] +; CHECK-NEXT: fmov w10, s22 +; CHECK-NEXT: strb w8, [sp, #17] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strb w9, [sp, #8] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z2.h, z23.h[5] +; CHECK-NEXT: mov z3.h, z23.h[4] +; CHECK-NEXT: mov z4.h, z23.h[3] +; CHECK-NEXT: strb w10, [sp] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strb w8, [sp, #15] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strb w9, [sp, #14] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z5.h, z23.h[2] +; CHECK-NEXT: mov z6.h, z23.h[1] +; CHECK-NEXT: mov z7.h, z22.h[7] +; CHECK-NEXT: strb w10, [sp, #13] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strb w8, [sp, #12] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: strb w9, [sp, #11] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: mov z16.h, z22.h[6] +; CHECK-NEXT: mov 
z17.h, z22.h[5] +; CHECK-NEXT: mov z18.h, z22.h[4] +; CHECK-NEXT: strb w10, [sp, #10] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: strb w8, [sp, #9] +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: strb w9, [sp, #7] +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: mov z19.h, z22.h[3] +; CHECK-NEXT: mov z20.h, z22.h[2] +; CHECK-NEXT: mov z21.h, z22.h[1] +; CHECK-NEXT: strb w10, [sp, #6] +; CHECK-NEXT: fmov w10, s19 +; CHECK-NEXT: strb w8, [sp, #5] +; CHECK-NEXT: fmov w8, s20 +; CHECK-NEXT: strb w9, [sp, #4] +; CHECK-NEXT: fmov w9, s21 +; CHECK-NEXT: strb w10, [sp, #3] +; CHECK-NEXT: strb w8, [sp, #2] +; CHECK-NEXT: strb w9, [sp, #1] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: add z1.b, z1.b, z1.b +; CHECK-NEXT: add z0.b, z0.b, z0.b +; CHECK-NEXT: stp q1, q0, [x1] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %a = load <32 x i16>, <32 x i16>* %in + %b = trunc <32 x i16> %a to <32 x i8> + %c = add <32 x i8> %b, %b + store <32 x i8> %c, <32 x i8>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v64i16_v64i8(<64 x i16>* %in, <64 x i8>* %out) vscale_range(8,0) #0 { +; CHECK-LABEL: trunc_v64i16_v64i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl64 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.b, vl64 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: add z0.b, z0.b, z0.b +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %a = load <64 x i16>, <64 x i16>* %in + %b = trunc <64 x i16> %a to <64 x i8> + %c = add <64 x i8> %b, %b + store <64 x i8> %c, <64 x i8>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v128i16_v128i8(<128 x i16>* %in, <128 x i8>* %out) vscale_range(16,0) #0 { +; CHECK-LABEL: trunc_v128i16_v128i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl128 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.b, vl128 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: add z0.b, z0.b, z0.b +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %a = load <128 x i16>, <128 x i16>* %in + %b = trunc <128 x i16> %a to <128 x i8> + %c = add <128 x i8> %b, %b + store <128 x i8> %c, <128 x i8>* %out + ret void +} + +; +; truncate i32 -> i8 +; + +define <8 x i8> @trunc_v8i32_v8i8(<8 x i32>* %in) vscale_range(2,0) #0 { +; CHECK-LABEL: trunc_v8i32_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %a = load <8 x i32>, <8 x i32>* %in + %b = trunc <8 x i32> %a to <8 x i8> + ret <8 x i8> %b +} + +define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %in) #0 { +; CHECK-LABEL: trunc_v16i32_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: ldp q1, q0, [x0, #32] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z7.s, z1.s[2] +; CHECK-NEXT: mov z16.s, z1.s[1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z4.s, z0.s[3] +; CHECK-NEXT: mov z5.s, z0.s[2] +; CHECK-NEXT: mov z6.s, z0.s[1] +; CHECK-NEXT: strb w9, [sp, #8] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strb w8, [sp, #12] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z0.s, z1.s[3] +; CHECK-NEXT: mov z19.s, z2.s[2] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strb w9, [sp, #15] +; CHECK-NEXT: strb w8, [sp] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z1.s, z3.s[3] +; CHECK-NEXT: strb w10, [sp, #4] +; 
CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strb w8, [sp, #13] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: mov z17.s, z3.s[2] +; CHECK-NEXT: mov z18.s, z3.s[1] +; CHECK-NEXT: strb w10, [sp, #14] +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: strb w9, [sp, #11] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strb w8, [sp, #9] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: strb w10, [sp, #10] +; CHECK-NEXT: fmov w10, s17 +; CHECK-NEXT: mov z3.s, z2.s[3] +; CHECK-NEXT: mov z20.s, z2.s[1] +; CHECK-NEXT: strb w9, [sp, #7] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w10, [sp, #6] +; CHECK-NEXT: fmov w10, s19 +; CHECK-NEXT: strb w8, [sp, #5] +; CHECK-NEXT: fmov w8, s20 +; CHECK-NEXT: strb w9, [sp, #3] +; CHECK-NEXT: strb w10, [sp, #2] +; CHECK-NEXT: strb w8, [sp, #1] +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %a = load <16 x i32>, <16 x i32>* %in + %b = trunc <16 x i32> %a to <16 x i8> + ret <16 x i8> %b +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v32i32_v32i8(<32 x i32>* %in, <32 x i8>* %out) vscale_range(8,0) #0 { +; CHECK-LABEL: trunc_v32i32_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl32 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: add z0.b, z0.b, z0.b +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %a = load <32 x i32>, <32 x i32>* %in + %b = trunc <32 x i32> %a to <32 x i8> + %c = add <32 x i8> %b, %b + store <32 x i8> %c, <32 x i8>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v64i32_v64i8(<64 x i32>* %in, <64 x i8>* %out) vscale_range(16,0) #0 { +; CHECK-LABEL: trunc_v64i32_v64i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl64 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.b, vl64 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: add z0.b, z0.b, z0.b +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %a = load <64 x i32>, <64 x i32>* %in + %b = trunc <64 x i32> %a to <64 x i8> + %c = add <64 x i8> %b, %b + store <64 x i8> %c, <64 x i8>* %out + ret void +} + +; +; truncate i32 -> i16 +; + +define <8 x i16> @trunc_v8i32_v8i16(<8 x i32>* %in) vscale_range(2,0) #0 { +; CHECK-LABEL: trunc_v8i32_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %a = load <8 x i32>, <8 x i32>* %in + %b = trunc <8 x i32> %a to <8 x i16> + ret <8 x i16> %b +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. 
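+; (Without the extra 'add', the truncate would likely be folded into a truncating store and the narrowing sequence under test would never be generated.)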
+define void @trunc_v16i32_v16i16(<16 x i32>* %in, <16 x i16>* %out) #0 { +; CHECK-LABEL: trunc_v16i32_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldp q1, q0, [x0, #32] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z5.s, z1.s[2] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.s, z0.s[3] +; CHECK-NEXT: mov z3.s, z0.s[2] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: ldp q6, q7, [x0] +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z4.s, z0.s[1] +; CHECK-NEXT: mov z0.s, z1.s[3] +; CHECK-NEXT: strh w9, [sp, #16] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strh w10, [sp, #30] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: strh w8, [sp, #28] +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: mov z0.s, z1.s[1] +; CHECK-NEXT: strh w9, [sp, #26] +; CHECK-NEXT: strh w10, [sp, #22] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: strh w8, [sp, #20] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: mov z0.s, z7.s[3] +; CHECK-NEXT: mov z1.s, z7.s[2] +; CHECK-NEXT: mov z2.s, z7.s[1] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z3.s, z6.s[3] +; CHECK-NEXT: mov z4.s, z6.s[2] +; CHECK-NEXT: mov z5.s, z6.s[1] +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: strh w9, [sp, #4] +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: add z1.h, z1.h, z1.h +; CHECK-NEXT: add z0.h, z0.h, z0.h +; CHECK-NEXT: stp q1, q0, [x1] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %a = load <16 x i32>, <16 x i32>* %in + %b = trunc <16 x i32> %a to <16 x i16> + %c = add <16 x i16> %b, %b + store <16 x i16> %c, <16 x i16>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v32i32_v32i16(<32 x i32>* %in, <32 x i16>* %out) vscale_range(8,0) #0 { +; CHECK-LABEL: trunc_v32i32_v32i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl32 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.h, vl32 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: add z0.h, z0.h, z0.h +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %a = load <32 x i32>, <32 x i32>* %in + %b = trunc <32 x i32> %a to <32 x i16> + %c = add <32 x i16> %b, %b + store <32 x i16> %c, <32 x i16>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) vscale_range(16,0) #0 { +; CHECK-LABEL: trunc_v64i32_v64i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl64 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.h, vl64 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: add z0.h, z0.h, z0.h +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %a = load <64 x i32>, <64 x i32>* %in + %b = trunc <64 x i32> %a to <64 x i16> + %c = add <64 x i16> %b, %b + store <64 x i16> %c, <64 x i16>* %out + ret void +} + +; +; truncate i64 -> i8 +; + +; NOTE: v4i8 is not legal so result i8 elements are held within i16 containers. 
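+; (That is, the <4 x i8> result is promoted to <4 x i16> and returned in a D register, which is why the checks below only narrow as far as .h elements.)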
+define <4 x i8> @trunc_v4i64_v4i8(<4 x i64>* %in) vscale_range(2,0) #0 { +; CHECK-LABEL: trunc_v4i64_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %a = load <4 x i64>, <4 x i64>* %in + %b = trunc <4 x i64> %a to <4 x i8> + ret <4 x i8> %b +} + +define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %in) #0 { +; CHECK-LABEL: trunc_v8i64_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: xtn v0.2s, v0.2d +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z4.s, z1.s[1] +; CHECK-NEXT: xtn v3.2s, v3.2d +; CHECK-NEXT: strb w9, [sp, #12] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: xtn v2.2s, v2.2d +; CHECK-NEXT: strb w8, [sp, #14] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z0.s, z2.s[1] +; CHECK-NEXT: mov z2.s, z3.s[1] +; CHECK-NEXT: strb w9, [sp, #15] +; CHECK-NEXT: strb w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strb w10, [sp, #10] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strb w8, [sp, #11] +; CHECK-NEXT: strb w10, [sp, #13] +; CHECK-NEXT: strb w9, [sp, #9] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %a = load <8 x i64>, <8 x i64>* %in + %b = trunc <8 x i64> %a to <8 x i8> + ret <8 x i8> %b +} + +define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %in) vscale_range(8,0) #0 { +; CHECK-LABEL: trunc_v16i64_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl16 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %a = load <16 x i64>, <16 x i64>* %in + %b = trunc <16 x i64> %a to <16 x i8> + ret <16 x i8> %b +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. 
+define void @trunc_v32i64_v32i8(<32 x i64>* %in, <32 x i8>* %out) vscale_range(16,0) #0 { +; CHECK-LABEL: trunc_v32i64_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl32 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: add z0.b, z0.b, z0.b +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %a = load <32 x i64>, <32 x i64>* %in + %b = trunc <32 x i64> %a to <32 x i8> + %c = add <32 x i8> %b, %b + store <32 x i8> %c, <32 x i8>* %out + ret void +} + +; +; truncate i64 -> i16 +; + +define <4 x i16> @trunc_v4i64_v4i16(<4 x i64>* %in) vscale_range(2,0) #0 { +; CHECK-LABEL: trunc_v4i64_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %a = load <4 x i64>, <4 x i64>* %in + %b = trunc <4 x i64> %a to <4 x i16> + ret <4 x i16> %b +} + +define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %in) #0 { +; CHECK-LABEL: trunc_v8i64_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: xtn v0.2s, v0.2d +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z4.s, z1.s[1] +; CHECK-NEXT: xtn v3.2s, v3.2d +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: xtn v2.2s, v2.2d +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z0.s, z2.s[1] +; CHECK-NEXT: mov z2.s, z3.s[1] +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: strh w9, [sp, #2] +; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ret + %a = load <8 x i64>, <8 x i64>* %in + %b = trunc <8 x i64> %a to <8 x i16> + ret <8 x i16> %b +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v16i64_v16i16(<16 x i64>* %in, <16 x i16>* %out) vscale_range(8,0) #0 { +; CHECK-LABEL: trunc_v16i64_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl16 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: add z0.h, z0.h, z0.h +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %a = load <16 x i64>, <16 x i64>* %in + %b = trunc <16 x i64> %a to <16 x i16> + %c = add <16 x i16> %b, %b + store <16 x i16> %c, <16 x i16>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. 
+define void @trunc_v32i64_v32i16(<32 x i64>* %in, <32 x i16>* %out) vscale_range(16,0) #0 { +; CHECK-LABEL: trunc_v32i64_v32i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl32 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.h, vl32 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: add z0.h, z0.h, z0.h +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %a = load <32 x i64>, <32 x i64>* %in + %b = trunc <32 x i64> %a to <32 x i16> + %c = add <32 x i16> %b, %b + store <32 x i16> %c, <32 x i16>* %out + ret void +} + +; +; truncate i64 -> i32 +; + +define <4 x i32> @trunc_v4i64_v4i32(<4 x i64>* %in) vscale_range(2,0) #0 { +; CHECK-LABEL: trunc_v4i64_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %a = load <4 x i64>, <4 x i64>* %in + %b = trunc <4 x i64> %a to <4 x i32> + ret <4 x i32> %b +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v8i64_v8i32(<8 x i64>* %in, <8 x i32>* %out) #0 { +; CHECK-LABEL: trunc_v8i64_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: xtn v0.2s, v0.2d +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s +; CHECK-NEXT: xtn v2.2s, v2.2d +; CHECK-NEXT: add z0.s, z0.s, z0.s +; CHECK-NEXT: xtn v3.2s, v3.2d +; CHECK-NEXT: splice z2.s, p0, z2.s, z3.s +; CHECK-NEXT: add z1.s, z2.s, z2.s +; CHECK-NEXT: stp q1, q0, [x1] +; CHECK-NEXT: ret + %a = load <8 x i64>, <8 x i64>* %in + %b = trunc <8 x i64> %a to <8 x i32> + %c = add <8 x i32> %b, %b + store <8 x i32> %c, <8 x i32>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v16i64_v16i32(<16 x i64>* %in, <16 x i32>* %out) vscale_range(8,0) #0 { +; CHECK-LABEL: trunc_v16i64_v16i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl16 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.s, vl16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: add z0.s, z0.s, z0.s +; CHECK-NEXT: st1w { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %a = load <16 x i64>, <16 x i64>* %in + %b = trunc <16 x i64> %a to <16 x i32> + %c = add <16 x i32> %b, %b + store <16 x i32> %c, <16 x i32>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v32i64_v32i32(<32 x i64>* %in, <32 x i32>* %out) vscale_range(16,0) #0 { +; CHECK-LABEL: trunc_v32i64_v32i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl32 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.s, vl32 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: add z0.s, z0.s, z0.s +; CHECK-NEXT: st1w { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %a = load <32 x i64>, <32 x i64>* %in + %b = trunc <32 x i64> %a to <32 x i32> + %c = add <32 x i32> %b, %b + store <32 x i32> %c, <32 x i32>* %out + ret void +} + +attributes #0 = { nounwind "target-features"="+sve" }