diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1407,6 +1407,7 @@
     setTruncStoreAction(MVT::v1f64, MVT::v1f32, Custom);
     setTruncStoreAction(MVT::v2f64, MVT::v2f32, Custom);
     setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
+
     for (MVT VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
                    MVT::v4i32, MVT::v1i64, MVT::v2i64})
       addTypeForStreamingSVE(VT);
@@ -12736,7 +12737,8 @@
   if (!VT.isVector() || VT.isScalableVector())
     return SDValue();
-  if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
+  if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(),
+                                   Subtarget->forceStreamingCompatibleSVE()))
     return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
   return SDValue();
@@ -13959,10 +13961,11 @@
   if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
     return false;
-  if (Subtarget->useSVEForFixedLengthVectors() &&
-      (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
-       (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
-        isPowerOf2_32(NumElements) && VecSize > 128))) {
+  if (Subtarget->forceStreamingCompatibleSVE() ||
+      (Subtarget->useSVEForFixedLengthVectors() &&
+       (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
+        (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
+         isPowerOf2_32(NumElements) && VecSize > 128)))) {
     UseScalable = true;
     return true;
   }
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -261,6 +261,8 @@
   bool isLegalMaskedGatherScatter(Type *DataType) const {
     if (!ST->hasSVE())
       return false;
+    if (ST->forceStreamingCompatibleSVE())
+      return false;
     // For fixed vectors, scalarize if not using SVE for them.
     auto *DataTypeFVTy = dyn_cast<FixedVectorType>(DataType);
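The source changes above mean that, under -force-streaming-compatible-sve, fixed-length vector operations are selected through SVE rather than NEON: the common fixed-length integer types are registered for streaming SVE, fixed-length truncates take the SVE lowering path, scalable shuffles are allowed for these widths, and isLegalMaskedGatherScatter now returns false so fixed-length gathers/scatters are scalarized instead. As a result, narrowing conversions are expected to lower to UZP1/SPLICE sequences instead of round-tripping each element through a stack slot, which is what the updated tests below check. The sketch that follows is illustrative only (the function name is made up); it mirrors the fcvtzu_v4f64_v4i32 case updated later in this patch and, with the RUN line used by these tests, should now produce fcvtzu + uzp1 + splice rather than per-element fmov/stp:

; Illustrative sketch, not part of the patch; same RUN line and attributes as the tests below.
; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"

define <4 x i32> @example_fptoui_v4f64(<4 x double>* %a) #0 {
  %op1 = load <4 x double>, <4 x double>* %a
  %res = fptoui <4 x double> %op1 to <4 x i32>
  ret <4 x i32> %res
}

attributes #0 = { "target-features"="+sve" }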
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
@@ -161,22 +161,15 @@ define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v2f32_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
-; CHECK-NEXT:    ldr d1, [sp, #8]
-; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
-; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    and z1.s, z1.s, #0x7fffffff
+; CHECK-NEXT:    and z0.s, z0.s, #0x80000000
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
 ; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <2 x float>, ptr %ap
   %b = load <2 x double>, ptr %bp
@@ -192,27 +185,19 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x1]
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
-; CHECK-NEXT:    fmov x10, d1
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    ldr q2, [x0]
 ; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    mov z2.d, z1.d[1]
-; CHECK-NEXT:    fmov x11, d2
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
-; CHECK-NEXT:    ldr q1, [sp]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    splice z1.s, p0, z1.s, z0.s
 ; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
-; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    and z2.s, z2.s, #0x7fffffff
+; CHECK-NEXT:    orr z0.d, z2.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %ap
   %b = load <4 x double>, ptr %bp
@@ -295,29 +280,15 @@ define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f16_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
 ; CHECK-NEXT:    ldr d1, [x0]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    and z1.h, z1.h, #0x7fff
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
 ; CHECK-NEXT:    and z0.h, z0.h, #0x8000
 ; CHECK-NEXT:    orr z0.d, z1.d, z0.d
 ; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, ptr %ap
   %b = load <4 x float>, ptr %bp
@@ -364,41 +335,19 @@ define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) #0
{ ; CHECK-LABEL: test_copysign_v8f16_v8f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q1, q0, [x1] ; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: fcvt z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov z5.s, z0.s[2] ; CHECK-NEXT: fcvt z1.h, p0/m, z1.s -; CHECK-NEXT: mov z6.s, z0.s[1] -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z2.s, z1.s[3] -; CHECK-NEXT: mov z3.s, z1.s[2] -; CHECK-NEXT: mov z4.s, z1.s[1] -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: ldr q0, [x0] -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strh w9, [sp] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: strh w10, [sp, #14] -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: and z0.h, z0.h, #0x7fff -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strh w9, [sp, #10] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: strh w10, [sp, #6] -; CHECK-NEXT: strh w8, [sp, #4] -; CHECK-NEXT: strh w9, [sp, #2] -; CHECK-NEXT: ldr q1, [sp] +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: ldr q2, [x0] +; CHECK-NEXT: fcvt z0.h, p0/m, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: splice z1.h, p0, z1.h, z0.h ; CHECK-NEXT: and z1.h, z1.h, #0x8000 -; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: and z2.h, z2.h, #0x7fff +; CHECK-NEXT: orr z0.d, z2.d, z1.d ; CHECK-NEXT: str q0, [x0] -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %a = load <8 x half>, ptr %ap %b = load <8 x float>, ptr %bp diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll @@ -7,15 +7,12 @@ define void @fp_convert_combine_crash(<8 x float> *%a, <8 x i32> *%b) #0 { ; CHECK-LABEL: fp_convert_combine_crash: ; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, .LCPI0_0 -; CHECK-NEXT: ptrue p0.s, vl4 -; CHECK-NEXT: ldp q0, q2, [x0] -; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI0_0] -; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z2.s +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: fmov z1.s, #8.00000000 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: fmul z0.s, z0.s, z1.s ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s -; CHECK-NEXT: fcvtzs z1.s, p0/m, z1.s -; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: st1w { z0.s }, p0, [x1] ; CHECK-NEXT: ret %f = load <8 x float>, <8 x float>* %a %mul.i = fmul <8 x float> %f, @select_v2f16(<2 x half> %op1, <2 x half> %op2, i1 %mask) #0 { ; CHECK-LABEL: select_v2f16: ; CHECK: // %bb.0: @@ -88,7 +89,7 @@ ret <8 x half> %sel } -define void @select_v16f16(ptr %a, ptr %b, i1 %mask) #0 { +define void @select_v16f16(<16 x half>* %a, <16 x half>* %b, i1 %mask) #0 { ; CHECK-LABEL: select_v16f16: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #16 @@ -120,13 +121,14 @@ ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret - %op1 = load volatile <16 x half>, ptr %a - %op2 = load volatile <16 x half>, ptr %b + %op1 = load volatile <16 x half>, <16 x half>* %a + %op2 = load volatile <16 x half>, <16 x half>* %b %sel = select i1 %mask, <16 x half> %op1, <16 x half> %op2 - store <16 x half> %sel, ptr %a + store <16 x half> %sel, <16 x half>* %a ret void } +; f32 define <2 x float> @select_v2f32(<2 x float> %op1, <2 x float> %op2, i1 %mask) #0 { ; CHECK-LABEL: 
select_v2f32: ; CHECK: // %bb.0: @@ -176,7 +178,7 @@ ret <4 x float> %sel } -define void @select_v8f32(ptr %a, ptr %b, i1 %mask) #0 { +define void @select_v8f32(<8 x float>* %a, <8 x float>* %b, i1 %mask) #0 { ; CHECK-LABEL: select_v8f32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #16 @@ -202,45 +204,26 @@ ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret - %op1 = load volatile <8 x float>, ptr %a - %op2 = load volatile <8 x float>, ptr %b + %op1 = load volatile <8 x float>, <8 x float>* %a + %op2 = load volatile <8 x float>, <8 x float>* %b %sel = select i1 %mask, <8 x float> %op1, <8 x float> %op2 - store <8 x float> %sel, ptr %a + store <8 x float> %sel, <8 x float>* %a ret void } -define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, i1 %mask) #0 { -; CHECK-LABEL: select_v1f64: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w0, #0x1 -; CHECK-NEXT: mov x9, #-1 -; CHECK-NEXT: csetm x8, ne -; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 -; CHECK-NEXT: fmov d3, x9 -; CHECK-NEXT: fmov d2, x8 -; CHECK-NEXT: eor z3.d, z2.d, z3.d -; CHECK-NEXT: and z0.d, z0.d, z2.d -; CHECK-NEXT: and z1.d, z1.d, z3.d -; CHECK-NEXT: orr z0.d, z0.d, z1.d -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: ret - %sel = select i1 %mask, <1 x double> %op1, <1 x double> %op2 - ret <1 x double> %sel -} - +; f64 define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, i1 %mask) #0 { ; CHECK-LABEL: select_v2f64: ; CHECK: // %bb.0: ; CHECK-NEXT: tst w0, #0x1 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 -; CHECK-NEXT: adrp x9, .LCPI8_0 +; CHECK-NEXT: adrp x9, .LCPI7_0 ; CHECK-NEXT: csetm x8, ne ; CHECK-NEXT: stp x8, x8, [sp, #-16]! ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldr q2, [sp] -; CHECK-NEXT: ldr q3, [x9, :lo12:.LCPI8_0] +; CHECK-NEXT: ldr q3, [x9, :lo12:.LCPI7_0] ; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: eor z3.d, z2.d, z3.d ; CHECK-NEXT: and z1.d, z1.d, z3.d @@ -252,7 +235,7 @@ ret <2 x double> %sel } -define void @select_v4f64(ptr %a, ptr %b, i1 %mask) #0 { +define void @select_v4f64(<4 x double>* %a, <4 x double>* %b, i1 %mask) #0 { ; CHECK-LABEL: select_v4f64: ; CHECK: // %bb.0: ; CHECK-NEXT: tst w2, #0x1 @@ -260,11 +243,11 @@ ; CHECK-NEXT: csetm x8, ne ; CHECK-NEXT: ldr q1, [x0, #16] ; CHECK-NEXT: ldr q2, [x1] -; CHECK-NEXT: adrp x9, .LCPI9_0 +; CHECK-NEXT: adrp x9, .LCPI8_0 ; CHECK-NEXT: ldr q3, [x1, #16] ; CHECK-NEXT: stp x8, x8, [sp, #-16]! 
; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI9_0] +; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI8_0] ; CHECK-NEXT: ldr q5, [sp] ; CHECK-NEXT: eor z4.d, z5.d, z4.d ; CHECK-NEXT: and z1.d, z1.d, z5.d @@ -276,10 +259,10 @@ ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret - %op1 = load volatile <4 x double>, ptr %a - %op2 = load volatile <4 x double>, ptr %b + %op1 = load volatile <4 x double>, <4 x double>* %a + %op2 = load volatile <4 x double>, <4 x double>* %b %sel = select i1 %mask, <4 x double> %op1, <4 x double> %op2 - store <4 x double> %sel, ptr %a + store <4 x double> %sel, <4 x double>* %a ret void } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll @@ -290,24 +290,11 @@ define <4 x i16> @fcvtzu_v4f32_v4i16(<4 x float> %op1) #0 { ; CHECK-LABEL: fcvtzu_v4f32_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: fcvtzu z0.s, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = fptoui <4 x float> %op1 to <4 x i16> ret <4 x i16> %res @@ -316,35 +303,15 @@ define <8 x i16> @fcvtzu_v8f32_v8i16(<8 x float>* %a) #0 { ; CHECK-LABEL: fcvtzu_v8f32_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: fcvtzu z1.s, p0/m, z1.s -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z5.s, z1.s[2] ; CHECK-NEXT: fcvtzu z0.s, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z2.s, z0.s[3] -; CHECK-NEXT: mov z3.s, z0.s[2] -; CHECK-NEXT: mov z4.s, z0.s[1] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w9, [sp] -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z0.s, z1.s[3] -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: strh w10, [sp, #14] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strh w9, [sp, #10] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strh w10, [sp, #6] -; CHECK-NEXT: strh w8, [sp, #4] -; CHECK-NEXT: strh w9, [sp, #2] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z2.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p0, z0.h, z2.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <8 x float>, <8 x float>* %a %res = fptoui <8 x float> %op1 to <8 x i16> @@ -354,64 +321,21 @@ define void @fcvtzu_v16f32_v16i16(<16 x float>* %a, <16 x i16>* %b) #0 { ; CHECK-LABEL: fcvtzu_v16f32_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ptrue p1.h, vl4 ; CHECK-NEXT: 
fcvtzu z0.s, p0/m, z0.s -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov z5.s, z0.s[2] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: ldp q3, q2, [x0, #32] ; CHECK-NEXT: fcvtzu z1.s, p0/m, z1.s -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z2.s, z1.s[3] -; CHECK-NEXT: mov z3.s, z1.s[2] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: ldp q6, q7, [x0, #32] -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: mov z4.s, z1.s[1] -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: strh w9, [sp] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: strh w10, [sp, #14] -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strh w9, [sp, #10] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: strh w10, [sp, #6] -; CHECK-NEXT: strh w8, [sp, #4] -; CHECK-NEXT: movprfx z1, z7 -; CHECK-NEXT: fcvtzu z1.s, p0/m, z7.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z1.s[3] -; CHECK-NEXT: mov z2.s, z1.s[2] -; CHECK-NEXT: mov z3.s, z1.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: movprfx z1, z6 -; CHECK-NEXT: fcvtzu z1.s, p0/m, z6.s -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w8, [sp, #2] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z4.s, z1.s[3] -; CHECK-NEXT: strh w9, [sp, #24] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: strh w10, [sp, #16] -; CHECK-NEXT: fmov w10, s3 -; CHECK-NEXT: mov z5.s, z1.s[2] -; CHECK-NEXT: mov z6.s, z1.s[1] -; CHECK-NEXT: strh w8, [sp, #30] -; CHECK-NEXT: fmov w8, s4 -; CHECK-NEXT: strh w9, [sp, #28] -; CHECK-NEXT: fmov w9, s5 -; CHECK-NEXT: strh w10, [sp, #26] -; CHECK-NEXT: fmov w10, s6 -; CHECK-NEXT: strh w8, [sp, #22] -; CHECK-NEXT: strh w9, [sp, #20] -; CHECK-NEXT: strh w10, [sp, #18] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p1, z0.h, z1.h +; CHECK-NEXT: fcvtzu z3.s, p0/m, z3.s +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: fcvtzu z2.s, p0/m, z2.s +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: splice z3.h, p1, z3.h, z2.h +; CHECK-NEXT: stp q0, q3, [x1] ; CHECK-NEXT: ret %op1 = load <16 x float>, <16 x float>* %a %res = fptoui <16 x float> %op1 to <16 x i16> @@ -553,17 +477,11 @@ define <2 x i16> @fcvtzu_v2f64_v2i16(<2 x double> %op1) #0 { ; CHECK-LABEL: fcvtzu_v2f64_v2i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = fptoui <2 x double> %op1 to <2 x i16> ret <2 x i16> %res @@ -572,34 +490,26 @@ define <4 x i16> @fcvtzu_v4f64_v4i16(<4 x double>* %a) #0 { ; CHECK-LABEL: fcvtzu_v4f64_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z2.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov 
x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #16] -; CHECK-NEXT: stp w10, w11, [sp, #8] -; CHECK-NEXT: ldp d1, d0, [sp, #8] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: mov z1.s, z1.s[1] ; CHECK-NEXT: fmov w8, s0 ; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: strh w8, [sp, #28] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: strh w8, [sp, #12] ; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: strh w9, [sp, #30] -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w8, [sp, #24] -; CHECK-NEXT: strh w10, [sp, #26] -; CHECK-NEXT: ldr d0, [sp, #24] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: strh w10, [sp, #14] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %op1 = load <4 x double>, <4 x double>* %a %res = fptoui <4 x double> %op1 to <4 x i16> @@ -609,57 +519,40 @@ define <8 x i16> @fcvtzu_v8f64_v8i16(<8 x double>* %a) #0 { ; CHECK-LABEL: fcvtzu_v8f64_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: .cfi_def_cfa_offset 48 -; CHECK-NEXT: ldp q1, q0, [x0, #32] +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldp q0, q1, [x0, #32] ; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d -; CHECK-NEXT: fmov x9, d1 ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x10, d4 -; CHECK-NEXT: mov z0.d, z1.d[1] -; CHECK-NEXT: ldp q2, q3, [x0] -; CHECK-NEXT: movprfx z1, z3 -; CHECK-NEXT: fcvtzs z1.d, p0/m, z3.d -; CHECK-NEXT: mov z3.d, z1.d[1] -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w10, [sp, #8] -; CHECK-NEXT: fmov x8, d3 -; CHECK-NEXT: fmov x12, d0 -; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d -; CHECK-NEXT: mov z0.d, z2.d[1] -; CHECK-NEXT: stp w11, w8, [sp, #16] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: stp w9, w12, [sp, #24] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: ldp d0, d2, [sp, #8] -; CHECK-NEXT: ldr d1, [sp, #24] -; CHECK-NEXT: stp w9, w10, [sp] -; CHECK-NEXT: ldr d3, [sp] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w10, s3 -; CHECK-NEXT: strh w8, [sp, #44] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s ; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z1.s, z3.s[1] -; CHECK-NEXT: strh w8, [sp, #40] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w10, [sp, #32] -; CHECK-NEXT: strh w9, [sp, #42] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strh w8, [sp, #36] -; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z4.s, z1.s[1] +; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z1.s, z0.s[1] ; CHECK-NEXT: mov z0.s, z2.s[1] -; CHECK-NEXT: strh w9, [sp, #34] -; CHECK-NEXT: strh w8, [sp, #46] +; CHECK-NEXT: mov z2.s, z3.s[1] +; CHECK-NEXT: strh w8, [sp] ; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w8, [sp, #38] -; CHECK-NEXT: ldr q0, [sp, #32] -; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w8, [sp, #6] +; 
CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: strh w9, [sp, #2] +; CHECK-NEXT: ldr q0, [sp], #16 ; CHECK-NEXT: ret %op1 = load <8 x double>, <8 x double>* %a %res = fptoui <8 x double> %op1 to <8 x i16> @@ -669,108 +562,73 @@ define void @fcvtzu_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 { ; CHECK-LABEL: fcvtzu_v16f64_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: .cfi_def_cfa_offset 96 -; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q2, q3, [x0, #32] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: ldp q4, q5, [x0] +; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z6.s, z3.s[1] +; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.d +; CHECK-NEXT: mov z3.s, z2.s[1] +; CHECK-NEXT: uzp1 z4.s, z4.s, z4.s +; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.d +; CHECK-NEXT: ldp q0, q1, [x0, #64] +; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: mov z5.s, z5.s[1] ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: fmov x10, d0 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: ldp q2, q7, [x0, #96] +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: mov z4.s, z4.s[1] +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: movprfx z3, z7 +; CHECK-NEXT: fcvtzs z3.d, p0/m, z7.d +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: fmov w10, s4 ; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d -; CHECK-NEXT: mov z6.d, z1.d[1] -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: fmov x9, d6 -; CHECK-NEXT: mov z6.d, z0.d[1] -; CHECK-NEXT: fmov x11, d6 -; CHECK-NEXT: ldp q7, q1, [x0] -; CHECK-NEXT: ldp q2, q3, [x0, #64] -; CHECK-NEXT: ldp q4, q5, [x0, #96] -; CHECK-NEXT: stp w8, w9, [sp, #32] -; CHECK-NEXT: stp w10, w11, [sp, #48] -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: movprfx z0, z7 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z7.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #40] -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: movprfx z0, z5 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z5.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: movprfx z0, z4 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z4.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #16] -; CHECK-NEXT: stp w10, w11, [sp, #8] -; CHECK-NEXT: movprfx z0, z3 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z3.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z2 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.d -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: ldp d0, d2, [sp, #32] -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #24] -; CHECK-NEXT: ldr d1, [sp, #48] -; CHECK-NEXT: ldr d3, [sp] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: stp w10, w11, [sp, #56] 
-; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: strh w8, [sp, #76] -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w9, [sp, #64] -; CHECK-NEXT: strh w8, [sp, #72] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w10, [sp, #74] -; CHECK-NEXT: strh w8, [sp, #68] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z2.s[1] -; CHECK-NEXT: mov z2.s, z3.s[1] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: ldr d2, [sp, #24] -; CHECK-NEXT: strh w8, [sp, #78] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: ldp d1, d0, [sp, #8] -; CHECK-NEXT: strh w9, [sp, #66] -; CHECK-NEXT: strh w8, [sp, #70] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov z0.s, z1.s[1] -; CHECK-NEXT: strh w8, [sp, #92] -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: ldr d1, [sp, #56] -; CHECK-NEXT: strh w9, [sp, #94] -; CHECK-NEXT: strh w8, [sp, #88] +; CHECK-NEXT: strh w8, [sp, #28] ; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z2.s, z2.s[1] +; CHECK-NEXT: mov z3.s, z3.s[1] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: strh w9, [sp, #6] ; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z4.s, z2.s[1] +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: strh w9, [sp, #20] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strh w10, [sp, #16] ; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #84] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z1.s[1] -; CHECK-NEXT: strh w9, [sp, #80] -; CHECK-NEXT: strh w10, [sp, #86] -; CHECK-NEXT: strh w8, [sp, #90] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w8, [sp, #82] -; CHECK-NEXT: ldp q1, q0, [sp, #64] +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strh w9, [sp, #26] +; CHECK-NEXT: strh w10, [sp, #22] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: ldp q1, q0, [sp] ; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #96 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %op1 = load <16 x double>, <16 x double>* %a %res = fptoui <16 x double> %op1 to <16 x i16> @@ -785,17 +643,11 @@ define <1 x i32> @fcvtzu_v1f64_v1i32(<1 x double> %op1) #0 { ; CHECK-LABEL: fcvtzu_v1f64_v1i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: fcvtzu z0.d, p0/m, z0.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = fptoui <1 x double> %op1 to <1 x i32> ret <1 x i32> %res @@ -804,17 +656,11 @@ define <2 x i32> @fcvtzu_v2f64_v2i32(<2 x double> %op1) #0 { ; CHECK-LABEL: fcvtzu_v2f64_v2i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: fcvtzu z0.d, p0/m, z0.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = fptoui <2 x double> %op1 to <2 x i32> ret <2 x i32> %res @@ -823,22 +669,15 @@ 
define <4 x i32> @fcvtzu_v4f64_v4i32(<4 x double>* %a) #0 { ; CHECK-LABEL: fcvtzu_v4f64_v4i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: fcvtzu z1.d, p0/m, z1.d ; CHECK-NEXT: fcvtzu z0.d, p0/m, z0.d -; CHECK-NEXT: mov z2.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvtzu z0.d, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z2.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p0, z0.s, z2.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <4 x double>, <4 x double>* %a %res = fptoui <4 x double> %op1 to <4 x i32> @@ -848,37 +687,21 @@ define void @fcvtzu_v8f64_v8i32(<8 x double>* %a, <8 x i32>* %b) #0 { ; CHECK-LABEL: fcvtzu_v8f64_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ptrue p1.s, vl2 ; CHECK-NEXT: fcvtzu z0.d, p0/m, z0.d +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: ldp q3, q2, [x0, #32] -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvtzu z0.d, p0/m, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fcvtzu z1.d, p0/m, z2.d -; CHECK-NEXT: movprfx z2, z3 -; CHECK-NEXT: fcvtzu z2.d, p0/m, z3.d -; CHECK-NEXT: fmov x9, d4 -; CHECK-NEXT: mov z3.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d3 -; CHECK-NEXT: mov z0.d, z1.d[1] -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: mov z0.d, z2.d[1] -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: fmov x10, d2 -; CHECK-NEXT: fmov x11, d0 -; CHECK-NEXT: stp w8, w9, [sp, #24] -; CHECK-NEXT: stp w10, w11, [sp, #16] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: fcvtzu z1.d, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p1, z0.s, z1.s +; CHECK-NEXT: fcvtzu z3.d, p0/m, z3.d +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fcvtzu z2.d, p0/m, z2.d +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: splice z3.s, p1, z3.s, z2.s +; CHECK-NEXT: stp q0, q3, [x1] ; CHECK-NEXT: ret %op1 = load <8 x double>, <8 x double>* %a %res = fptoui <8 x double> %op1 to <8 x i32> @@ -1216,24 +1039,11 @@ define <4 x i16> @fcvtzs_v4f32_v4i16(<4 x float> %op1) #0 { ; CHECK-LABEL: fcvtzs_v4f32_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = fptosi <4 x float> %op1 to <4 x i16> ret 
<4 x i16> %res @@ -1242,35 +1052,15 @@ define <8 x i16> @fcvtzs_v8f32_v8i16(<8 x float>* %a) #0 { ; CHECK-LABEL: fcvtzs_v8f32_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: fcvtzs z1.s, p0/m, z1.s -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z5.s, z1.s[2] ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z2.s, z0.s[3] -; CHECK-NEXT: mov z3.s, z0.s[2] -; CHECK-NEXT: mov z4.s, z0.s[1] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w9, [sp] -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z0.s, z1.s[3] -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: strh w10, [sp, #14] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strh w9, [sp, #10] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strh w10, [sp, #6] -; CHECK-NEXT: strh w8, [sp, #4] -; CHECK-NEXT: strh w9, [sp, #2] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z2.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p0, z0.h, z2.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <8 x float>, <8 x float>* %a %res = fptosi <8 x float> %op1 to <8 x i16> @@ -1280,64 +1070,21 @@ define void @fcvtzs_v16f32_v16i16(<16 x float>* %a, <16 x i16>* %b) #0 { ; CHECK-LABEL: fcvtzs_v16f32_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ptrue p1.h, vl4 ; CHECK-NEXT: fcvtzs z0.s, p0/m, z0.s -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov z5.s, z0.s[2] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: ldp q3, q2, [x0, #32] ; CHECK-NEXT: fcvtzs z1.s, p0/m, z1.s -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z2.s, z1.s[3] -; CHECK-NEXT: mov z3.s, z1.s[2] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: ldp q6, q7, [x0, #32] -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: mov z4.s, z1.s[1] -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: strh w9, [sp] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: strh w10, [sp, #14] -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strh w9, [sp, #10] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: strh w10, [sp, #6] -; CHECK-NEXT: strh w8, [sp, #4] -; CHECK-NEXT: movprfx z1, z7 -; CHECK-NEXT: fcvtzs z1.s, p0/m, z7.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z1.s[3] -; CHECK-NEXT: mov z2.s, z1.s[2] -; CHECK-NEXT: mov z3.s, z1.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: movprfx z1, z6 -; CHECK-NEXT: fcvtzs z1.s, p0/m, z6.s -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w8, [sp, #2] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z4.s, z1.s[3] -; CHECK-NEXT: strh w9, [sp, #24] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: strh w10, [sp, #16] -; CHECK-NEXT: fmov w10, s3 -; CHECK-NEXT: mov z5.s, z1.s[2] -; CHECK-NEXT: mov z6.s, z1.s[1] -; CHECK-NEXT: strh w8, [sp, #30] -; CHECK-NEXT: fmov w8, s4 -; CHECK-NEXT: strh w9, [sp, #28] -; CHECK-NEXT: fmov w9, s5 -; CHECK-NEXT: strh w10, [sp, #26] -; CHECK-NEXT: fmov w10, s6 -; CHECK-NEXT: strh w8, [sp, #22] -; CHECK-NEXT: strh w9, [sp, #20] -; CHECK-NEXT: strh w10, [sp, #18] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p1, z0.h, z1.h 
+; CHECK-NEXT: fcvtzs z3.s, p0/m, z3.s +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: fcvtzs z2.s, p0/m, z2.s +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: splice z3.h, p1, z3.h, z2.h +; CHECK-NEXT: stp q0, q3, [x1] ; CHECK-NEXT: ret %op1 = load <16 x float>, <16 x float>* %a %res = fptosi <16 x float> %op1 to <16 x i16> @@ -1481,17 +1228,11 @@ define <2 x i16> @fcvtzs_v2f64_v2i16(<2 x double> %op1) #0 { ; CHECK-LABEL: fcvtzs_v2f64_v2i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = fptosi <2 x double> %op1 to <2 x i16> ret <2 x i16> %res @@ -1500,34 +1241,26 @@ define <4 x i16> @fcvtzs_v4f64_v4i16(<4 x double>* %a) #0 { ; CHECK-LABEL: fcvtzs_v4f64_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z2.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #16] -; CHECK-NEXT: stp w10, w11, [sp, #8] -; CHECK-NEXT: ldp d1, d0, [sp, #8] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: mov z1.s, z1.s[1] ; CHECK-NEXT: fmov w8, s0 ; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: strh w8, [sp, #28] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: strh w8, [sp, #12] ; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: strh w9, [sp, #30] -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w8, [sp, #24] -; CHECK-NEXT: strh w10, [sp, #26] -; CHECK-NEXT: ldr d0, [sp, #24] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: strh w10, [sp, #14] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %op1 = load <4 x double>, <4 x double>* %a %res = fptosi <4 x double> %op1 to <4 x i16> @@ -1537,57 +1270,40 @@ define <8 x i16> @fcvtzs_v8f64_v8i16(<8 x double>* %a) #0 { ; CHECK-LABEL: fcvtzs_v8f64_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: .cfi_def_cfa_offset 48 -; CHECK-NEXT: ldp q1, q0, [x0, #32] +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldp q0, q1, [x0, #32] ; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d -; CHECK-NEXT: fmov x9, d1 ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x10, d4 -; CHECK-NEXT: mov z0.d, z1.d[1] -; CHECK-NEXT: ldp q2, q3, [x0] -; CHECK-NEXT: movprfx z1, z3 -; CHECK-NEXT: fcvtzs z1.d, p0/m, z3.d -; CHECK-NEXT: mov z3.d, z1.d[1] -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w10, [sp, #8] -; CHECK-NEXT: fmov x8, d3 -; CHECK-NEXT: fmov x12, d0 -; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d -; CHECK-NEXT: mov z0.d, z2.d[1] -; 
CHECK-NEXT: stp w11, w8, [sp, #16] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: stp w9, w12, [sp, #24] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: ldp d0, d2, [sp, #8] -; CHECK-NEXT: ldr d1, [sp, #24] -; CHECK-NEXT: stp w9, w10, [sp] -; CHECK-NEXT: ldr d3, [sp] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w10, s3 -; CHECK-NEXT: strh w8, [sp, #44] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s ; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z1.s, z3.s[1] -; CHECK-NEXT: strh w8, [sp, #40] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w10, [sp, #32] -; CHECK-NEXT: strh w9, [sp, #42] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strh w8, [sp, #36] -; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z4.s, z1.s[1] +; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z1.s, z0.s[1] ; CHECK-NEXT: mov z0.s, z2.s[1] -; CHECK-NEXT: strh w9, [sp, #34] -; CHECK-NEXT: strh w8, [sp, #46] +; CHECK-NEXT: mov z2.s, z3.s[1] +; CHECK-NEXT: strh w8, [sp] ; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w8, [sp, #38] -; CHECK-NEXT: ldr q0, [sp, #32] -; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: strh w9, [sp, #2] +; CHECK-NEXT: ldr q0, [sp], #16 ; CHECK-NEXT: ret %op1 = load <8 x double>, <8 x double>* %a %res = fptosi <8 x double> %op1 to <8 x i16> @@ -1597,108 +1313,73 @@ define void @fcvtzs_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 { ; CHECK-LABEL: fcvtzs_v16f64_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: .cfi_def_cfa_offset 96 -; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q2, q3, [x0, #32] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: ldp q4, q5, [x0] +; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z6.s, z3.s[1] +; CHECK-NEXT: fcvtzs z4.d, p0/m, z4.d +; CHECK-NEXT: mov z3.s, z2.s[1] +; CHECK-NEXT: uzp1 z4.s, z4.s, z4.s +; CHECK-NEXT: fcvtzs z5.d, p0/m, z5.d +; CHECK-NEXT: ldp q0, q1, [x0, #64] +; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: mov z5.s, z5.s[1] ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: fmov x10, d0 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: ldp q2, q7, [x0, #96] +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: mov z4.s, z4.s[1] +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: movprfx z3, z7 +; CHECK-NEXT: fcvtzs z3.d, p0/m, z7.d +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: 
fmov w10, s4 ; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d -; CHECK-NEXT: mov z6.d, z1.d[1] -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: fmov x9, d6 -; CHECK-NEXT: mov z6.d, z0.d[1] -; CHECK-NEXT: fmov x11, d6 -; CHECK-NEXT: ldp q7, q1, [x0] -; CHECK-NEXT: ldp q2, q3, [x0, #64] -; CHECK-NEXT: ldp q4, q5, [x0, #96] -; CHECK-NEXT: stp w8, w9, [sp, #32] -; CHECK-NEXT: stp w10, w11, [sp, #48] -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: movprfx z0, z7 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z7.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #40] -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: movprfx z0, z5 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z5.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: movprfx z0, z4 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z4.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #16] -; CHECK-NEXT: stp w10, w11, [sp, #8] -; CHECK-NEXT: movprfx z0, z3 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z3.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z2 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z2.d -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: ldp d0, d2, [sp, #32] -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #24] -; CHECK-NEXT: ldr d1, [sp, #48] -; CHECK-NEXT: ldr d3, [sp] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: stp w10, w11, [sp, #56] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: strh w8, [sp, #76] -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w9, [sp, #64] -; CHECK-NEXT: strh w8, [sp, #72] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w10, [sp, #74] -; CHECK-NEXT: strh w8, [sp, #68] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z2.s[1] -; CHECK-NEXT: mov z2.s, z3.s[1] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: ldr d2, [sp, #24] -; CHECK-NEXT: strh w8, [sp, #78] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: ldp d1, d0, [sp, #8] -; CHECK-NEXT: strh w9, [sp, #66] -; CHECK-NEXT: strh w8, [sp, #70] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov z0.s, z1.s[1] -; CHECK-NEXT: strh w8, [sp, #92] -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: ldr d1, [sp, #56] -; CHECK-NEXT: strh w9, [sp, #94] -; CHECK-NEXT: strh w8, [sp, #88] +; CHECK-NEXT: strh w8, [sp, #28] ; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z2.s, z2.s[1] +; CHECK-NEXT: mov z3.s, z3.s[1] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: strh w9, [sp, #6] ; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z4.s, z2.s[1] +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: strh w9, [sp, #20] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strh w10, [sp, #16] ; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #84] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z1.s[1] -; CHECK-NEXT: strh w9, [sp, #80] -; CHECK-NEXT: strh w10, [sp, #86] -; CHECK-NEXT: strh w8, [sp, #90] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w8, [sp, #82] -; CHECK-NEXT: ldp q1, q0, [sp, #64] +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: 
strh w9, [sp, #26] +; CHECK-NEXT: strh w10, [sp, #22] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: ldp q1, q0, [sp] ; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #96 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %op1 = load <16 x double>, <16 x double>* %a %res = fptosi <16 x double> %op1 to <16 x i16> @@ -1713,17 +1394,11 @@ define <1 x i32> @fcvtzs_v1f64_v1i32(<1 x double> %op1) #0 { ; CHECK-LABEL: fcvtzs_v1f64_v1i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = fptosi <1 x double> %op1 to <1 x i32> ret <1 x i32> %res @@ -1732,17 +1407,11 @@ define <2 x i32> @fcvtzs_v2f64_v2i32(<2 x double> %op1) #0 { ; CHECK-LABEL: fcvtzs_v2f64_v2i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = fptosi <2 x double> %op1 to <2 x i32> ret <2 x i32> %res @@ -1751,22 +1420,15 @@ define <4 x i32> @fcvtzs_v4f64_v4i32(<4 x double>* %a) #0 { ; CHECK-LABEL: fcvtzs_v4f64_v4i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d -; CHECK-NEXT: mov z2.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z2.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p0, z0.s, z2.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <4 x double>, <4 x double>* %a %res = fptosi <4 x double> %op1 to <4 x i32> @@ -1776,37 +1438,21 @@ define void @fcvtzs_v8f64_v8i32(<8 x double>* %a, <8 x i32>* %b) #0 { ; CHECK-LABEL: fcvtzs_v8f64_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ptrue p1.s, vl2 ; CHECK-NEXT: fcvtzs z0.d, p0/m, z0.d +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: ldp q3, q2, [x0, #32] -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: fcvtzs z1.d, p0/m, z2.d -; CHECK-NEXT: movprfx z2, z3 -; CHECK-NEXT: fcvtzs z2.d, p0/m, z3.d -; CHECK-NEXT: fmov x9, d4 -; CHECK-NEXT: mov z3.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d3 -; CHECK-NEXT: mov z0.d, z1.d[1] -; 
CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: mov z0.d, z2.d[1] -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: fmov x10, d2 -; CHECK-NEXT: fmov x11, d0 -; CHECK-NEXT: stp w8, w9, [sp, #24] -; CHECK-NEXT: stp w10, w11, [sp, #16] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: fcvtzs z1.d, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p1, z0.s, z1.s +; CHECK-NEXT: fcvtzs z3.d, p0/m, z3.d +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fcvtzs z2.d, p0/m, z2.d +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: splice z3.s, p1, z3.s, z2.s +; CHECK-NEXT: stp q0, q3, [x1] ; CHECK-NEXT: ret %op1 = load <8 x double>, <8 x double>* %a %res = fptosi <8 x double> %op1 to <8 x i32> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll @@ -82,7 +82,7 @@ ret <8 x half> %sel } -define void @select_v16f16(ptr %a, ptr %b) #0 { +define void @select_v16f16(<16 x half>* %a, <16 x half>* %b) #0 { ; CHECK-LABEL: select_v16f16: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x1] @@ -104,11 +104,11 @@ ; CHECK-NEXT: orr z1.d, z2.d, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret - %op1 = load <16 x half>, ptr %a - %op2 = load <16 x half>, ptr %b + %op1 = load <16 x half>, <16 x half>* %a + %op2 = load <16 x half>, <16 x half>* %b %mask = fcmp oeq <16 x half> %op1, %op2 %sel = select <16 x i1> %mask, <16 x half> %op1, <16 x half> %op2 - store <16 x half> %sel, ptr %a + store <16 x half> %sel, <16 x half>* %a ret void } @@ -159,7 +159,7 @@ ret <4 x float> %sel } -define void @select_v8f32(ptr %a, ptr %b) #0 { +define void @select_v8f32(<8 x float>* %a, <8 x float>* %b) #0 { ; CHECK-LABEL: select_v8f32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x1] @@ -181,46 +181,26 @@ ; CHECK-NEXT: orr z1.d, z2.d, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret - %op1 = load <8 x float>, ptr %a - %op2 = load <8 x float>, ptr %b + %op1 = load <8 x float>, <8 x float>* %a + %op2 = load <8 x float>, <8 x float>* %b %mask = fcmp oeq <8 x float> %op1, %op2 %sel = select <8 x i1> %mask, <8 x float> %op1, <8 x float> %op2 - store <8 x float> %sel, ptr %a + store <8 x float> %sel, <8 x float>* %a ret void } -define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, <1 x i1> %mask) #0 { -; CHECK-LABEL: select_v1f64: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w0, #0x1 -; CHECK-NEXT: mov x9, #-1 -; CHECK-NEXT: csetm x8, ne -; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 -; CHECK-NEXT: fmov d3, x9 -; CHECK-NEXT: fmov d2, x8 -; CHECK-NEXT: eor z3.d, z2.d, z3.d -; CHECK-NEXT: and z0.d, z0.d, z2.d -; CHECK-NEXT: and z1.d, z1.d, z3.d -; CHECK-NEXT: orr z0.d, z0.d, z1.d -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: ret - %sel = select <1 x i1> %mask, <1 x double> %op1, <1 x double> %op2 - ret <1 x double> %sel -} - define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, <2 x i1> %mask) #0 { ; CHECK-LABEL: select_v2f64: ; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, .LCPI8_0 -; CHECK-NEXT: adrp x9, .LCPI8_1 +; CHECK-NEXT: adrp x8, .LCPI7_0 +; CHECK-NEXT: adrp x9, .LCPI7_1 ; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 ; 
CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: uunpklo z2.d, z2.s -; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI8_0] -; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI8_1] +; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI7_0] +; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI7_1] ; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z3.d ; CHECK-NEXT: asr z2.d, p0/m, z2.d, z3.d ; CHECK-NEXT: eor z3.d, z2.d, z4.d @@ -233,14 +213,14 @@ ret <2 x double> %sel } -define void @select_v4f64(ptr %a, ptr %b) #0 { +define void @select_v4f64(<4 x double>* %a, <4 x double>* %b) #0 { ; CHECK-LABEL: select_v4f64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q0, q1, [x1] -; CHECK-NEXT: adrp x8, .LCPI9_0 +; CHECK-NEXT: adrp x8, .LCPI8_0 ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: ldp q3, q2, [x0] -; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI9_0] +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI8_0] ; CHECK-NEXT: fcmeq p1.d, p0/z, z2.d, z1.d ; CHECK-NEXT: fcmeq p0.d, p0/z, z3.d, z0.d ; CHECK-NEXT: mov z5.d, p1/z, #-1 // =0xffffffffffffffff @@ -255,11 +235,11 @@ ; CHECK-NEXT: orr z1.d, z2.d, z1.d ; CHECK-NEXT: stp q0, q1, [x0] ; CHECK-NEXT: ret - %op1 = load <4 x double>, ptr %a - %op2 = load <4 x double>, ptr %b + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b %mask = fcmp oeq <4 x double> %op1, %op2 %sel = select <4 x i1> %mask, <4 x double> %op1, <4 x double> %op2 - store <4 x double> %sel, ptr %a + store <4 x double> %sel, <4 x double>* %a ret void } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-frame-offsets.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-frame-offsets.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-frame-offsets.ll @@ -0,0 +1,52 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +; REQUIRES: asserts + +target triple = "aarch64-unknown-linux-gnu" + +; Ensure that only no offset frame indexes are folded into SVE load/stores when +; accessing fixed width objects. +define void @foo(<2 x i64>* %a) #0 { +; CHECK-LABEL: foo: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [sp, #16] +; CHECK-NEXT: str q0, [sp], #32 +; CHECK-NEXT: ret +entry: + %r0 = alloca <2 x i64> + %r1 = alloca <2 x i64> + %r = load volatile <2 x i64>, <2 x i64>* %a + store volatile <2 x i64> %r, <2 x i64>* %r0 + store volatile <2 x i64> %r, <2 x i64>* %r1 + ret void +} + +define void @foo2(<4 x i64>* %a) #0 { +; CHECK-LABEL: foo2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: sub x9, sp, #80 +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x0, #16] +; CHECK-NEXT: str q1, [sp, #48] +; CHECK-NEXT: str q0, [sp, #32] +; CHECK-NEXT: str q1, [sp, #16] +; CHECK-NEXT: str q0, [sp] +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret +entry: + %r0 = alloca <4 x i64> + %r1 = alloca <4 x i64> + %r = load volatile <4 x i64>, <4 x i64>* %a + store volatile <4 x i64> %r, <4 x i64>* %r0 + store volatile <4 x i64> %r, <4 x i64>* %r1 + ret void +} + +attributes #0 = { nounwind "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll @@ -0,0 +1,464 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; ICMP EQ +; + +define <4 x i8> @icmp_eq_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i8> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i8> + ret <4 x i8> %sext +} + +define <8 x i8> @icmp_eq_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <8 x i8> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i8> + ret <8 x i8> %sext +} + +define <16 x i8> @icmp_eq_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <16 x i8> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} + +define void @icmp_eq_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: icmp_eq_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z2.b +; CHECK-NEXT: mov z0.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.b, p0/z, z1.b, z3.b +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %cmp = icmp eq <32 x i8> %op1, %op2 + %sext = sext <32 x i1> %cmp to 
<32 x i8> + store <32 x i8> %sext, <32 x i8>* %a + ret void +} + +define <2 x i16> @icmp_eq_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i16> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i16> + ret <2 x i16> %sext +} + +define <4 x i16> @icmp_eq_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i16> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i16> + ret <4 x i16> %sext +} + +define <8 x i16> @icmp_eq_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <8 x i16> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} + +define void @icmp_eq_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_eq_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp eq <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +define <2 x i32> @icmp_eq_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i32> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i32> + ret <2 x i32> %sext +} + +define <4 x i32> @icmp_eq_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i32> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x 
i32> + ret <4 x i32> %sext +} + +define void @icmp_eq_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_eq_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z2.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp eq <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +define <1 x i64> @icmp_eq_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +; CHECK-LABEL: icmp_eq_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <1 x i64> %op1, %op2 + %sext = sext <1 x i1> %cmp to <1 x i64> + ret <1 x i64> %sext +} + +define <2 x i64> @icmp_eq_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i64> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i64> + ret <2 x i64> %sext +} + +define void @icmp_eq_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_eq_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp eq <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP NE +; + +define void @icmp_ne_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: icmp_ne_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpne p1.b, p0/z, z0.b, z2.b +; CHECK-NEXT: mov z0.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpne p0.b, p0/z, z1.b, z3.b +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %cmp = icmp ne <32 x i8> %op1, %op2 + %sext = sext <32 x i1> %cmp to <32 x i8> + store <32 x i8> %sext, <32 x i8>* %a + ret void +} + +; +; ICMP SGE +; + +define void @icmp_sge_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_sge_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpge p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpge p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: 
mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp sge <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +; +; ICMP SGT +; + +define void @icmp_sgt_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_sgt_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpgt p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpgt p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp sgt <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +; +; ICMP SLE +; + +define void @icmp_sle_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_sle_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpge p1.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpge p0.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp sle <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +; +; ICMP SLT +; + +define void @icmp_slt_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_slt_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp slt <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +; +; ICMP UGE +; + +define void @icmp_uge_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_uge_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphs p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphs p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp uge <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP UGT +; + +define void @icmp_ugt_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ugt_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphi p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphi p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // 
=0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ugt <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP ULE +; + +define void @icmp_ule_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ule_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphs p1.d, p0/z, z2.d, z0.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphs p0.d, p0/z, z3.d, z1.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ule <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP ULT +; + +define void @icmp_ult_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ult_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z0.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphi p0.d, p0/z, z3.d, z1.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ult <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll @@ -10,34 +10,21 @@ define <4 x i8> @sdiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { ; CHECK-LABEL: sdiv_v4i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, .LCPI0_0 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.h, vl4 ; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_0] -; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z2.h -; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: asr z1.h, p0/m, z1.h, z2.h -; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: sunpklo z1.s, z1.h ; CHECK-NEXT: sunpklo z0.s, z0.h +; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sdiv <4 x i8> %op1, %op2 ret <4 x i8> %res @@ -46,8 +33,6 @@ define <8 x i8> @sdiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { ; CHECK-LABEL: sdiv_v8i8: ; CHECK: // %bb.0: -; 
CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 @@ -60,31 +45,8 @@ ; CHECK-NEXT: sdivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z2.h, z0.h[6] -; CHECK-NEXT: mov z3.h, z0.h[5] -; CHECK-NEXT: mov z4.h, z0.h[4] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z5.h, z0.h[3] -; CHECK-NEXT: mov z6.h, z0.h[2] -; CHECK-NEXT: mov z0.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sdiv <8 x i8> %op1, %op2 ret <8 x i8> %res @@ -196,27 +158,14 @@ define <4 x i16> @sdiv_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { ; CHECK-LABEL: sdiv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: sunpklo z1.s, z1.h ; CHECK-NEXT: sunpklo z0.s, z0.h ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sdiv <4 x i16> %op1, %op2 ret <4 x i16> %res @@ -363,31 +312,18 @@ define <4 x i8> @udiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { ; CHECK-LABEL: udiv_v4i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, .LCPI14_0 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI14_0] -; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: uunpklo z1.s, z1.h ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = udiv <4 x i8> %op1, %op2 ret <4 x i8> %res @@ -396,8 +332,6 @@ define <8 x i8> @udiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 
{ ; CHECK-LABEL: udiv_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 @@ -410,31 +344,8 @@ ; CHECK-NEXT: udivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z2.h, z0.h[6] -; CHECK-NEXT: mov z3.h, z0.h[5] -; CHECK-NEXT: mov z4.h, z0.h[4] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z5.h, z0.h[3] -; CHECK-NEXT: mov z6.h, z0.h[2] -; CHECK-NEXT: mov z0.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = udiv <8 x i8> %op1, %op2 ret <8 x i8> %res @@ -544,27 +455,14 @@ define <4 x i16> @udiv_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { ; CHECK-LABEL: udiv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: uunpklo z1.s, z1.h ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = udiv <4 x i16> %op1, %op2 ret <4 x i16> %res diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll @@ -0,0 +1,1142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s +
+target triple = "aarch64-unknown-linux-gnu" +
+; Although SVE immediate packing should be fully tested using scalable vectors, +; these tests protect against the possibility that scalable nodes, resulting +; from lowering fixed length vector operations, trigger different isel patterns.
+ +; +; ADD +; + +define void @add_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: add_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: add z1.b, z1.b, z0.b +; CHECK-NEXT: add z0.b, z2.b, z0.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i32 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = add <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @add_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: add_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI1_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI1_0] +; CHECK-NEXT: add z1.h, z1.h, z0.h +; CHECK-NEXT: add z0.h, z2.h, z0.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = add <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @add_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: add_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI2_0] +; CHECK-NEXT: add z1.s, z1.s, z0.s +; CHECK-NEXT: add z0.s, z2.s, z0.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = add <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @add_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: add_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: add z1.d, z1.d, z0.d +; CHECK-NEXT: add z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = add <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; AND +; + +define void @and_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: and_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: and z1.d, z1.d, z0.d +; CHECK-NEXT: and z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i32 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = and <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @and_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: and_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: and z1.d, z1.d, z0.d +; CHECK-NEXT: and z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = and <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a 
+ ret void +} + +define void @and_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: and_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI6_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI6_0] +; CHECK-NEXT: and z1.d, z1.d, z0.d +; CHECK-NEXT: and z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = and <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @and_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: and_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI7_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI7_0] +; CHECK-NEXT: and z1.d, z1.d, z0.d +; CHECK-NEXT: and z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = and <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; ASHR +; + +define void @ashr_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: ashr_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI8_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI8_0] +; CHECK-NEXT: asr z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: asrr z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i32 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = ashr <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @ashr_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: ashr_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI9_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI9_0] +; CHECK-NEXT: asr z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: asrr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = ashr <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @ashr_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: ashr_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI10_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI10_0] +; CHECK-NEXT: asr z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: asrr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = ashr <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @ashr_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: ashr_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI11_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI11_0] +; CHECK-NEXT: asr z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: asrr z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x 
i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = ashr <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; ICMP +; + +define void @icmp_eq_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: icmp_eq_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI12_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI12_0] +; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z0.b +; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z0.b +; CHECK-NEXT: mov z0.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %cmp = icmp eq <32 x i8> %op1, %op2 + %res = sext <32 x i1> %cmp to <32 x i8> + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @icmp_sge_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: icmp_sge_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI13_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI13_0] +; CHECK-NEXT: cmpge p1.h, p0/z, z1.h, z0.h +; CHECK-NEXT: cmpge p0.h, p0/z, z2.h, z0.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %cmp = icmp sge <16 x i16> %op1, %op2 + %res = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @icmp_sgt_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: icmp_sgt_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: cmpgt p1.s, p0/z, z1.s, z0.s +; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 -8, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %cmp = icmp sgt <8 x i32> %op1, %op2 + %res = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @icmp_ult_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: icmp_ult_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI15_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI15_0] +; CHECK-NEXT: cmphi p1.d, p0/z, z0.d, z1.d +; CHECK-NEXT: cmphi p0.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %cmp = icmp ult <4 x i64> %op1, %op2 + %res = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; LSHR +; + +define void @lshr_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: lshr_v32i8: +; CHECK: // 
%bb.0: +; CHECK-NEXT: adrp x8, .LCPI16_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI16_0] +; CHECK-NEXT: lsr z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: lsrr z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = lshr <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @lshr_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: lshr_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI17_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI17_0] +; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: lsrr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = lshr <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @lshr_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: lshr_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI18_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI18_0] +; CHECK-NEXT: lsr z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: lsrr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = lshr <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @lshr_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: lshr_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI19_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI19_0] +; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: lsrr z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = lshr <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; MUL +; + +define void @mul_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: mul_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI20_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI20_0] +; CHECK-NEXT: mul z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = mul <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @mul_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: mul_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI21_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI21_0] +; CHECK-NEXT: mul z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x 
i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = mul <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @mul_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: mul_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI22_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI22_0] +; CHECK-NEXT: mul z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = mul <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @mul_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: mul_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI23_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI23_0] +; CHECK-NEXT: mul z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = mul <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; OR +; + +define void @or_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: or_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI24_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI24_0] +; CHECK-NEXT: orr z1.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = or <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @or_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: or_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI25_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI25_0] +; CHECK-NEXT: orr z1.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = or <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @or_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: or_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI26_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI26_0] +; CHECK-NEXT: orr z1.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = or <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @or_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: or_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI27_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI27_0] +; CHECK-NEXT: orr z1.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; 
CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = or <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SHL +; + +define void @shl_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: shl_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI28_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI28_0] +; CHECK-NEXT: lsl z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: lslr z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = shl <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @shl_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: shl_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI29_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI29_0] +; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: lslr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = shl <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @shl_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: shl_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI30_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI30_0] +; CHECK-NEXT: lsl z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: lslr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = shl <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @shl_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: shl_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI31_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI31_0] +; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: lslr z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = shl <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SMAX +; + +define void @smax_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: smax_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI32_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI32_0] +; CHECK-NEXT: smax z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: smax z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @smax_v16i16(<16 x 
i16>* %a) #0 { +; CHECK-LABEL: smax_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI33_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI33_0] +; CHECK-NEXT: smax z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: smax z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @smax_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: smax_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI34_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI34_0] +; CHECK-NEXT: smax z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: smax z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @smax_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: smax_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI35_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI35_0] +; CHECK-NEXT: smax z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: smax z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SMIN +; + +define void @smin_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: smin_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI36_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI36_0] +; CHECK-NEXT: smin z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: smin z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @smin_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: smin_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI37_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI37_0] +; CHECK-NEXT: smin z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: smin z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @smin_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: smin_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI38_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; 
CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI38_0] +; CHECK-NEXT: smin z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: smin z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @smin_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: smin_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI39_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI39_0] +; CHECK-NEXT: smin z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: smin z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SUB +; + +define void @sub_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: sub_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI40_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI40_0] +; CHECK-NEXT: sub z1.b, z1.b, z0.b +; CHECK-NEXT: sub z0.b, z2.b, z0.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = sub <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @sub_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: sub_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI41_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI41_0] +; CHECK-NEXT: sub z1.h, z1.h, z0.h +; CHECK-NEXT: sub z0.h, z2.h, z0.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = sub <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @sub_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: sub_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI42_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI42_0] +; CHECK-NEXT: sub z1.s, z1.s, z0.s +; CHECK-NEXT: sub z0.s, z2.s, z0.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = sub <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @sub_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: sub_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI43_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI43_0] +; CHECK-NEXT: sub z1.d, z1.d, z0.d +; CHECK-NEXT: sub z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = sub <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret 
void +} + +; +; UMAX +; + +define void @umax_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: umax_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI44_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI44_0] +; CHECK-NEXT: umax z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: umax z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @umax_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: umax_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI45_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI45_0] +; CHECK-NEXT: umax z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: umax z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @umax_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: umax_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI46_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI46_0] +; CHECK-NEXT: umax z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: umax z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @umax_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: umax_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI47_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI47_0] +; CHECK-NEXT: umax z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: umax z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; UMIN +; + +define void @umin_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: umin_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI48_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI48_0] +; CHECK-NEXT: umin z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: umin z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @umin_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: umin_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, 
.LCPI49_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI49_0] +; CHECK-NEXT: umin z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: umin z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @umin_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: umin_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI50_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI50_0] +; CHECK-NEXT: umin z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: umin z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @umin_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: umin_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI51_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI51_0] +; CHECK-NEXT: umin z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: umin z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; XOR +; + +define void @xor_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: xor_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI52_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI52_0] +; CHECK-NEXT: eor z1.d, z1.d, z0.d +; CHECK-NEXT: eor z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = xor <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @xor_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: xor_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI53_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI53_0] +; CHECK-NEXT: eor z1.d, z1.d, z0.d +; CHECK-NEXT: eor z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = xor <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @xor_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: xor_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI54_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI54_0] +; CHECK-NEXT: eor z1.d, z1.d, z0.d +; CHECK-NEXT: eor z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = 
shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = xor <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @xor_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: xor_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI55_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI55_0] +; CHECK-NEXT: eor z1.d, z1.d, z0.d +; CHECK-NEXT: eor z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = xor <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>) +declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>) +declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>) +declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>) + +declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>) +declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>) +declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>) +declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>) + +declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>) +declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>) +declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>) +declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>) + +declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>) +declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>) +declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>) +declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll @@ -7,7 +7,7 @@ ; SMAX ; -define <8 x i8> @smax_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +define <8 x i8> @smax_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v8i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -20,7 +20,7 @@ ret <8 x i8> %res } -define <16 x i8> @smax_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +define <16 x i8> @smax_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v16i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -33,15 +33,14 @@ ret <16 x i8> %res } -define void @smax_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +define void @smax_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.b, vl16 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: smax z0.b, p0/m, z0.b, z2.b -; CHECK-NEXT: smax z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] +; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: st1b { z0.b }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, <32 x i8>* %a %op2 = load <32 x i8>, <32 x i8>* %b @@ -50,7 +49,7 @@ ret void } -define <4 x i16> @smax_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +define <4 x i16> @smax_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v4i16: ; 
CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -63,7 +62,7 @@ ret <4 x i16> %res } -define <8 x i16> @smax_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +define <8 x i16> @smax_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -76,15 +75,14 @@ ret <8 x i16> %res } -define void @smax_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +define void @smax_v16i16(<16 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: smax z0.h, p0/m, z0.h, z2.h -; CHECK-NEXT: smax z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: st1h { z0.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <16 x i16>, <16 x i16>* %a %op2 = load <16 x i16>, <16 x i16>* %b @@ -93,7 +91,7 @@ ret void } -define <2 x i32> @smax_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +define <2 x i32> @smax_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v2i32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -106,7 +104,7 @@ ret <2 x i32> %res } -define <4 x i32> @smax_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +define <4 x i32> @smax_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -119,15 +117,14 @@ ret <4 x i32> %res } -define void @smax_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +define void @smax_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.s, vl4 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: smax z0.s, p0/m, z0.s, z2.s -; CHECK-NEXT: smax z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: st1w { z0.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <8 x i32>, <8 x i32>* %a %op2 = load <8 x i32>, <8 x i32>* %b @@ -137,7 +134,7 @@ } ; Vector i64 max are not legal for NEON so use SVE when available. -define <1 x i64> @smax_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +define <1 x i64> @smax_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v1i64: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -151,7 +148,7 @@ } ; Vector i64 max are not legal for NEON so use SVE when available. 
-define <2 x i64> @smax_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +define <2 x i64> @smax_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -164,15 +161,14 @@ ret <2 x i64> %res } -define void @smax_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +define void @smax_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: smax_v4i64: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.d, vl2 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: smax z0.d, p0/m, z0.d, z2.d -; CHECK-NEXT: smax z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <4 x i64>, <4 x i64>* %a %op2 = load <4 x i64>, <4 x i64>* %b @@ -185,7 +181,7 @@ ; SMIN ; -define <8 x i8> @smin_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +define <8 x i8> @smin_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v8i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -198,7 +194,7 @@ ret <8 x i8> %res } -define <16 x i8> @smin_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +define <16 x i8> @smin_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v16i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -211,15 +207,14 @@ ret <16 x i8> %res } -define void @smin_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +define void @smin_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.b, vl16 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: smin z0.b, p0/m, z0.b, z2.b -; CHECK-NEXT: smin z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] +; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: st1b { z0.b }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, <32 x i8>* %a %op2 = load <32 x i8>, <32 x i8>* %b @@ -228,7 +223,7 @@ ret void } -define <4 x i16> @smin_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +define <4 x i16> @smin_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v4i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -241,7 +236,7 @@ ret <4 x i16> %res } -define <8 x i16> @smin_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +define <8 x i16> @smin_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -254,15 +249,14 @@ ret <8 x i16> %res } -define void @smin_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +define void @smin_v16i16(<16 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: smin z0.h, p0/m, z0.h, z2.h -; CHECK-NEXT: smin z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: st1h { z0.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <16 x i16>, <16 x i16>* %a %op2 = load <16 x i16>, 
<16 x i16>* %b @@ -271,7 +265,7 @@ ret void } -define <2 x i32> @smin_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +define <2 x i32> @smin_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v2i32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -284,7 +278,7 @@ ret <2 x i32> %res } -define <4 x i32> @smin_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +define <4 x i32> @smin_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -297,15 +291,14 @@ ret <4 x i32> %res } -define void @smin_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +define void @smin_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.s, vl4 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: smin z0.s, p0/m, z0.s, z2.s -; CHECK-NEXT: smin z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: st1w { z0.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <8 x i32>, <8 x i32>* %a %op2 = load <8 x i32>, <8 x i32>* %b @@ -315,7 +308,7 @@ } ; Vector i64 min are not legal for NEON so use SVE when available. -define <1 x i64> @smin_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +define <1 x i64> @smin_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v1i64: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -329,7 +322,7 @@ } ; Vector i64 min are not legal for NEON so use SVE when available. -define <2 x i64> @smin_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +define <2 x i64> @smin_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -342,15 +335,14 @@ ret <2 x i64> %res } -define void @smin_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +define void @smin_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: smin_v4i64: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.d, vl2 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: smin z0.d, p0/m, z0.d, z2.d -; CHECK-NEXT: smin z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <4 x i64>, <4 x i64>* %a %op2 = load <4 x i64>, <4 x i64>* %b @@ -363,7 +355,7 @@ ; UMAX ; -define <8 x i8> @umax_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +define <8 x i8> @umax_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v8i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -376,7 +368,7 @@ ret <8 x i8> %res } -define <16 x i8> @umax_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +define <16 x i8> @umax_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v16i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -389,15 +381,14 @@ ret <16 x i8> %res } -define void @umax_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +define void @umax_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.b, 
vl16 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: umax z0.b, p0/m, z0.b, z2.b -; CHECK-NEXT: umax z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] +; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: st1b { z0.b }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, <32 x i8>* %a %op2 = load <32 x i8>, <32 x i8>* %b @@ -406,7 +397,7 @@ ret void } -define <4 x i16> @umax_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +define <4 x i16> @umax_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v4i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -419,7 +410,7 @@ ret <4 x i16> %res } -define <8 x i16> @umax_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +define <8 x i16> @umax_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -432,15 +423,14 @@ ret <8 x i16> %res } -define void @umax_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +define void @umax_v16i16(<16 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: umax z0.h, p0/m, z0.h, z2.h -; CHECK-NEXT: umax z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: st1h { z0.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <16 x i16>, <16 x i16>* %a %op2 = load <16 x i16>, <16 x i16>* %b @@ -449,7 +439,7 @@ ret void } -define <2 x i32> @umax_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +define <2 x i32> @umax_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v2i32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -462,7 +452,7 @@ ret <2 x i32> %res } -define <4 x i32> @umax_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +define <4 x i32> @umax_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -475,15 +465,14 @@ ret <4 x i32> %res } -define void @umax_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +define void @umax_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.s, vl4 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: umax z0.s, p0/m, z0.s, z2.s -; CHECK-NEXT: umax z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: st1w { z0.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <8 x i32>, <8 x i32>* %a %op2 = load <8 x i32>, <8 x i32>* %b @@ -493,7 +482,7 @@ } ; Vector i64 max are not legal for NEON so use SVE when available. -define <1 x i64> @umax_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +define <1 x i64> @umax_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v1i64: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -507,7 +496,7 @@ } ; Vector i64 max are not legal for NEON so use SVE when available. 
-define <2 x i64> @umax_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +define <2 x i64> @umax_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -520,15 +509,14 @@ ret <2 x i64> %res } -define void @umax_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +define void @umax_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: umax_v4i64: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.d, vl2 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: umax z0.d, p0/m, z0.d, z2.d -; CHECK-NEXT: umax z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <4 x i64>, <4 x i64>* %a %op2 = load <4 x i64>, <4 x i64>* %b @@ -541,7 +529,7 @@ ; UMIN ; -define <8 x i8> @umin_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +define <8 x i8> @umin_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v8i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -554,7 +542,7 @@ ret <8 x i8> %res } -define <16 x i8> @umin_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +define <16 x i8> @umin_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v16i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -567,15 +555,14 @@ ret <16 x i8> %res } -define void @umin_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +define void @umin_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.b, vl16 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: umin z0.b, p0/m, z0.b, z2.b -; CHECK-NEXT: umin z1.b, p0/m, z1.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] +; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: st1b { z0.b }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, <32 x i8>* %a %op2 = load <32 x i8>, <32 x i8>* %b @@ -584,7 +571,7 @@ ret void } -define <4 x i16> @umin_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +define <4 x i16> @umin_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v4i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -597,7 +584,7 @@ ret <4 x i16> %res } -define <8 x i16> @umin_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +define <8 x i16> @umin_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -610,15 +597,14 @@ ret <8 x i16> %res } -define void @umin_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +define void @umin_v16i16(<16 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: umin z0.h, p0/m, z0.h, z2.h -; CHECK-NEXT: umin z1.h, p0/m, z1.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: st1h { z0.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <16 x i16>, <16 x i16>* %a %op2 = load <16 x i16>, 
<16 x i16>* %b @@ -627,7 +613,7 @@ ret void } -define <2 x i32> @umin_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +define <2 x i32> @umin_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v2i32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -640,7 +626,7 @@ ret <2 x i32> %res } -define <4 x i32> @umin_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +define <4 x i32> @umin_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -653,15 +639,14 @@ ret <4 x i32> %res } -define void @umin_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +define void @umin_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.s, vl4 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: umin z0.s, p0/m, z0.s, z2.s -; CHECK-NEXT: umin z1.s, p0/m, z1.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: st1w { z0.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <8 x i32>, <8 x i32>* %a %op2 = load <8 x i32>, <8 x i32>* %b @@ -671,7 +656,7 @@ } ; Vector i64 min are not legal for NEON so use SVE when available. -define <1 x i64> @umin_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +define <1 x i64> @umin_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v1i64: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 @@ -685,7 +670,7 @@ } ; Vector i64 min are not legal for NEON so use SVE when available. -define <2 x i64> @umin_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +define <2 x i64> @umin_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 @@ -698,15 +683,14 @@ ret <2 x i64> %res } -define void @umin_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +define void @umin_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { ; CHECK-LABEL: umin_v4i64: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q1, [x0] -; CHECK-NEXT: ptrue p0.d, vl2 -; CHECK-NEXT: ldp q2, q3, [x1] -; CHECK-NEXT: umin z0.d, p0/m, z0.d, z2.d -; CHECK-NEXT: umin z1.d, p0/m, z1.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <4 x i64>, <4 x i64>* %a %op2 = load <4 x i64>, <4 x i64>* %b diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll @@ -77,131 +77,41 @@ define void @smulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { ; CHECK-LABEL: smulh_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: adrp x8, .LCPI3_0 ; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: sunpklo z0.h, z2.b -; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 -; CHECK-NEXT: sunpklo z2.h, z2.b -; CHECK-NEXT: ldp q4, q5, [x1] +; CHECK-NEXT: sunpklo z4.h, z1.b +; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8 
+; CHECK-NEXT: sunpklo z1.h, z1.b +; CHECK-NEXT: ldp q3, q2, [x1] +; CHECK-NEXT: sunpklo z5.h, z0.b +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: sunpklo z0.h, z0.b ; CHECK-NEXT: sunpklo z6.h, z3.b ; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8 ; CHECK-NEXT: sunpklo z3.h, z3.b -; CHECK-NEXT: sunpklo z1.h, z4.b -; CHECK-NEXT: ext z4.b, z4.b, z4.b, #8 -; CHECK-NEXT: sunpklo z4.h, z4.b -; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: sunpklo z7.h, z5.b -; CHECK-NEXT: ext z5.b, z5.b, z5.b, #8 -; CHECK-NEXT: ldr q16, [x8, :lo12:.LCPI3_0] -; CHECK-NEXT: sunpklo z5.h, z5.b -; CHECK-NEXT: mul z3.h, p0/m, z3.h, z5.h -; CHECK-NEXT: movprfx z5, z6 +; CHECK-NEXT: sunpklo z7.h, z2.b +; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 +; CHECK-NEXT: sunpklo z2.h, z2.b +; CHECK-NEXT: mul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: movprfx z3, z4 +; CHECK-NEXT: mul z3.h, p0/m, z3.h, z6.h ; CHECK-NEXT: mul z5.h, p0/m, z5.h, z7.h -; CHECK-NEXT: mul z2.h, p0/m, z2.h, z4.h ; CHECK-NEXT: movprfx z4, z5 -; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z16.h -; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z16.h -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: mov z5.h, z3.h[7] -; CHECK-NEXT: mov z6.h, z3.h[6] -; CHECK-NEXT: mov z7.h, z3.h[5] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w9, [sp, #16] -; CHECK-NEXT: strb w8, [sp, #24] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: fmov w9, s7 -; CHECK-NEXT: mov z17.h, z3.h[4] -; CHECK-NEXT: mov z18.h, z3.h[3] -; CHECK-NEXT: mov z19.h, z3.h[2] -; CHECK-NEXT: strb w10, [sp, #31] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: strb w8, [sp, #30] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w9, [sp, #29] -; CHECK-NEXT: fmov w9, s19 -; CHECK-NEXT: mov z20.h, z3.h[1] -; CHECK-NEXT: mov z3.h, z4.h[7] -; CHECK-NEXT: mov z21.h, z4.h[6] -; CHECK-NEXT: strb w10, [sp, #28] -; CHECK-NEXT: fmov w10, s20 -; CHECK-NEXT: strb w8, [sp, #27] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #26] -; CHECK-NEXT: fmov w9, s21 -; CHECK-NEXT: mov z22.h, z4.h[5] -; CHECK-NEXT: mov z23.h, z4.h[4] -; CHECK-NEXT: mov z24.h, z4.h[3] -; CHECK-NEXT: strb w10, [sp, #25] -; CHECK-NEXT: fmov w10, s22 -; CHECK-NEXT: strb w8, [sp, #23] -; CHECK-NEXT: fmov w8, s23 -; CHECK-NEXT: strb w9, [sp, #22] -; CHECK-NEXT: fmov w9, s24 -; CHECK-NEXT: mov z25.h, z4.h[2] -; CHECK-NEXT: mov z26.h, z4.h[1] -; CHECK-NEXT: strb w10, [sp, #21] -; CHECK-NEXT: fmov w10, s25 -; CHECK-NEXT: strb w8, [sp, #20] -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z16.h -; CHECK-NEXT: strb w9, [sp, #19] -; CHECK-NEXT: fmov w8, s26 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z16.h -; CHECK-NEXT: mov z2.h, z1.h[7] -; CHECK-NEXT: mov z3.h, z1.h[6] -; CHECK-NEXT: strb w10, [sp, #18] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strb w8, [sp, #17] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strb w9, [sp, #8] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: mov z4.h, z1.h[5] -; CHECK-NEXT: mov z5.h, z1.h[4] -; CHECK-NEXT: mov z6.h, z1.h[3] -; CHECK-NEXT: strb w10, [sp] -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strb w8, [sp, #15] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strb w9, [sp, #14] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: mov z7.h, z1.h[2] -; CHECK-NEXT: mov z16.h, z1.h[1] -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: strb w10, [sp, #13] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w8, [sp, #12] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: strb w9, [sp, #11] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z17.h, z0.h[6] -; 
CHECK-NEXT: mov z18.h, z0.h[5] -; CHECK-NEXT: mov z19.h, z0.h[4] -; CHECK-NEXT: strb w10, [sp, #10] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: strb w8, [sp, #9] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w9, [sp, #7] -; CHECK-NEXT: fmov w9, s19 -; CHECK-NEXT: mov z20.h, z0.h[3] -; CHECK-NEXT: mov z21.h, z0.h[2] -; CHECK-NEXT: mov z22.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #6] -; CHECK-NEXT: fmov w10, s20 -; CHECK-NEXT: strb w8, [sp, #5] -; CHECK-NEXT: fmov w8, s21 -; CHECK-NEXT: strb w9, [sp, #4] -; CHECK-NEXT: fmov w9, s22 -; CHECK-NEXT: strb w10, [sp, #3] -; CHECK-NEXT: strb w8, [sp, #2] -; CHECK-NEXT: strb w9, [sp, #1] -; CHECK-NEXT: ldp q0, q1, [sp] -; CHECK-NEXT: stp q0, q1, [x0] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z2.h +; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z2.h +; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z2.h +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: uzp1 z2.b, z3.b, z3.b +; CHECK-NEXT: uzp1 z3.b, z4.b, z4.b +; CHECK-NEXT: splice z2.b, p0, z2.b, z1.b +; CHECK-NEXT: splice z3.b, p0, z3.b, z0.b +; CHECK-NEXT: stp q2, q3, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, <32 x i8>* %a %op2 = load <32 x i8>, <32 x i8>* %b @@ -516,131 +426,41 @@ define void @umulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { ; CHECK-LABEL: umulh_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: adrp x8, .LCPI17_0 ; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: uunpklo z0.h, z2.b -; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 -; CHECK-NEXT: uunpklo z2.h, z2.b -; CHECK-NEXT: ldp q4, q5, [x1] +; CHECK-NEXT: uunpklo z4.h, z1.b +; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8 +; CHECK-NEXT: uunpklo z1.h, z1.b +; CHECK-NEXT: ldp q3, q2, [x1] +; CHECK-NEXT: uunpklo z5.h, z0.b +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: uunpklo z6.h, z3.b ; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8 ; CHECK-NEXT: uunpklo z3.h, z3.b -; CHECK-NEXT: uunpklo z1.h, z4.b -; CHECK-NEXT: ext z4.b, z4.b, z4.b, #8 -; CHECK-NEXT: uunpklo z4.h, z4.b -; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: uunpklo z7.h, z5.b -; CHECK-NEXT: ext z5.b, z5.b, z5.b, #8 -; CHECK-NEXT: ldr q16, [x8, :lo12:.LCPI17_0] -; CHECK-NEXT: uunpklo z5.h, z5.b -; CHECK-NEXT: mul z3.h, p0/m, z3.h, z5.h -; CHECK-NEXT: movprfx z5, z6 +; CHECK-NEXT: uunpklo z7.h, z2.b +; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 +; CHECK-NEXT: uunpklo z2.h, z2.b +; CHECK-NEXT: mul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI17_0] +; CHECK-NEXT: movprfx z3, z4 +; CHECK-NEXT: mul z3.h, p0/m, z3.h, z6.h ; CHECK-NEXT: mul z5.h, p0/m, z5.h, z7.h -; CHECK-NEXT: mul z2.h, p0/m, z2.h, z4.h ; CHECK-NEXT: movprfx z4, z5 -; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z16.h -; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z16.h -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: mov z5.h, z3.h[7] -; CHECK-NEXT: mov z6.h, z3.h[6] -; CHECK-NEXT: mov z7.h, z3.h[5] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w9, [sp, #16] -; CHECK-NEXT: strb w8, [sp, #24] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: fmov w9, s7 -; CHECK-NEXT: mov z17.h, z3.h[4] -; CHECK-NEXT: mov z18.h, z3.h[3] -; CHECK-NEXT: mov z19.h, z3.h[2] -; CHECK-NEXT: strb w10, [sp, #31] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: strb w8, [sp, #30] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w9, [sp, 
#29] -; CHECK-NEXT: fmov w9, s19 -; CHECK-NEXT: mov z20.h, z3.h[1] -; CHECK-NEXT: mov z3.h, z4.h[7] -; CHECK-NEXT: mov z21.h, z4.h[6] -; CHECK-NEXT: strb w10, [sp, #28] -; CHECK-NEXT: fmov w10, s20 -; CHECK-NEXT: strb w8, [sp, #27] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #26] -; CHECK-NEXT: fmov w9, s21 -; CHECK-NEXT: mov z22.h, z4.h[5] -; CHECK-NEXT: mov z23.h, z4.h[4] -; CHECK-NEXT: mov z24.h, z4.h[3] -; CHECK-NEXT: strb w10, [sp, #25] -; CHECK-NEXT: fmov w10, s22 -; CHECK-NEXT: strb w8, [sp, #23] -; CHECK-NEXT: fmov w8, s23 -; CHECK-NEXT: strb w9, [sp, #22] -; CHECK-NEXT: fmov w9, s24 -; CHECK-NEXT: mov z25.h, z4.h[2] -; CHECK-NEXT: mov z26.h, z4.h[1] -; CHECK-NEXT: strb w10, [sp, #21] -; CHECK-NEXT: fmov w10, s25 -; CHECK-NEXT: strb w8, [sp, #20] -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z16.h -; CHECK-NEXT: strb w9, [sp, #19] -; CHECK-NEXT: fmov w8, s26 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z16.h -; CHECK-NEXT: mov z2.h, z1.h[7] -; CHECK-NEXT: mov z3.h, z1.h[6] -; CHECK-NEXT: strb w10, [sp, #18] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strb w8, [sp, #17] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strb w9, [sp, #8] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: mov z4.h, z1.h[5] -; CHECK-NEXT: mov z5.h, z1.h[4] -; CHECK-NEXT: mov z6.h, z1.h[3] -; CHECK-NEXT: strb w10, [sp] -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strb w8, [sp, #15] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strb w9, [sp, #14] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: mov z7.h, z1.h[2] -; CHECK-NEXT: mov z16.h, z1.h[1] -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: strb w10, [sp, #13] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w8, [sp, #12] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: strb w9, [sp, #11] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z17.h, z0.h[6] -; CHECK-NEXT: mov z18.h, z0.h[5] -; CHECK-NEXT: mov z19.h, z0.h[4] -; CHECK-NEXT: strb w10, [sp, #10] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: strb w8, [sp, #9] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w9, [sp, #7] -; CHECK-NEXT: fmov w9, s19 -; CHECK-NEXT: mov z20.h, z0.h[3] -; CHECK-NEXT: mov z21.h, z0.h[2] -; CHECK-NEXT: mov z22.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #6] -; CHECK-NEXT: fmov w10, s20 -; CHECK-NEXT: strb w8, [sp, #5] -; CHECK-NEXT: fmov w8, s21 -; CHECK-NEXT: strb w9, [sp, #4] -; CHECK-NEXT: fmov w9, s22 -; CHECK-NEXT: strb w10, [sp, #3] -; CHECK-NEXT: strb w8, [sp, #2] -; CHECK-NEXT: strb w9, [sp, #1] -; CHECK-NEXT: ldp q0, q1, [sp] -; CHECK-NEXT: stp q0, q1, [x0] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z2.h +; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z2.h +; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z2.h +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: uzp1 z2.b, z3.b, z3.b +; CHECK-NEXT: uzp1 z3.b, z4.b, z4.b +; CHECK-NEXT: splice z2.b, p0, z2.b, z1.b +; CHECK-NEXT: splice z3.b, p0, z3.b, z0.b +; CHECK-NEXT: stp q2, q3, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, <32 x i8>* %a %op2 = load <32 x i8>, <32 x i8>* %b diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll @@ -10,36 +10,22 @@ define <4 x i8> @srem_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { ; CHECK-LABEL: srem_v4i8: ; CHECK: // %bb.0: 
-; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, .LCPI0_0 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.h, vl4 ; CHECK-NEXT: ptrue p1.s, vl4 ; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_0] -; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z2.h -; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: asr z1.h, p0/m, z1.h, z2.h +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: sunpklo z2.s, z1.h ; CHECK-NEXT: sunpklo z3.s, z0.h ; CHECK-NEXT: sdivr z2.s, p1/m, z2.s, z3.s -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z4.s, z2.s[2] -; CHECK-NEXT: mov z2.s, z2.s[1] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = srem <4 x i8> %op1, %op2 ret <4 x i8> %res @@ -48,8 +34,6 @@ define <8 x i8> @srem_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { ; CHECK-LABEL: srem_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: sunpklo z2.h, z1.b @@ -63,33 +47,9 @@ ; CHECK-NEXT: sdivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.b, vl8 ; CHECK-NEXT: uzp1 z2.h, z2.h, z4.h -; CHECK-NEXT: mov z3.h, z2.h[7] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: mov z4.h, z2.h[6] -; CHECK-NEXT: mov z5.h, z2.h[5] -; CHECK-NEXT: mov z6.h, z2.h[4] -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: mov z7.h, z2.h[3] -; CHECK-NEXT: mov z16.h, z2.h[2] -; CHECK-NEXT: mov z2.h, z2.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b ; CHECK-NEXT: mls z0.b, p0/m, z2.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = srem <8 x i8> %op1, %op2 ret <8 x i8> %res @@ -187,8 +147,6 @@ define <4 x i16> @srem_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { ; CHECK-LABEL: srem_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 @@ -196,21 +154,9 @@ ; CHECK-NEXT: sunpklo z3.s, z0.h ; CHECK-NEXT: sdivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.h, vl4 -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z4.s, z2.s[2] -; CHECK-NEXT: mov z2.s, z2.s[1] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d2, [sp, #8] +; 
CHECK-NEXT: uzp1 z2.h, z2.h, z2.h ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = srem <4 x i16> %op1, %op2 ret <4 x i16> %res @@ -379,34 +325,20 @@ define <4 x i8> @urem_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { ; CHECK-LABEL: urem_v4i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, .LCPI13_0 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI13_0] -; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: uunpklo z2.s, z1.h ; CHECK-NEXT: uunpklo z3.s, z0.h ; CHECK-NEXT: udivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.h, vl4 -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z4.s, z2.s[2] -; CHECK-NEXT: mov z2.s, z2.s[1] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = urem <4 x i8> %op1, %op2 ret <4 x i8> %res @@ -415,8 +347,6 @@ define <8 x i8> @urem_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { ; CHECK-LABEL: urem_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: uunpklo z2.h, z1.b @@ -430,33 +360,9 @@ ; CHECK-NEXT: udivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.b, vl8 ; CHECK-NEXT: uzp1 z2.h, z2.h, z4.h -; CHECK-NEXT: mov z3.h, z2.h[7] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: mov z4.h, z2.h[6] -; CHECK-NEXT: mov z5.h, z2.h[5] -; CHECK-NEXT: mov z6.h, z2.h[4] -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: mov z7.h, z2.h[3] -; CHECK-NEXT: mov z16.h, z2.h[2] -; CHECK-NEXT: mov z2.h, z2.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b ; CHECK-NEXT: mls z0.b, p0/m, z2.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = urem <8 x i8> %op1, %op2 ret <8 x i8> %res @@ -554,8 +460,6 @@ define <4 x i16> @urem_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { ; CHECK-LABEL: urem_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 @@ -563,21 +467,9 @@ ; CHECK-NEXT: uunpklo z3.s, z0.h ; CHECK-NEXT: udivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.h, vl4 -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z4.s, z2.s[2] -; CHECK-NEXT: mov z2.s, z2.s[1] -; CHECK-NEXT: fmov w9, s3 -; 
CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = urem <4 x i16> %op1, %op2 ret <4 x i16> %res diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll @@ -350,38 +350,18 @@ ret void } -define <1 x i64> @select_v1i64(<1 x i64> %op1, <1 x i64> %op2, i1 %mask) #0 { -; CHECK-LABEL: select_v1i64: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w0, #0x1 -; CHECK-NEXT: mov x9, #-1 -; CHECK-NEXT: csetm x8, ne -; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 -; CHECK-NEXT: fmov d3, x9 -; CHECK-NEXT: fmov d2, x8 -; CHECK-NEXT: eor z3.d, z2.d, z3.d -; CHECK-NEXT: and z0.d, z0.d, z2.d -; CHECK-NEXT: and z1.d, z1.d, z3.d -; CHECK-NEXT: orr z0.d, z0.d, z1.d -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: ret - %sel = select i1 %mask, <1 x i64> %op1, <1 x i64> %op2 - ret <1 x i64> %sel -} - define <2 x i64> @select_v2i64(<2 x i64> %op1, <2 x i64> %op2, i1 %mask) #0 { ; CHECK-LABEL: select_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: tst w0, #0x1 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 -; CHECK-NEXT: adrp x9, .LCPI12_0 +; CHECK-NEXT: adrp x9, .LCPI11_0 ; CHECK-NEXT: csetm x8, ne ; CHECK-NEXT: stp x8, x8, [sp, #-16]! ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldr q2, [sp] -; CHECK-NEXT: ldr q3, [x9, :lo12:.LCPI12_0] +; CHECK-NEXT: ldr q3, [x9, :lo12:.LCPI11_0] ; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: eor z3.d, z2.d, z3.d ; CHECK-NEXT: and z1.d, z1.d, z3.d @@ -401,11 +381,11 @@ ; CHECK-NEXT: csetm x8, ne ; CHECK-NEXT: ldr q1, [x0, #16] ; CHECK-NEXT: ldr q2, [x1] -; CHECK-NEXT: adrp x9, .LCPI13_0 +; CHECK-NEXT: adrp x9, .LCPI12_0 ; CHECK-NEXT: ldr q3, [x1, #16] ; CHECK-NEXT: stp x8, x8, [sp, #-16]! 
; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI13_0] +; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI12_0] ; CHECK-NEXT: ldr q5, [sp] ; CHECK-NEXT: eor z4.d, z5.d, z4.d ; CHECK-NEXT: and z1.d, z1.d, z5.d diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll @@ -255,24 +255,11 @@ define <2 x half> @ucvtf_v2i32_v2f16(<2 x i32> %op1) #0 { ; CHECK-LABEL: ucvtf_v2i32_v2f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ucvtf z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = uitofp <2 x i32> %op1 to <2 x half> ret <2 x half> %res @@ -281,24 +268,11 @@ define <4 x half> @ucvtf_v4i32_v4f16(<4 x i32> %op1) #0 { ; CHECK-LABEL: ucvtf_v4i32_v4f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ucvtf z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = uitofp <4 x i32> %op1 to <4 x half> ret <4 x half> %res @@ -307,35 +281,15 @@ define <8 x half> @ucvtf_v8i32_v8f16(<8 x i32>* %a) #0 { ; CHECK-LABEL: ucvtf_v8i32_v8f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ucvtf z1.h, p0/m, z1.s -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z5.s, z1.s[2] ; CHECK-NEXT: ucvtf z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z2.s, z0.s[3] -; CHECK-NEXT: mov z3.s, z0.s[2] -; CHECK-NEXT: mov z4.s, z0.s[1] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w9, [sp] -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z0.s, z1.s[3] -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: strh w10, [sp, #14] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strh w9, [sp, #10] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strh w10, [sp, #6] -; CHECK-NEXT: strh w8, [sp, #4] -; CHECK-NEXT: strh w9, [sp, #2] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z2.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p0, z0.h, z2.h +; CHECK-NEXT: // kill: def $q0 killed 
$q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <8 x i32>, <8 x i32>* %a %res = uitofp <8 x i32> %op1 to <8 x half> @@ -345,64 +299,21 @@ define void @ucvtf_v16i32_v16f16(<16 x i32>* %a, <16 x half>* %b) #0 { ; CHECK-LABEL: ucvtf_v16i32_v16f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ptrue p1.h, vl4 ; CHECK-NEXT: ucvtf z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov z5.s, z0.s[2] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: ldp q3, q2, [x0, #32] ; CHECK-NEXT: ucvtf z1.h, p0/m, z1.s -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: mov z2.s, z1.s[3] -; CHECK-NEXT: mov z3.s, z1.s[2] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: ldp q6, q7, [x0, #32] -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: mov z4.s, z1.s[1] -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: strh w9, [sp] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: strh w10, [sp, #14] -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strh w9, [sp, #10] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: strh w10, [sp, #6] -; CHECK-NEXT: strh w8, [sp, #4] -; CHECK-NEXT: movprfx z1, z7 -; CHECK-NEXT: ucvtf z1.h, p0/m, z7.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z0.s, z1.s[3] -; CHECK-NEXT: mov z2.s, z1.s[2] -; CHECK-NEXT: mov z3.s, z1.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: movprfx z1, z6 -; CHECK-NEXT: ucvtf z1.h, p0/m, z6.s -; CHECK-NEXT: fmov w10, s1 -; CHECK-NEXT: strh w8, [sp, #2] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z4.s, z1.s[3] -; CHECK-NEXT: strh w9, [sp, #24] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: strh w10, [sp, #16] -; CHECK-NEXT: fmov w10, s3 -; CHECK-NEXT: mov z5.s, z1.s[2] -; CHECK-NEXT: mov z6.s, z1.s[1] -; CHECK-NEXT: strh w8, [sp, #30] -; CHECK-NEXT: fmov w8, s4 -; CHECK-NEXT: strh w9, [sp, #28] -; CHECK-NEXT: fmov w9, s5 -; CHECK-NEXT: strh w10, [sp, #26] -; CHECK-NEXT: fmov w10, s6 -; CHECK-NEXT: strh w8, [sp, #22] -; CHECK-NEXT: strh w9, [sp, #20] -; CHECK-NEXT: strh w10, [sp, #18] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p1, z0.h, z1.h +; CHECK-NEXT: ucvtf z3.h, p0/m, z3.s +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: ucvtf z2.h, p0/m, z2.s +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: splice z3.h, p1, z3.h, z2.h +; CHECK-NEXT: stp q0, q3, [x1] ; CHECK-NEXT: ret %op1 = load <16 x i32>, <16 x i32>* %a %res = uitofp <16 x i32> %op1 to <16 x half> @@ -540,37 +451,19 @@ define <4 x half> @ucvtf_v4i64_v4f16(<4 x i64>* %a) #0 { ; CHECK-LABEL: ucvtf_v4i64_v4f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ucvtf z1.s, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.d -; CHECK-NEXT: mov z2.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: ucvtf z0.s, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: splice z1.s, p0, z1.s, z0.s ; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: ldr q0, [sp] -; CHECK-NEXT: fcvt z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, 
z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #24] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #30] -; CHECK-NEXT: strh w10, [sp, #28] -; CHECK-NEXT: strh w8, [sp, #26] -; CHECK-NEXT: ldr d0, [sp, #24] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: movprfx z0, z1 +; CHECK-NEXT: fcvt z0.h, p0/m, z1.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %op1 = load <4 x i64>, <4 x i64>* %a %res = uitofp <4 x i64> %op1 to <4 x half> @@ -580,62 +473,29 @@ define <8 x half> @ucvtf_v8i64_v8f16(<8 x i64>* %a) #0 { ; CHECK-LABEL: ucvtf_v8i64_v8f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: .cfi_def_cfa_offset 48 -; CHECK-NEXT: ldp q1, q0, [x0, #32] +; CHECK-NEXT: ldp q0, q1, [x0, #32] ; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: ucvtf z1.s, p0/m, z1.d -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: ptrue p1.s, vl2 +; CHECK-NEXT: ptrue p2.s ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.d -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: mov z0.d, z1.d[1] -; CHECK-NEXT: fmov x10, d4 -; CHECK-NEXT: fmov x12, d0 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: ucvtf z1.s, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p1, z0.s, z1.s +; CHECK-NEXT: ucvtf z3.s, p0/m, z3.d +; CHECK-NEXT: fcvt z0.h, p2/m, z0.s +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s ; CHECK-NEXT: ucvtf z2.s, p0/m, z2.d +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: splice z3.s, p1, z3.s, z2.s ; CHECK-NEXT: movprfx z1, z3 -; CHECK-NEXT: ucvtf z1.s, p0/m, z3.d -; CHECK-NEXT: mov z0.d, z2.d[1] -; CHECK-NEXT: stp w8, w10, [sp, #24] -; CHECK-NEXT: mov z3.d, z1.d[1] -; CHECK-NEXT: stp w9, w12, [sp, #16] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: ldr q0, [sp, #16] -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: fmov x8, d3 -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fcvt z0.h, p0/m, z0.s -; CHECK-NEXT: stp w11, w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: stp w9, w10, [sp] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: strh w8, [sp, #40] -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: ldr q1, [sp] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strh w8, [sp, #46] -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: fcvt z0.h, p0/m, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z3.s, z0.s[1] -; CHECK-NEXT: strh w9, [sp, #44] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strh w10, [sp, #42] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #32] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strh w9, [sp, #38] -; CHECK-NEXT: strh w10, [sp, #36] -; CHECK-NEXT: strh w8, [sp, #34] -; CHECK-NEXT: ldr q0, [sp, #32] -; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: fcvt z1.h, p2/m, z3.s +; CHECK-NEXT: uzp1 z2.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p0, z0.h, z2.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <8 x i64>, <8 x i64>* %a %res = uitofp <8 x i64> %op1 to <8 x half> @@ -649,17 +509,11 @@ define <2 x float> @ucvtf_v2i64_v2f32(<2 x i64> %op1) #0 { ; CHECK-LABEL: ucvtf_v2i64_v2f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; 
CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = uitofp <2 x i64> %op1 to <2 x float> ret <2 x float> %res @@ -668,22 +522,15 @@ define <4 x float> @ucvtf_v4i64_v4f32(<4 x i64>* %a) #0 { ; CHECK-LABEL: ucvtf_v4i64_v4f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ucvtf z1.s, p0/m, z1.d ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.d -; CHECK-NEXT: mov z2.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: ucvtf z0.s, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z2.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p0, z0.s, z2.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <4 x i64>, <4 x i64>* %a %res = uitofp <4 x i64> %op1 to <4 x float> @@ -693,37 +540,21 @@ define void @ucvtf_v8i64_v8f32(<8 x i64>* %a, <8 x float>* %b) #0 { ; CHECK-LABEL: ucvtf_v8i64_v8f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldp q0, q1, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ptrue p1.s, vl2 ; CHECK-NEXT: ucvtf z0.s, p0/m, z0.d +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: ldp q3, q2, [x0, #32] -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: ucvtf z0.s, p0/m, z1.d -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: ucvtf z1.s, p0/m, z2.d -; CHECK-NEXT: movprfx z2, z3 -; CHECK-NEXT: ucvtf z2.s, p0/m, z3.d -; CHECK-NEXT: fmov x9, d4 -; CHECK-NEXT: mov z3.d, z0.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d3 -; CHECK-NEXT: mov z0.d, z1.d[1] -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: mov z0.d, z2.d[1] -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: fmov x10, d2 -; CHECK-NEXT: fmov x11, d0 -; CHECK-NEXT: stp w8, w9, [sp, #24] -; CHECK-NEXT: stp w10, w11, [sp, #16] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ucvtf z1.s, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p1, z0.s, z1.s +; CHECK-NEXT: ucvtf z3.s, p0/m, z3.d +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: ucvtf z2.s, p0/m, z2.d +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: splice z3.s, p1, z3.s, z2.s +; CHECK-NEXT: stp q0, q3, [x1] ; CHECK-NEXT: ret %op1 = load <8 x i64>, <8 x i64>* %a %res = uitofp <8 x i64> %op1 to <8 x float> @@ -1004,24 +835,11 @@ define <2 x half> @scvtf_v2i32_v2f16(<2 x i32> %op1) #0 { ; CHECK-LABEL: scvtf_v2i32_v2f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: scvtf z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: 
mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sitofp <2 x i32> %op1 to <2 x half> ret <2 x half> %res @@ -1030,24 +848,11 @@ define <4 x half> @scvtf_v4i32_v4f16(<4 x i32> %op1) #0 { ; CHECK-LABEL: scvtf_v4i32_v4f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: scvtf z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sitofp <4 x i32> %op1 to <4 x half> ret <4 x half> %res @@ -1056,35 +861,15 @@ define <8 x half> @scvtf_v8i32_v8f16(<8 x i32>* %a) #0 { ; CHECK-LABEL: scvtf_v8i32_v8f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: scvtf z1.h, p0/m, z1.s -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z5.s, z1.s[2] ; CHECK-NEXT: scvtf z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z2.s, z0.s[3] -; CHECK-NEXT: mov z3.s, z0.s[2] -; CHECK-NEXT: mov z4.s, z0.s[1] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w9, [sp] -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z0.s, z1.s[3] -; CHECK-NEXT: mov z1.s, z1.s[1] -; CHECK-NEXT: strh w10, [sp, #14] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strh w9, [sp, #10] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strh w10, [sp, #6] -; CHECK-NEXT: strh w8, [sp, #4] -; CHECK-NEXT: strh w9, [sp, #2] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z2.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p0, z0.h, z2.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <8 x i32>, <8 x i32>* %a %res = sitofp <8 x i32> %op1 to <8 x half> @@ -1263,37 +1048,19 @@ define <4 x half> @scvtf_v4i64_v4f16(<4 x i64>* %a) #0 { ; CHECK-LABEL: scvtf_v4i64_v4f16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: scvtf z1.s, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s ; CHECK-NEXT: scvtf z0.s, p0/m, z0.d -; CHECK-NEXT: mov z2.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: scvtf z0.s, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: splice z1.s, p0, z1.s, z0.s ; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: ldr q0, 
[sp] -; CHECK-NEXT: fcvt z0.h, p0/m, z0.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #24] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #30] -; CHECK-NEXT: strh w10, [sp, #28] -; CHECK-NEXT: strh w8, [sp, #26] -; CHECK-NEXT: ldr d0, [sp, #24] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: movprfx z0, z1 +; CHECK-NEXT: fcvt z0.h, p0/m, z1.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %op1 = load <4 x i64>, <4 x i64>* %a %res = sitofp <4 x i64> %op1 to <4 x half> @@ -1307,17 +1074,11 @@ define <2 x float> @scvtf_v2i64_v2f32(<2 x i64> %op1) #0 { ; CHECK-LABEL: scvtf_v2i64_v2f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: scvtf z0.s, p0/m, z0.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sitofp <2 x i64> %op1 to <2 x float> ret <2 x float> %res @@ -1326,22 +1087,15 @@ define <4 x float> @scvtf_v4i64_v4f32(<4 x i64>* %a) #0 { ; CHECK-LABEL: scvtf_v4i64_v4f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: scvtf z1.s, p0/m, z1.d ; CHECK-NEXT: scvtf z0.s, p0/m, z0.d -; CHECK-NEXT: mov z2.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: movprfx z0, z1 -; CHECK-NEXT: scvtf z0.s, p0/m, z1.d -; CHECK-NEXT: mov z1.d, z0.d[1] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: stp w8, w9, [sp, #8] -; CHECK-NEXT: stp w10, w11, [sp] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z2.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p0, z0.s, z2.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %op1 = load <4 x i64>, <4 x i64>* %a %res = sitofp <4 x i64> %op1 to <4 x float> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll @@ -72,7 +72,7 @@ ret <16 x i8> %sel } -define void @select_v32i8(ptr %a, ptr %b) #0 { +define void @select_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { ; CHECK-LABEL: select_v32i8: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q1, q0, [x1] @@ -94,11 +94,11 @@ ; CHECK-NEXT: orr z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret - %op1 = load <32 x i8>, ptr %a - %op2 = load <32 x i8>, ptr %b + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b %mask = icmp eq <32 x i8> %op1, %op2 %sel = select <32 x i1> %mask, <32 x i8> %op1, <32 x i8> %op2 - store <32 x i8> %sel, ptr %a + store <32 x i8> %sel, <32 x i8>* %a ret void } @@ -172,7 +172,7 @@ ret <8 x i16> %sel } -define void @select_v16i16(ptr %a, ptr %b) #0 { +define void @select_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { ; CHECK-LABEL: select_v16i16: ; CHECK: // %bb.0: ; 
CHECK-NEXT: ldp q1, q0, [x1] @@ -194,11 +194,11 @@ ; CHECK-NEXT: orr z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret - %op1 = load <16 x i16>, ptr %a - %op2 = load <16 x i16>, ptr %b + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b %mask = icmp eq <16 x i16> %op1, %op2 %sel = select <16 x i1> %mask, <16 x i16> %op1, <16 x i16> %op2 - store <16 x i16> %sel, ptr %a + store <16 x i16> %sel, <16 x i16>* %a ret void } @@ -249,7 +249,7 @@ ret <4 x i32> %sel } -define void @select_v8i32(ptr %a, ptr %b) #0 { +define void @select_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { ; CHECK-LABEL: select_v8i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q1, q0, [x1] @@ -271,11 +271,11 @@ ; CHECK-NEXT: orr z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret - %op1 = load <8 x i32>, ptr %a - %op2 = load <8 x i32>, ptr %b + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b %mask = icmp eq <8 x i32> %op1, %op2 %sel = select <8 x i1> %mask, <8 x i32> %op1, <8 x i32> %op2 - store <8 x i32> %sel, ptr %a + store <8 x i32> %sel, <8 x i32>* %a ret void } @@ -323,7 +323,7 @@ ret <2 x i64> %sel } -define void @select_v4i64(ptr %a, ptr %b) #0 { +define void @select_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { ; CHECK-LABEL: select_v4i64: ; CHECK: // %bb.0: ; CHECK-NEXT: ldp q1, q0, [x1] @@ -345,11 +345,11 @@ ; CHECK-NEXT: orr z0.d, z2.d, z0.d ; CHECK-NEXT: stp q1, q0, [x0] ; CHECK-NEXT: ret - %op1 = load <4 x i64>, ptr %a - %op2 = load <4 x i64>, ptr %b + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b %mask = icmp eq <4 x i64> %op1, %op2 %sel = select <4 x i1> %mask, <4 x i64> %op1, <4 x i64> %op2 - store <4 x i64> %sel, ptr %a + store <4 x i64> %sel, <4 x i64>* %a ret void } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll @@ -0,0 +1,51 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +declare void @def(ptr) + +define void @st1d_fixed(ptr %st_ptr) #0 { +; CHECK-LABEL: st1d_fixed: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-32]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: sub sp, sp, #128 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG +; CHECK-NEXT: mov x19, x0 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: bl def +; CHECK-NEXT: cntd x8 +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: sub x8, x8, #2 +; CHECK-NEXT: ld2d { z0.d, z1.d }, p0/z, [sp] +; CHECK-NEXT: mov w9, #2 +; CHECK-NEXT: cmp x8, #2 +; CHECK-NEXT: csel x8, x8, x9, lo +; CHECK-NEXT: add x10, sp, #128 +; CHECK-NEXT: lsl x8, x8, #3 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: add x9, sp, #128 +; CHECK-NEXT: st1d { z0.d }, p0, [x10] +; CHECK-NEXT: ldr q2, [x9, x8] +; CHECK-NEXT: stp q0, q2, [x19] +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #128 +; CHECK-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldr x29, [sp], #32 // 8-byte Folded Reload +; CHECK-NEXT: ret + %alloc = alloca [16 x double] + call void @def(ptr %alloc) + %load = load <8 x double>, ptr %alloc + %strided.vec = shufflevector <8 x double> %load, <8 x double> poison, <4 x i32> + store <4 x double> %strided.vec, ptr %st_ptr + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll @@ -0,0 +1,52 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define <4 x i32> @test(<16 x i32>* %arg1, <16 x i32>* %arg2) { +; CHECK-LABEL: test: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldp q2, q1, [x0, #32] +; CHECK-NEXT: add z2.s, z2.s, z2.s +; CHECK-NEXT: ldp q3, q4, [x0] +; CHECK-NEXT: mov z0.s, z1.s[2] +; CHECK-NEXT: add z1.s, z1.s, z1.s +; CHECK-NEXT: stp q2, q1, [x0, #32] +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: add z2.s, z3.s, z3.s +; CHECK-NEXT: add z1.s, z4.s, z4.s +; CHECK-NEXT: stp q2, q1, [x0] +; CHECK-NEXT: ret +entry: + %0 = load <16 x i32>, <16 x i32>* %arg1, align 256 + %1 = load <16 x i32>, <16 x i32>* %arg2, align 256 + %shvec = shufflevector <16 x i32> %0, <16 x i32> %1, <4 x i32> + %2 = add <16 x i32> %0, %0 + store <16 x i32> %2, <16 x i32>* %arg1, align 256 + ret <4 x i32> %shvec +} + +define <2 x i32> @test2(<16 x i32>* %arg1, <16 x i32>* %arg2) { +; CHECK-LABEL: test2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldp q2, q0, [x0, #32] +; CHECK-NEXT: ldp q4, q5, [x0] +; CHECK-NEXT: mov z1.d, z0.d +; CHECK-NEXT: add z3.s, z0.s, z0.s +; CHECK-NEXT: ext z1.b, z1.b, z0.b, #8 +; CHECK-NEXT: mov z0.s, s1 +; CHECK-NEXT: add z1.s, z2.s, z2.s +; CHECK-NEXT: stp q1, q3, [x0, #32] +; CHECK-NEXT: add z1.s, z4.s, z4.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: add z2.s, z5.s, z5.s +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret +entry: + %0 = load <16 x i32>, <16 x i32>* %arg1, align 256 + %1 = load <16 x i32>, <16 x i32>* %arg2, align 
256 + %shvec = shufflevector <16 x i32> %0, <16 x i32> %1, <2 x i32> + %2 = add <16 x i32> %0, %0 + store <16 x i32> %2, <16 x i32>* %arg1, align 256 + ret <2 x i32> %shvec +} diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll @@ -0,0 +1,561 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; ANDV +; + +define i8 @andv_v4i8(<4 x i8> %a) #0 { +; CHECK-LABEL: andv_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: andv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %a) + ret i8 %res +} + +define i8 @andv_v8i8(<8 x i8> %a) #0 { +; CHECK-LABEL: andv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: andv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %a) + ret i8 %res +} + +define i8 @andv_v16i8(<16 x i8> %a) #0 { +; CHECK-LABEL: andv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: andv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %a) + ret i8 %res +} + +define i8 @andv_v32i8(ptr %a) #0 { +; CHECK-LABEL: andv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: and z0.d, z1.d, z0.d +; CHECK-NEXT: andv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <32 x i8>, ptr %a + %res = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %op) + ret i8 %res +} + +define i16 @andv_v2i16(<2 x i16> %a) #0 { +; CHECK-LABEL: andv_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: andv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %a) + ret i16 %res +} + +define i16 @andv_v4i16(<4 x i16> %a) #0 { +; CHECK-LABEL: andv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: andv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %a) + ret i16 %res +} + +define i16 @andv_v8i16(<8 x i16> %a) #0 { +; CHECK-LABEL: andv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: andv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %a) + ret i16 %res +} + +define i16 @andv_v16i16(ptr %a) #0 { +; CHECK-LABEL: andv_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: and z0.d, z1.d, z0.d +; CHECK-NEXT: andv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <16 x i16>, ptr %a + %res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %op) + ret i16 %res +} + +define i32 @andv_v2i32(<2 x i32> %a) #0 { +; CHECK-LABEL: andv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def 
$z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: andv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %a) + ret i32 %res +} + +define i32 @andv_v4i32(<4 x i32> %a) #0 { +; CHECK-LABEL: andv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: andv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a) + ret i32 %res +} + +define i32 @andv_v8i32(ptr %a) #0 { +; CHECK-LABEL: andv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: and z0.d, z1.d, z0.d +; CHECK-NEXT: andv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <8 x i32>, ptr %a + %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %op) + ret i32 %res +} + +define i64 @andv_v2i64(<2 x i64> %a) #0 { +; CHECK-LABEL: andv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: andv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %a) + ret i64 %res +} + +define i64 @andv_v4i64(ptr %a) #0 { +; CHECK-LABEL: andv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: and z0.d, z1.d, z0.d +; CHECK-NEXT: andv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %op = load <4 x i64>, ptr %a + %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %op) + ret i64 %res +} + +; +; EORV +; + +define i8 @eorv_v4i8(<4 x i8> %a) #0 { +; CHECK-LABEL: eorv_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: eorv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %a) + ret i8 %res +} + +define i8 @eorv_v8i8(<8 x i8> %a) #0 { +; CHECK-LABEL: eorv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: eorv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %a) + ret i8 %res +} + +define i8 @eorv_v16i8(<16 x i8> %a) #0 { +; CHECK-LABEL: eorv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: eorv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %a) + ret i8 %res +} + +define i8 @eorv_v32i8(ptr %a) #0 { +; CHECK-LABEL: eorv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: eor z0.d, z1.d, z0.d +; CHECK-NEXT: eorv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <32 x i8>, ptr %a + %res = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %op) + ret i8 %res +} + +define i16 @eorv_v2i16(<2 x i16> %a) #0 { +; CHECK-LABEL: eorv_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: eorv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.xor.v2i16(<2 x i16> %a) + ret i16 %res +} + +define i16 @eorv_v4i16(<4 x i16> %a) #0 { +; CHECK-LABEL: eorv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: eorv h0, p0, z0.h +; CHECK-NEXT: fmov 
w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %a) + ret i16 %res +} + +define i16 @eorv_v8i16(<8 x i16> %a) #0 { +; CHECK-LABEL: eorv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: eorv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %a) + ret i16 %res +} + +define i16 @eorv_v16i16(ptr %a) #0 { +; CHECK-LABEL: eorv_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: eor z0.d, z1.d, z0.d +; CHECK-NEXT: eorv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <16 x i16>, ptr %a + %res = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %op) + ret i16 %res +} + +define i32 @eorv_v2i32(<2 x i32> %a) #0 { +; CHECK-LABEL: eorv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: eorv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %a) + ret i32 %res +} + +define i32 @eorv_v4i32(<4 x i32> %a) #0 { +; CHECK-LABEL: eorv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: eorv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a) + ret i32 %res +} + +define i32 @eorv_v8i32(ptr %a) #0 { +; CHECK-LABEL: eorv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: eor z0.d, z1.d, z0.d +; CHECK-NEXT: eorv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <8 x i32>, ptr %a + %res = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %op) + ret i32 %res +} + +define i64 @eorv_v2i64(<2 x i64> %a) #0 { +; CHECK-LABEL: eorv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: eorv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %a) + ret i64 %res +} + +define i64 @eorv_v4i64(ptr %a) #0 { +; CHECK-LABEL: eorv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: eor z0.d, z1.d, z0.d +; CHECK-NEXT: eorv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %op = load <4 x i64>, ptr %a + %res = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %op) + ret i64 %res +} + +; +; ORV +; + +define i8 @orv_v4i8(<4 x i8> %a) #0 { +; CHECK-LABEL: orv_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: orv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %a) + ret i8 %res +} + +define i8 @orv_v8i8(<8 x i8> %a) #0 { +; CHECK-LABEL: orv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: orv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %a) + ret i8 %res +} + +define i8 @orv_v16i8(<16 x i8> %a) #0 { +; CHECK-LABEL: orv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: orv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %a) + ret 
i8 %res +} + +define i8 @orv_v32i8(ptr %a) #0 { +; CHECK-LABEL: orv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: orr z0.d, z1.d, z0.d +; CHECK-NEXT: orv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <32 x i8>, ptr %a + %res = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %op) + ret i8 %res +} + +define i16 @orv_v2i16(<2 x i16> %a) #0 { +; CHECK-LABEL: orv_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: orv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %a) + ret i16 %res +} + +define i16 @orv_v4i16(<4 x i16> %a) #0 { +; CHECK-LABEL: orv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: orv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %a) + ret i16 %res +} + +define i16 @orv_v8i16(<8 x i16> %a) #0 { +; CHECK-LABEL: orv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: orv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %a) + ret i16 %res +} + +define i16 @orv_v16i16(ptr %a) #0 { +; CHECK-LABEL: orv_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: orr z0.d, z1.d, z0.d +; CHECK-NEXT: orv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <16 x i16>, ptr %a + %res = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %op) + ret i16 %res +} + +define i32 @orv_v2i32(<2 x i32> %a) #0 { +; CHECK-LABEL: orv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: orv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %a) + ret i32 %res +} + +define i32 @orv_v4i32(<4 x i32> %a) #0 { +; CHECK-LABEL: orv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: orv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %a) + ret i32 %res +} + +define i32 @orv_v8i32(ptr %a) #0 { +; CHECK-LABEL: orv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: orr z0.d, z1.d, z0.d +; CHECK-NEXT: orv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <8 x i32>, ptr %a + %res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %op) + ret i32 %res +} + +define i64 @orv_v2i64(<2 x i64> %a) #0 { +; CHECK-LABEL: orv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: orv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %a) + ret i64 %res +} + +define i64 @orv_v4i64(ptr %a) #0 { +; CHECK-LABEL: orv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: orr z0.d, z1.d, z0.d +; CHECK-NEXT: orv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %op = load <4 x i64>, ptr %a + %res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %op) + ret i64 %res +} + +attributes #0 = { "target-features"="+sve" } + +declare i8 
@llvm.vector.reduce.and.v4i8(<4 x i8>) +declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>) +declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>) +declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>) + +declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>) +declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>) +declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>) + +declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>) +declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>) +declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>) + +declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>) +declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>) + +declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>) +declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>) +declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>) +declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>) + +declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>) +declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>) +declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>) + +declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>) +declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>) +declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>) + +declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>) +declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>) + +declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>) +declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>) +declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>) +declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>) + +declare i16 @llvm.vector.reduce.xor.v2i16(<2 x i16>) +declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>) +declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>) + +declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>) +declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>) +declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>) + +declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>) +declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll @@ -0,0 +1,368 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define void @add_v4i8(<4 x i8>* %a, <4 x i8>* %b) #0 { +; CHECK-LABEL: add_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr s1, [x1] +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z1.h, z1.b +; CHECK-NEXT: add z0.h, z0.h, z1.h +; CHECK-NEXT: st1b { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i8>, <4 x i8>* %a + %op2 = load <4 x i8>, <4 x i8>* %b + %res = add <4 x i8> %op1, %op2 + store <4 x i8> %res, <4 x i8>* %a + ret void +} + +define void @add_v8i8(<8 x i8>* %a, <8 x i8>* %b) #0 { +; CHECK-LABEL: add_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: add z0.b, z0.b, z1.b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i8>, <8 x i8>* %a + %op2 = load <8 x i8>, <8 x i8>* %b + %res = add <8 x i8> %op1, %op2 + store <8 x i8> %res, <8 x i8>* %a + ret void +} + +define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b) #0 { +; 
CHECK-LABEL: add_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: add z0.b, z0.b, z1.b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i8>, <16 x i8>* %a + %op2 = load <16 x i8>, <16 x i8>* %b + %res = add <16 x i8> %op1, %op2 + store <16 x i8> %res, <16 x i8>* %a + ret void +} + +define void @add_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: add_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: add z0.b, z0.b, z2.b +; CHECK-NEXT: add z1.b, z1.b, z3.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = add <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @add_v2i16(<2 x i16>* %a, <2 x i16>* %b, <2 x i16>* %c) #0 { +; CHECK-LABEL: add_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrh w8, [x0, #2] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: str w8, [sp, #4] +; CHECK-NEXT: ldrh w8, [x0] +; CHECK-NEXT: str w8, [sp] +; CHECK-NEXT: ldrh w8, [x1, #2] +; CHECK-NEXT: str w8, [sp, #12] +; CHECK-NEXT: ldrh w8, [x1] +; CHECK-NEXT: str w8, [sp, #8] +; CHECK-NEXT: ldp d0, d1, [sp] +; CHECK-NEXT: add z0.s, z0.s, z1.s +; CHECK-NEXT: st1h { z0.s }, p0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %op1 = load <2 x i16>, <2 x i16>* %a + %op2 = load <2 x i16>, <2 x i16>* %b + %res = add <2 x i16> %op1, %op2 + store <2 x i16> %res, <2 x i16>* %a + ret void +} + +define void @add_v4i16(<4 x i16>* %a, <4 x i16>* %b, <4 x i16>* %c) #0 { +; CHECK-LABEL: add_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: add z0.h, z0.h, z1.h +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i16>, <4 x i16>* %a + %op2 = load <4 x i16>, <4 x i16>* %b + %res = add <4 x i16> %op1, %op2 + store <4 x i16> %res, <4 x i16>* %a + ret void +} + +define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) #0 { +; CHECK-LABEL: add_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: add z0.h, z0.h, z1.h +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i16>, <8 x i16>* %a + %op2 = load <8 x i16>, <8 x i16>* %b + %res = add <8 x i16> %op1, %op2 + store <8 x i16> %res, <8 x i16>* %a + ret void +} + +define void @add_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i16>* %c) #0 { +; CHECK-LABEL: add_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: add z0.h, z0.h, z2.h +; CHECK-NEXT: add z1.h, z1.h, z3.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = add <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @abs_v2i32(<2 x i32>* %a) #0 { +; CHECK-LABEL: abs_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: abs z0.s, p0/m, z0.s +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %op1 = load <2 x i32>, <2 x i32>* %a + %res = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %op1, i1 false) + store <2 x i32> %res, <2 x i32>* %a + ret void +} + +define void @abs_v4i32(<4 x i32>* %a) #0 { +; CHECK-LABEL: abs_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: abs z0.s, p0/m, z0.s +; CHECK-NEXT: str q0, [x0] +; 
CHECK-NEXT: ret + %op1 = load <4 x i32>, <4 x i32>* %a + %res = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %op1, i1 false) + store <4 x i32> %res, <4 x i32>* %a + ret void +} + +define void @abs_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: abs_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: abs z0.s, p0/m, z0.s +; CHECK-NEXT: abs z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %op1, i1 false) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @abs_v2i64(<2 x i64>* %a) #0 { +; CHECK-LABEL: abs_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: abs z0.d, p0/m, z0.d +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %op1 = load <2 x i64>, <2 x i64>* %a + %res = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %op1, i1 false) + store <2 x i64> %res, <2 x i64>* %a + ret void +} + +define void @abs_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: abs_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: abs z0.d, p0/m, z0.d +; CHECK-NEXT: abs z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %op1, i1 false) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +define void @fadd_v2f16(<2 x half>* %a, <2 x half>* %b) #0 { +; CHECK-LABEL: fadd_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr s1, [x1] +; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: str w8, [x0] +; CHECK-NEXT: ret + %op1 = load <2 x half>, <2 x half>* %a + %op2 = load <2 x half>, <2 x half>* %b + %res = fadd <2 x half> %op1, %op2 + store <2 x half> %res, <2 x half>* %a + ret void +} + +define void @fadd_v4f16(<4 x half>* %a, <4 x half>* %b) #0 { +; CHECK-LABEL: fadd_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x half>, <4 x half>* %a + %op2 = load <4 x half>, <4 x half>* %b + %res = fadd <4 x half> %op1, %op2 + store <4 x half> %res, <4 x half>* %a + ret void +} + +define void @fadd_v8f16(<8 x half>* %a, <8 x half>* %b) #0 { +; CHECK-LABEL: fadd_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x half>, <8 x half>* %a + %op2 = load <8 x half>, <8 x half>* %b + %res = fadd <8 x half> %op1, %op2 + store <8 x half> %res, <8 x half>* %a + ret void +} + +define void @fadd_v16f16(<16 x half>* %a, <16 x half>* %b) #0 { +; CHECK-LABEL: fadd_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x half>, <16 x half>* %a + %op2 = load <16 x half>, <16 x half>* %b + %res = fadd <16 x half> %op1, %op2 + store <16 x half> %res, <16 x half>* %a + ret void +} + +define void @fadd_v2f32(<2 x float>* %a, <2 x float>* %b) #0 { +; CHECK-LABEL: fadd_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.s, vl2 +; 
CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %op1 = load <2 x float>, <2 x float>* %a + %op2 = load <2 x float>, <2 x float>* %b + %res = fadd <2 x float> %op1, %op2 + store <2 x float> %res, <2 x float>* %a + ret void +} + +define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b) #0 { +; CHECK-LABEL: fadd_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x float>, <4 x float>* %a + %op2 = load <4 x float>, <4 x float>* %b + %res = fadd <4 x float> %op1, %op2 + store <4 x float> %res, <4 x float>* %a + ret void +} + +define void @fadd_v8f32(<8 x float>* %a, <8 x float>* %b) #0 { +; CHECK-LABEL: fadd_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x float>, <8 x float>* %a + %op2 = load <8 x float>, <8 x float>* %b + %res = fadd <8 x float> %op1, %op2 + store <8 x float> %res, <8 x float>* %a + ret void +} + +define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b) #0 { +; CHECK-LABEL: fadd_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %op1 = load <2 x double>, <2 x double>* %a + %op2 = load <2 x double>, <2 x double>* %b + %res = fadd <2 x double> %op1, %op2 + store <2 x double> %res, <2 x double>* %a + ret void +} + +define void @fadd_v4f64(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: fadd_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %res = fadd <4 x double> %op1, %op2 + store <4 x double> %res, <4 x double>* %a + ret void +} + +declare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1) +declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) +declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) +declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1) +declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll @@ -0,0 +1,577 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; REVB pattern for shuffle v32i8 -> v16i16 +define void @test_revbv16i16(<32 x i8>* %a) #0 { +; CHECK-LABEL: test_revbv16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: revb z0.h, p0/m, z0.h +; CHECK-NEXT: revb z1.h, p0/m, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <32 x i8>, <32 x i8>* %a + %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> + store <32 x i8> %tmp2, <32 
x i8>* %a + ret void +} + +; REVB pattern for shuffle v32i8 -> v8i32 +define void @test_revbv8i32(<32 x i8>* %a) #0 { +; CHECK-LABEL: test_revbv8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: revb z0.s, p0/m, z0.s +; CHECK-NEXT: revb z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <32 x i8>, <32 x i8>* %a + %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> + store <32 x i8> %tmp2, <32 x i8>* %a + ret void +} + +; REVB pattern for shuffle v32i8 -> v4i64 +define void @test_revbv4i64(<32 x i8>* %a) #0 { +; CHECK-LABEL: test_revbv4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: revb z0.d, p0/m, z0.d +; CHECK-NEXT: revb z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <32 x i8>, <32 x i8>* %a + %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> + store <32 x i8> %tmp2, <32 x i8>* %a + ret void +} + +; REVH pattern for shuffle v16i16 -> v8i32 +define void @test_revhv8i32(<16 x i16>* %a) #0 { +; CHECK-LABEL: test_revhv8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: revh z0.s, p0/m, z0.s +; CHECK-NEXT: revh z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <16 x i16>, <16 x i16>* %a + %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> + store <16 x i16> %tmp2, <16 x i16>* %a + ret void +} + +; REVH pattern for shuffle v16f16 -> v8f32 +define void @test_revhv8f32(<16 x half>* %a) #0 { +; CHECK-LABEL: test_revhv8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: revh z0.s, p0/m, z0.s +; CHECK-NEXT: revh z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <16 x half>, <16 x half>* %a + %tmp2 = shufflevector <16 x half> %tmp1, <16 x half> undef, <16 x i32> + store <16 x half> %tmp2, <16 x half>* %a + ret void +} + +; REVH pattern for shuffle v16i16 -> v4i64 +define void @test_revhv4i64(<16 x i16>* %a) #0 { +; CHECK-LABEL: test_revhv4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: revh z0.d, p0/m, z0.d +; CHECK-NEXT: revh z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <16 x i16>, <16 x i16>* %a + %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> + store <16 x i16> %tmp2, <16 x i16>* %a + ret void +} + +; REVW pattern for shuffle v8i32 -> v4i64 +define void @test_revwv4i64(<8 x i32>* %a) #0 { +; CHECK-LABEL: test_revwv4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: revw z0.d, p0/m, z0.d +; CHECK-NEXT: revw z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <8 x i32>, <8 x i32>* %a + %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> + store <8 x i32> %tmp2, <8 x i32>* %a + ret void +} + +; REVW pattern for shuffle v8f32 -> v4f64 +define void @test_revwv4f64(<8 x float>* %a) #0 { +; CHECK-LABEL: test_revwv4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: revw z0.d, p0/m, z0.d +; CHECK-NEXT: revw z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <8 x float>, <8 x float>* %a + %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> undef, <8 x i32> + store <8 x float> %tmp2, <8 x float>* %a + ret void +} + +; Don't use SVE for 128-bit vectors +define <16 
x i8> @test_revv16i8(<16 x i8>* %a) #0 { +; CHECK-LABEL: test_revv16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: revb z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %tmp1 = load <16 x i8>, <16 x i8>* %a + %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> + ret <16 x i8> %tmp2 +} + +; REVW pattern for shuffle two v8i32 inputs with the second input available. +define void @test_revwv8i32v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: test_revwv8i32v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: revw z0.d, p0/m, z0.d +; CHECK-NEXT: revw z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <8 x i32>, <8 x i32>* %a + %tmp2 = load <8 x i32>, <8 x i32>* %b + %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> + store <8 x i32> %tmp3, <8 x i32>* %a + ret void +} + +; REVH pattern for shuffle v32i16 with 256 bits and 512 bits SVE. +define void @test_revhv32i16(<32 x i16>* %a) #0 { +; CHECK-LABEL: test_revhv32i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: revh z0.d, p0/m, z0.d +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: revh z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0, #32] +; CHECK-NEXT: revh z0.d, p0/m, z2.d +; CHECK-NEXT: revh z1.d, p0/m, z3.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <32 x i16>, <32 x i16>* %a + %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <32 x i32> + store <32 x i16> %tmp2, <32 x i16>* %a + ret void +} + +; Only support to reverse bytes / halfwords / words within elements +define void @test_rev_elts_fail(<4 x i64>* %a) #0 { +; CHECK-LABEL: test_rev_elts_fail: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: mov z2.d, z0.d[1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z0.d, z1.d[1] +; CHECK-NEXT: fmov x11, d0 +; CHECK-NEXT: stp x9, x8, [sp, #-32]! +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x11, x10, [sp, #16] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <4 x i64>, <4 x i64>* %a + %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> undef, <4 x i32> + store <4 x i64> %tmp2, <4 x i64>* %a + ret void +} + +; REV instruction will reverse the order of all elements in the vector. +; When the vector length and the target register size are inconsistent, +; the correctness of generated REV instruction for shuffle pattern cannot be guaranteed. 
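+; For example, a full reverse of a 256-bit fixed-length vector would need a REV over the
+; whole Z register, but with the register length unknown the reversed elements would end
+; up in the top lanes rather than the bottom ones, so the shuffles below are instead
+; expanded through the stack.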
+ +define void @test_revv8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: test_revv8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov z2.s, z0.s[1] +; CHECK-NEXT: mov z3.s, z0.s[2] +; CHECK-NEXT: mov z4.s, z0.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: fmov w11, s4 +; CHECK-NEXT: mov z0.s, z1.s[1] +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: mov z3.s, z1.s[3] +; CHECK-NEXT: stp w9, w8, [sp, #24] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: stp w11, w10, [sp, #16] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: stp w9, w8, [sp, #8] +; CHECK-NEXT: stp w11, w10, [sp] +; CHECK-NEXT: ldp q0, q1, [sp] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <8 x i32>, <8 x i32>* %a + %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> + store <8 x i32> %tmp2, <8 x i32>* %a + ret void +} + +define void @test_revv32i8_vl256(<32 x i8>* %a) #0 { +; CHECK-LABEL: test_revv32i8_vl256: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: mov z2.b, z1.b[1] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z3.b, z1.b[2] +; CHECK-NEXT: mov z4.b, z1.b[3] +; CHECK-NEXT: mov z5.b, z1.b[4] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strb w8, [sp, #31] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: strb w9, [sp, #30] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: mov z6.b, z1.b[5] +; CHECK-NEXT: mov z7.b, z1.b[6] +; CHECK-NEXT: mov z16.b, z1.b[7] +; CHECK-NEXT: strb w10, [sp, #29] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: strb w8, [sp, #28] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: strb w9, [sp, #27] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: mov z17.b, z1.b[8] +; CHECK-NEXT: mov z18.b, z1.b[9] +; CHECK-NEXT: mov z19.b, z1.b[10] +; CHECK-NEXT: strb w10, [sp, #26] +; CHECK-NEXT: fmov w10, s17 +; CHECK-NEXT: strb w8, [sp, #25] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: strb w9, [sp, #24] +; CHECK-NEXT: fmov w9, s19 +; CHECK-NEXT: mov z20.b, z1.b[11] +; CHECK-NEXT: mov z21.b, z1.b[12] +; CHECK-NEXT: mov z2.b, z1.b[13] +; CHECK-NEXT: strb w10, [sp, #23] +; CHECK-NEXT: fmov w10, s20 +; CHECK-NEXT: strb w8, [sp, #22] +; CHECK-NEXT: fmov w8, s21 +; CHECK-NEXT: strb w9, [sp, #21] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z1.b[14] +; CHECK-NEXT: mov z1.b, z1.b[15] +; CHECK-NEXT: strb w10, [sp, #20] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: strb w8, [sp, #19] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strb w9, [sp, #18] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: strb w8, [sp, #17] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strb w9, [sp, #16] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strb w10, [sp, #15] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z6.b, z0.b[6] +; CHECK-NEXT: strb w8, [sp, #14] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: strb w9, [sp, #13] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: strb w10, [sp, #12] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: mov z7.b, z0.b[7] +; CHECK-NEXT: mov z16.b, z0.b[8] +; CHECK-NEXT: mov z17.b, z0.b[9] +; CHECK-NEXT: strb w8, [sp, #11] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: strb w9, [sp, #10] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: strb w10, [sp, #9] +; CHECK-NEXT: 
fmov w10, s17 +; CHECK-NEXT: mov z18.b, z0.b[10] +; CHECK-NEXT: mov z19.b, z0.b[11] +; CHECK-NEXT: mov z20.b, z0.b[12] +; CHECK-NEXT: strb w8, [sp, #8] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: strb w9, [sp, #7] +; CHECK-NEXT: fmov w9, s19 +; CHECK-NEXT: strb w10, [sp, #6] +; CHECK-NEXT: fmov w10, s20 +; CHECK-NEXT: mov z21.b, z0.b[13] +; CHECK-NEXT: mov z22.b, z0.b[14] +; CHECK-NEXT: mov z23.b, z0.b[15] +; CHECK-NEXT: strb w8, [sp, #5] +; CHECK-NEXT: fmov w8, s21 +; CHECK-NEXT: strb w9, [sp, #4] +; CHECK-NEXT: fmov w9, s22 +; CHECK-NEXT: strb w10, [sp, #3] +; CHECK-NEXT: fmov w10, s23 +; CHECK-NEXT: strb w8, [sp, #2] +; CHECK-NEXT: strb w9, [sp, #1] +; CHECK-NEXT: strb w10, [sp] +; CHECK-NEXT: ldp q0, q1, [sp] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <32 x i8>, <32 x i8>* %a + %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> + store <32 x i8> %tmp2, <32 x i8>* %a + ret void +} + +define void @test_revv16i16_vl256(<16 x i16>* %a) #0 { +; CHECK-LABEL: test_revv16i16_vl256: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov z2.h, z0.h[1] +; CHECK-NEXT: mov z3.h, z0.h[2] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: mov z4.h, z0.h[3] +; CHECK-NEXT: mov z5.h, z0.h[4] +; CHECK-NEXT: mov z2.h, z0.h[5] +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: strh w9, [sp, #28] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: strh w10, [sp, #26] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strh w9, [sp, #22] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strh w10, [sp, #20] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: mov z0.h, z1.h[1] +; CHECK-NEXT: mov z2.h, z1.h[2] +; CHECK-NEXT: mov z3.h, z1.h[3] +; CHECK-NEXT: strh w8, [sp, #18] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #16] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w10, [sp, #14] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: mov z4.h, z1.h[4] +; CHECK-NEXT: mov z5.h, z1.h[5] +; CHECK-NEXT: mov z6.h, z1.h[6] +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: strh w9, [sp, #10] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: strh w10, [sp, #8] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: strh w9, [sp, #4] +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: ldp q0, q1, [sp] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <16 x i16>, <16 x i16>* %a + %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> + store <16 x i16> %tmp2, <16 x i16>* %a + ret void +} + +define void @test_revv8f32_vl256(<8 x float>* %a) #0 { +; CHECK-LABEL: test_revv8f32_vl256: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov z2.s, z0.s[1] +; CHECK-NEXT: stp s2, s0, [sp, #24] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: stp s0, s2, [sp, #16] +; CHECK-NEXT: mov z0.s, z1.s[1] +; CHECK-NEXT: stp s0, s1, [sp, #8] +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: mov z0.s, z1.s[3] +; CHECK-NEXT: stp s0, s2, [sp] +; CHECK-NEXT: ldp q0, q1, [sp] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <8 x float>, <8 x float>* %a + %tmp2 = shufflevector 
<8 x float> %tmp1, <8 x float> undef, <8 x i32> + store <8 x float> %tmp2, <8 x float>* %a + ret void +} + +define void @test_revv4f64_vl256(<4 x double>* %a) #0 { +; CHECK-LABEL: test_revv4f64_vl256: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov z2.d, z0.d[1] +; CHECK-NEXT: stp d2, d0, [sp, #16] +; CHECK-NEXT: mov z0.d, z1.d[1] +; CHECK-NEXT: stp d0, d1, [sp] +; CHECK-NEXT: ldp q0, q1, [sp] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <4 x double>, <4 x double>* %a + %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> undef, <4 x i32> + store <4 x double> %tmp2, <4 x double>* %a + ret void +} + +define void @test_revv8i32v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: test_revv8i32v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: mov z2.s, z0.s[1] +; CHECK-NEXT: mov z3.s, z0.s[2] +; CHECK-NEXT: mov z4.s, z0.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: fmov w11, s4 +; CHECK-NEXT: mov z0.s, z1.s[1] +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: mov z3.s, z1.s[3] +; CHECK-NEXT: stp w9, w8, [sp, #24] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: stp w11, w10, [sp, #16] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: stp w9, w8, [sp, #8] +; CHECK-NEXT: stp w11, w10, [sp] +; CHECK-NEXT: ldp q0, q1, [sp] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <8 x i32>, <8 x i32>* %a + %tmp2 = load <8 x i32>, <8 x i32>* %b + %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> + store <8 x i32> %tmp3, <8 x i32>* %a + ret void +} + +; Illegal REV pattern. 
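+; Judging by the expected code, the halfwords here are reversed within each 128-bit block;
+; that granule is wider than the largest REV element size (a doubleword), so the shuffle is
+; expanded through the stack rather than matched to REVB/REVH/REVW.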
+define void @test_rev_fail(<16 x i16>* %a) #0 { +; CHECK-LABEL: test_rev_fail: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: mov z6.h, z1.h[6] +; CHECK-NEXT: mov z7.h, z1.h[7] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.h, z0.h[1] +; CHECK-NEXT: mov z3.h, z0.h[2] +; CHECK-NEXT: mov z4.h, z0.h[3] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: mov z5.h, z0.h[4] +; CHECK-NEXT: mov z2.h, z0.h[5] +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: strh w9, [sp, #6] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z0.h, z1.h[1] +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #2] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.h, z1.h[2] +; CHECK-NEXT: mov z3.h, z1.h[3] +; CHECK-NEXT: mov z4.h, z1.h[4] +; CHECK-NEXT: strh w9, [sp] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w10, [sp, #30] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w8, [sp, #28] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: mov z5.h, z1.h[5] +; CHECK-NEXT: strh w9, [sp, #26] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: strh w10, [sp, #24] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: strh w8, [sp, #22] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: strh w9, [sp, #20] +; CHECK-NEXT: strh w10, [sp, #18] +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <16 x i16>, <16 x i16>* %a + %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> + store <16 x i16> %tmp2, <16 x i16>* %a + ret void +} + +define void @test_revv8i16v8i16(<8 x i16>* %a, <8 x i16>* %b, <16 x i16>* %c) #0 { +; CHECK-LABEL: test_revv8i16v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: revh z0.d, p0/m, z0.d +; CHECK-NEXT: revh z1.d, p0/m, z1.d +; CHECK-NEXT: stp q1, q0, [x2] +; CHECK-NEXT: ret + %tmp1 = load <8 x i16>, <8 x i16>* %a + %tmp2 = load <8 x i16>, <8 x i16>* %b + %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> + store <16 x i16> %tmp3, <16 x i16>* %c + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll @@ -0,0 +1,1446 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define void @zip1_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: zip1_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z2.b, z0.b[15] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z0.b[14] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z0.b[13] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.b, z0.b[12] +; 
CHECK-NEXT: strb w8, [sp, #14] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z0.b[11] +; CHECK-NEXT: strb w9, [sp, #12] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z0.b[10] +; CHECK-NEXT: strb w10, [sp, #10] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.b, z0.b[9] +; CHECK-NEXT: strb w8, [sp, #8] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z0.b[8] +; CHECK-NEXT: strb w9, [sp, #6] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z1.b[15] +; CHECK-NEXT: strb w10, [sp, #4] +; CHECK-NEXT: strb w8, [sp, #2] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z1.b[14] +; CHECK-NEXT: strb w9, [sp] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z1.b[13] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.b, z1.b[12] +; CHECK-NEXT: strb w8, [sp, #15] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z1.b[11] +; CHECK-NEXT: strb w9, [sp, #13] +; CHECK-NEXT: strb w10, [sp, #11] +; CHECK-NEXT: zip1 z0.b, z0.b, z1.b +; CHECK-NEXT: strb w8, [sp, #9] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z1.b[10] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z1.b[9] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.b, z1.b[8] +; CHECK-NEXT: strb w8, [sp, #7] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strb w9, [sp, #5] +; CHECK-NEXT: strb w10, [sp, #3] +; CHECK-NEXT: strb w8, [sp, #1] +; CHECK-NEXT: ldr q2, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: str q2, [x0, #16] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load volatile <32 x i8>, <32 x i8>* %a + %tmp2 = load volatile <32 x i8>, <32 x i8>* %b + %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> + store volatile <32 x i8> %tmp3, <32 x i8>* %a + ret void +} + +define void @zip_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 { +; CHECK-LABEL: zip_v32i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: ldp q2, q5, [x1] +; CHECK-NEXT: ldp q4, q7, [x0] +; CHECK-NEXT: mov z16.h, z5.h[7] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: mov z16.h, z5.h[6] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: mov z16.h, z5.h[5] +; CHECK-NEXT: mov z17.h, z7.h[7] +; CHECK-NEXT: fmov w9, s17 +; CHECK-NEXT: mov z17.h, z7.h[6] +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ldp q3, q6, [x1, #32] +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: strh w9, [sp, #28] +; CHECK-NEXT: strh w10, [sp, #26] +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: mov z16.h, z7.h[5] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: mov z16.h, z5.h[4] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: mov z16.h, z7.h[4] +; CHECK-NEXT: strh w8, [sp, #22] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: mov z16.h, z2.h[7] +; CHECK-NEXT: strh w9, [sp, #20] +; CHECK-NEXT: strh w10, [sp, #18] +; CHECK-NEXT: mov z18.h, z6.h[7] +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: mov z16.h, z4.h[7] +; CHECK-NEXT: ldr q17, [sp, #16] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: mov z16.h, z2.h[6] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: mov z16.h, z4.h[6] +; CHECK-NEXT: strh w8, [sp, #62] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: mov z16.h, z2.h[5] +; CHECK-NEXT: strh w9, [sp, #60] +; CHECK-NEXT: strh w10, [sp, #58] +; CHECK-NEXT: zip1 z5.h, z7.h, z5.h +; CHECK-NEXT: strh w8, [sp, #56] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: mov z16.h, z4.h[5] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: mov z16.h, z2.h[4] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: mov z16.h, z4.h[4] +; CHECK-NEXT: strh w8, [sp, #54] +; CHECK-NEXT: fmov w8, 
s16 +; CHECK-NEXT: strh w9, [sp, #52] +; CHECK-NEXT: zip1 z2.h, z4.h, z2.h +; CHECK-NEXT: strh w10, [sp, #50] +; CHECK-NEXT: strh w8, [sp, #48] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z18.h, z1.h[7] +; CHECK-NEXT: ldr q16, [sp, #48] +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: mov z18.h, z6.h[6] +; CHECK-NEXT: fmov w10, s18 +; CHECK-NEXT: mov z18.h, z1.h[6] +; CHECK-NEXT: strh w8, [sp, #46] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z18.h, z6.h[5] +; CHECK-NEXT: strh w9, [sp, #44] +; CHECK-NEXT: strh w10, [sp, #42] +; CHECK-NEXT: strh w8, [sp, #40] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z18.h, z1.h[5] +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: mov z18.h, z6.h[4] +; CHECK-NEXT: fmov w10, s18 +; CHECK-NEXT: mov z18.h, z1.h[4] +; CHECK-NEXT: strh w8, [sp, #38] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z18.h, z3.h[7] +; CHECK-NEXT: strh w9, [sp, #36] +; CHECK-NEXT: strh w10, [sp, #34] +; CHECK-NEXT: zip1 z1.h, z1.h, z6.h +; CHECK-NEXT: strh w8, [sp, #32] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z18.h, z0.h[7] +; CHECK-NEXT: ldr q4, [sp, #32] +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: mov z18.h, z3.h[6] +; CHECK-NEXT: fmov w10, s18 +; CHECK-NEXT: mov z18.h, z0.h[6] +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z18.h, z3.h[5] +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: add z1.h, z5.h, z1.h +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z18.h, z0.h[5] +; CHECK-NEXT: add z4.h, z17.h, z4.h +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: mov z18.h, z3.h[4] +; CHECK-NEXT: fmov w10, s18 +; CHECK-NEXT: mov z18.h, z0.h[4] +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: strh w9, [sp, #4] +; CHECK-NEXT: zip1 z0.h, z0.h, z3.h +; CHECK-NEXT: strh w10, [sp, #2] +; CHECK-NEXT: add z0.h, z2.h, z0.h +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: ldr q3, [sp] +; CHECK-NEXT: stp q1, q4, [x0, #32] +; CHECK-NEXT: add z1.h, z16.h, z3.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: ret + %tmp1 = load <32 x i16>, <32 x i16>* %a + %tmp2 = load <32 x i16>, <32 x i16>* %b + %tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> + %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> + %tmp5 = add <32 x i16> %tmp3, %tmp4 + store <32 x i16> %tmp5, <32 x i16>* %a + ret void +} + +define void @zip1_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: zip1_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z2.h, z0.h[7] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z0.h[5] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.h, z0.h[4] +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[7] +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z1.h[6] +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.h, z1.h[5] +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[4] +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: zip1 z0.h, z0.h, z1.h +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: strh w9, [sp, #2] +; CHECK-NEXT: ldr q2, [sp] +; CHECK-NEXT: str q0, [x0] +; 
CHECK-NEXT: str q2, [x0, #16] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load volatile <16 x i16>, <16 x i16>* %a + %tmp2 = load volatile <16 x i16>, <16 x i16>* %b + %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> + store volatile <16 x i16> %tmp3, <16 x i16>* %a + ret void +} + +define void @zip1_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: zip1_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z2.s, z0.s[3] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: fmov w11, s2 +; CHECK-NEXT: zip1 z0.s, z0.s, z1.s +; CHECK-NEXT: stp w8, w9, [sp, #8] +; CHECK-NEXT: stp w10, w11, [sp] +; CHECK-NEXT: ldr q2, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: str q2, [x0, #16] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load volatile <8 x i32>, <8 x i32>* %a + %tmp2 = load volatile <8 x i32>, <8 x i32>* %b + %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> + store volatile <8 x i32> %tmp3, <8 x i32>* %a + ret void +} + +define void @zip_v4f64(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: zip_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: zip1 z4.d, z1.d, z2.d +; CHECK-NEXT: trn2 z1.d, z1.d, z2.d +; CHECK-NEXT: zip1 z2.d, z0.d, z3.d +; CHECK-NEXT: trn2 z0.d, z0.d, z3.d +; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z4.d +; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: stp q2, q0, [x0] +; CHECK-NEXT: ret + %tmp1 = load <4 x double>, <4 x double>* %a + %tmp2 = load <4 x double>, <4 x double>* %b + %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> + %tmp4 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> + %tmp5 = fadd <4 x double> %tmp3, %tmp4 + store <4 x double> %tmp5, <4 x double>* %a + ret void +} + +define void @zip_v4i32(<4 x i32>* %a, <4 x i32>* %b) #0 { +; CHECK-LABEL: zip_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: mov z2.s, z0.s[3] +; CHECK-NEXT: mov z3.s, z1.s[3] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: mov z3.s, z1.s[2] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: zip1 z0.s, z1.s, z0.s +; CHECK-NEXT: stp w9, w8, [sp, #8] +; CHECK-NEXT: stp w11, w10, [sp] +; CHECK-NEXT: ldr q2, [sp] +; CHECK-NEXT: add z0.s, z0.s, z2.s +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load <4 x i32>, <4 x i32>* %a + %tmp2 = load <4 x i32>, <4 x i32>* %b + %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> + %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> + %tmp5 = add <4 x i32> %tmp3, %tmp4 + store <4 x i32> %tmp5, <4 x i32>* %a + ret void +} + +define void @zip1_v8i32_undef(<8 x i32>* %a) #0 { +; CHECK-LABEL: zip1_v8i32_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z0.s[2] +; CHECK-NEXT: fmov w9, s1 
+; CHECK-NEXT: zip1 z0.s, z0.s, z0.s +; CHECK-NEXT: stp w8, w8, [sp, #8] +; CHECK-NEXT: stp w9, w9, [sp] +; CHECK-NEXT: ldr q1, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: str q1, [x0, #16] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load volatile <8 x i32>, <8 x i32>* %a + %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> + store volatile <8 x i32> %tmp2, <8 x i32>* %a + ret void +} + +define void @trn_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: trn_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: trn1 z4.b, z1.b, z2.b +; CHECK-NEXT: trn2 z1.b, z1.b, z2.b +; CHECK-NEXT: add z1.b, z4.b, z1.b +; CHECK-NEXT: trn1 z5.b, z0.b, z3.b +; CHECK-NEXT: trn2 z0.b, z0.b, z3.b +; CHECK-NEXT: add z0.b, z5.b, z0.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %tmp1 = load <32 x i8>, <32 x i8>* %a + %tmp2 = load <32 x i8>, <32 x i8>* %b + %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> + %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> + %tmp5 = add <32 x i8> %tmp3, %tmp4 + store <32 x i8> %tmp5, <32 x i8>* %a + ret void +} + +define void @trn_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 { +; CHECK-LABEL: trn_v32i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldp q3, q2, [x0, #32] +; CHECK-NEXT: ldp q5, q4, [x1, #32] +; CHECK-NEXT: trn1 z16.h, z3.h, z5.h +; CHECK-NEXT: trn2 z3.h, z3.h, z5.h +; CHECK-NEXT: add z3.h, z16.h, z3.h +; CHECK-NEXT: ldp q6, q7, [x1] +; CHECK-NEXT: trn1 z17.h, z2.h, z4.h +; CHECK-NEXT: trn2 z2.h, z2.h, z4.h +; CHECK-NEXT: add z2.h, z17.h, z2.h +; CHECK-NEXT: stp q3, q2, [x0, #32] +; CHECK-NEXT: trn1 z18.h, z1.h, z6.h +; CHECK-NEXT: trn2 z1.h, z1.h, z6.h +; CHECK-NEXT: add z1.h, z18.h, z1.h +; CHECK-NEXT: trn1 z19.h, z0.h, z7.h +; CHECK-NEXT: trn2 z0.h, z0.h, z7.h +; CHECK-NEXT: add z0.h, z19.h, z0.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %tmp1 = load <32 x i16>, <32 x i16>* %a + %tmp2 = load <32 x i16>, <32 x i16>* %b + %tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> + %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> + %tmp5 = add <32 x i16> %tmp3, %tmp4 + store <32 x i16> %tmp5, <32 x i16>* %a + ret void +} + +define void @trn_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: trn_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: trn1 z4.h, z1.h, z2.h +; CHECK-NEXT: trn2 z1.h, z1.h, z2.h +; CHECK-NEXT: add z1.h, z4.h, z1.h +; CHECK-NEXT: trn1 z5.h, z0.h, z3.h +; CHECK-NEXT: trn2 z0.h, z0.h, z3.h +; CHECK-NEXT: add z0.h, z5.h, z0.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %tmp1 = load <16 x i16>, <16 x i16>* %a + %tmp2 = load <16 x i16>, <16 x i16>* %b + %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> + %tmp4 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> + %tmp5 = add <16 x i16> %tmp3, %tmp4 + store <16 x i16> %tmp5, <16 x i16>* %a + ret void +} + +define void @trn_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: trn_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: zip1 z4.s, z1.s, z2.s +; CHECK-NEXT: trn2 z1.s, z1.s, z2.s +; CHECK-NEXT: add z1.s, z4.s, z1.s +; CHECK-NEXT: trn1 z5.s, z0.s, z3.s +; CHECK-NEXT: trn2 z0.s, z0.s, z3.s +; CHECK-NEXT: add z0.s, z5.s, z0.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %tmp1 = load <8 x i32>, <8 x i32>* %a + %tmp2 = load <8 x i32>, 
<8 x i32>* %b + %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> + %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> + %tmp5 = add <8 x i32> %tmp3, %tmp4 + store <8 x i32> %tmp5, <8 x i32>* %a + ret void +} + +define void @trn_v4f64(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: trn_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: zip1 z4.d, z1.d, z2.d +; CHECK-NEXT: trn2 z1.d, z1.d, z2.d +; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z4.d +; CHECK-NEXT: zip1 z5.d, z0.d, z3.d +; CHECK-NEXT: trn2 z0.d, z0.d, z3.d +; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z5.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %tmp1 = load <4 x double>, <4 x double>* %a + %tmp2 = load <4 x double>, <4 x double>* %b + %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> + %tmp4 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> + %tmp5 = fadd <4 x double> %tmp3, %tmp4 + store <4 x double> %tmp5, <4 x double>* %a + ret void +} + +define void @trn_v4f32(<4 x float>* %a, <4 x float>* %b) #0 { +; CHECK-LABEL: trn_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: trn1 z2.s, z0.s, z1.s +; CHECK-NEXT: trn2 z0.s, z0.s, z1.s +; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %tmp1 = load <4 x float>, <4 x float>* %a + %tmp2 = load <4 x float>, <4 x float>* %b + %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> + %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> + %tmp5 = fadd <4 x float> %tmp3, %tmp4 + store <4 x float> %tmp5, <4 x float>* %a + ret void +} + +define void @trn_v8i32_undef(<8 x i32>* %a) #0 { +; CHECK-LABEL: trn_v8i32_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: trn1 z2.s, z0.s, z0.s +; CHECK-NEXT: trn2 z0.s, z0.s, z0.s +; CHECK-NEXT: add z0.s, z2.s, z0.s +; CHECK-NEXT: trn1 z3.s, z1.s, z1.s +; CHECK-NEXT: trn2 z1.s, z1.s, z1.s +; CHECK-NEXT: add z1.s, z3.s, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 = load <8 x i32>, <8 x i32>* %a + %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> + %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> + %tmp5 = add <8 x i32> %tmp3, %tmp4 + store <8 x i32> %tmp5, <8 x i32>* %a + ret void +} + +define void @zip2_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0{ +; CHECK-LABEL: zip2_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: ldr q1, [x1, #16] +; CHECK-NEXT: mov z2.b, z0.b[15] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z0.b[14] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z0.b[13] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.b, z0.b[12] +; CHECK-NEXT: strb w8, [sp, #14] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z0.b[11] +; CHECK-NEXT: strb w9, [sp, #12] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z0.b[10] +; CHECK-NEXT: strb w10, [sp, #10] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.b, z0.b[9] +; CHECK-NEXT: strb w8, [sp, #8] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z0.b[8] +; CHECK-NEXT: strb w9, [sp, #6] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z1.b[15] +; CHECK-NEXT: strb w10, [sp, #4] +; CHECK-NEXT: strb w8, [sp, #2] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, 
z1.b[14] +; CHECK-NEXT: strb w9, [sp] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z1.b[13] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.b, z1.b[12] +; CHECK-NEXT: strb w8, [sp, #15] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z1.b[11] +; CHECK-NEXT: strb w9, [sp, #13] +; CHECK-NEXT: strb w10, [sp, #11] +; CHECK-NEXT: zip1 z0.b, z0.b, z1.b +; CHECK-NEXT: strb w8, [sp, #9] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.b, z1.b[10] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.b, z1.b[9] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.b, z1.b[8] +; CHECK-NEXT: strb w8, [sp, #7] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strb w9, [sp, #5] +; CHECK-NEXT: strb w10, [sp, #3] +; CHECK-NEXT: strb w8, [sp, #1] +; CHECK-NEXT: ldr q2, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: str q2, [x0, #16] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load volatile <32 x i8>, <32 x i8>* %a + %tmp2 = load volatile <32 x i8>, <32 x i8>* %b + %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> + store volatile <32 x i8> %tmp3, <32 x i8>* %a + ret void +} + +define void @zip2_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0{ +; CHECK-LABEL: zip2_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: ldr q1, [x1, #16] +; CHECK-NEXT: mov z2.h, z0.h[7] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z0.h[5] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.h, z0.h[4] +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[7] +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z1.h[6] +; CHECK-NEXT: strh w10, [sp, #4] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.h, z1.h[5] +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[4] +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: zip1 z0.h, z0.h, z1.h +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: strh w9, [sp, #2] +; CHECK-NEXT: ldr q2, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: str q2, [x0, #16] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load volatile <16 x i16>, <16 x i16>* %a + %tmp2 = load volatile <16 x i16>, <16 x i16>* %b + %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> + store volatile <16 x i16> %tmp3, <16 x i16>* %a + ret void +} + +define void @zip2_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0{ +; CHECK-LABEL: zip2_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: ldr q1, [x1, #16] +; CHECK-NEXT: mov z2.s, z0.s[3] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z2.s, z1.s[2] +; CHECK-NEXT: fmov w11, s2 +; CHECK-NEXT: zip1 z0.s, z0.s, z1.s +; CHECK-NEXT: stp w8, w9, [sp, #8] +; CHECK-NEXT: stp w10, w11, [sp] +; CHECK-NEXT: ldr q2, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: str q2, [x0, #16] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load volatile <8 x i32>, <8 x i32>* %a + %tmp2 = load volatile <8 x i32>, <8 x i32>* %b + %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> + store volatile <8 x i32> 
%tmp3, <8 x i32>* %a + ret void +} + +define void @zip2_v8i32_undef(<8 x i32>* %a) #0{ +; CHECK-LABEL: zip2_v8i32_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z0.s[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: zip1 z0.s, z0.s, z0.s +; CHECK-NEXT: stp w8, w8, [sp, #8] +; CHECK-NEXT: stp w9, w9, [sp] +; CHECK-NEXT: ldr q1, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: str q1, [x0, #16] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %tmp1 = load volatile <8 x i32>, <8 x i32>* %a + %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> + store volatile <8 x i32> %tmp2, <8 x i32>* %a + ret void +} + +define void @uzp_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0{ +; CHECK-LABEL: uzp_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #128 +; CHECK-NEXT: .cfi_def_cfa_offset 128 +; CHECK-NEXT: stp d15, d14, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: stp d13, d12, [sp, #80] // 16-byte Folded Spill +; CHECK-NEXT: stp d11, d10, [sp, #96] // 16-byte Folded Spill +; CHECK-NEXT: stp d9, d8, [sp, #112] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_offset b8, -8 +; CHECK-NEXT: .cfi_offset b9, -16 +; CHECK-NEXT: .cfi_offset b10, -24 +; CHECK-NEXT: .cfi_offset b11, -32 +; CHECK-NEXT: .cfi_offset b12, -40 +; CHECK-NEXT: .cfi_offset b13, -48 +; CHECK-NEXT: .cfi_offset b14, -56 +; CHECK-NEXT: .cfi_offset b15, -64 +; CHECK-NEXT: ldp q0, q3, [x0] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z27.b, z0.b[14] +; CHECK-NEXT: mov z28.b, z0.b[12] +; CHECK-NEXT: mov z30.b, z0.b[8] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z2.b, z3.b[12] +; CHECK-NEXT: mov z4.b, z3.b[10] +; CHECK-NEXT: mov z1.b, z3.b[14] +; CHECK-NEXT: ldp q10, q11, [x1] +; CHECK-NEXT: strb w8, [sp, #40] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strb w9, [sp, #32] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z6.b, z3.b[6] +; CHECK-NEXT: mov z7.b, z3.b[4] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strb w8, [sp, #46] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: strb w9, [sp, #45] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: mov z5.b, z3.b[8] +; CHECK-NEXT: strb w10, [sp, #47] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strb w8, [sp, #43] +; CHECK-NEXT: fmov w8, s27 +; CHECK-NEXT: strb w9, [sp, #42] +; CHECK-NEXT: fmov w9, s28 +; CHECK-NEXT: mov z16.b, z3.b[2] +; CHECK-NEXT: mov z31.b, z0.b[6] +; CHECK-NEXT: strb w10, [sp, #44] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: strb w8, [sp, #39] +; CHECK-NEXT: fmov w8, s30 +; CHECK-NEXT: strb w9, [sp, #38] +; CHECK-NEXT: fmov w9, s31 +; CHECK-NEXT: mov z29.b, z0.b[10] +; CHECK-NEXT: mov z9.b, z0.b[2] +; CHECK-NEXT: strb w10, [sp, #41] +; CHECK-NEXT: fmov w10, s29 +; CHECK-NEXT: strb w8, [sp, #36] +; CHECK-NEXT: fmov w8, s9 +; CHECK-NEXT: strb w9, [sp, #35] +; CHECK-NEXT: fmov w9, s11 +; CHECK-NEXT: mov z8.b, z0.b[4] +; CHECK-NEXT: mov z16.b, z11.b[4] +; CHECK-NEXT: mov z27.b, z11.b[2] +; CHECK-NEXT: strb w10, [sp, #37] +; CHECK-NEXT: fmov w10, s8 +; CHECK-NEXT: strb w8, [sp, #33] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: strb w9, [sp, #8] +; CHECK-NEXT: fmov w9, s27 +; CHECK-NEXT: mov z5.b, z11.b[10] +; CHECK-NEXT: mov z6.b, z11.b[8] +; CHECK-NEXT: mov z2.b, z11.b[14] +; CHECK-NEXT: fmov w12, s5 +; CHECK-NEXT: fmov w13, s6 +; CHECK-NEXT: mov z5.b, z10.b[10] +; CHECK-NEXT: mov z6.b, z10.b[8] +; CHECK-NEXT: strb w10, [sp, #34] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strb w8, [sp, #10] +; CHECK-NEXT: 
fmov w8, s5 +; CHECK-NEXT: strb w9, [sp, #9] +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: mov z4.b, z11.b[12] +; CHECK-NEXT: mov z7.b, z11.b[6] +; CHECK-NEXT: mov z28.b, z11.b[15] +; CHECK-NEXT: mov z29.b, z11.b[13] +; CHECK-NEXT: mov z30.b, z11.b[11] +; CHECK-NEXT: mov z31.b, z11.b[9] +; CHECK-NEXT: mov z8.b, z11.b[7] +; CHECK-NEXT: mov z9.b, z11.b[5] +; CHECK-NEXT: mov z12.b, z11.b[3] +; CHECK-NEXT: mov z13.b, z11.b[1] +; CHECK-NEXT: mov z2.b, z10.b[14] +; CHECK-NEXT: mov z11.b, z10.b[4] +; CHECK-NEXT: mov z14.b, z10.b[2] +; CHECK-NEXT: strb w10, [sp, #15] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strb w8, [sp, #5] +; CHECK-NEXT: fmov w8, s11 +; CHECK-NEXT: strb w9, [sp, #4] +; CHECK-NEXT: fmov w9, s14 +; CHECK-NEXT: mov z17.b, z3.b[15] +; CHECK-NEXT: mov z18.b, z3.b[13] +; CHECK-NEXT: fmov w14, s7 +; CHECK-NEXT: mov z7.b, z10.b[6] +; CHECK-NEXT: strb w10, [sp, #7] +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: strb w8, [sp, #2] +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: strb w9, [sp, #1] +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: mov z19.b, z3.b[11] +; CHECK-NEXT: mov z20.b, z3.b[9] +; CHECK-NEXT: mov z21.b, z3.b[7] +; CHECK-NEXT: strb w10, [sp, #3] +; CHECK-NEXT: fmov w10, s19 +; CHECK-NEXT: strb w8, [sp, #63] +; CHECK-NEXT: fmov w8, s20 +; CHECK-NEXT: strb w9, [sp, #62] +; CHECK-NEXT: fmov w9, s21 +; CHECK-NEXT: mov z22.b, z3.b[5] +; CHECK-NEXT: mov z23.b, z3.b[3] +; CHECK-NEXT: mov z3.b, z0.b[13] +; CHECK-NEXT: strb w10, [sp, #61] +; CHECK-NEXT: fmov w10, s22 +; CHECK-NEXT: strb w8, [sp, #60] +; CHECK-NEXT: fmov w8, s23 +; CHECK-NEXT: strb w9, [sp, #59] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: mov z24.b, z0.b[11] +; CHECK-NEXT: mov z25.b, z0.b[9] +; CHECK-NEXT: mov z26.b, z0.b[5] +; CHECK-NEXT: strb w10, [sp, #58] +; CHECK-NEXT: fmov w10, s24 +; CHECK-NEXT: strb w8, [sp, #57] +; CHECK-NEXT: fmov w8, s25 +; CHECK-NEXT: strb w9, [sp, #54] +; CHECK-NEXT: fmov w9, s26 +; CHECK-NEXT: mov z1.b, z0.b[3] +; CHECK-NEXT: mov z0.b, z0.b[1] +; CHECK-NEXT: strb w10, [sp, #53] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strb w8, [sp, #52] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strb w9, [sp, #50] +; CHECK-NEXT: fmov w9, s28 +; CHECK-NEXT: strb w10, [sp, #49] +; CHECK-NEXT: fmov w10, s29 +; CHECK-NEXT: strb w8, [sp, #48] +; CHECK-NEXT: fmov w8, s30 +; CHECK-NEXT: strb w9, [sp, #31] +; CHECK-NEXT: fmov w9, s31 +; CHECK-NEXT: strb w10, [sp, #30] +; CHECK-NEXT: fmov w10, s8 +; CHECK-NEXT: strb w8, [sp, #29] +; CHECK-NEXT: fmov w8, s9 +; CHECK-NEXT: strb w9, [sp, #28] +; CHECK-NEXT: fmov w9, s12 +; CHECK-NEXT: fmov w11, s4 +; CHECK-NEXT: mov z15.b, z10.b[15] +; CHECK-NEXT: mov z16.b, z10.b[13] +; CHECK-NEXT: strb w10, [sp, #27] +; CHECK-NEXT: fmov w10, s13 +; CHECK-NEXT: strb w8, [sp, #26] +; CHECK-NEXT: fmov w8, s15 +; CHECK-NEXT: strb w9, [sp, #25] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: mov z4.b, z10.b[12] +; CHECK-NEXT: mov z27.b, z10.b[11] +; CHECK-NEXT: strb w11, [sp, #14] +; CHECK-NEXT: mov z2.b, z10.b[9] +; CHECK-NEXT: fmov w11, s4 +; CHECK-NEXT: mov z4.b, z10.b[7] +; CHECK-NEXT: strb w10, [sp, #24] +; CHECK-NEXT: fmov w10, s27 +; CHECK-NEXT: strb w8, [sp, #23] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strb w9, [sp, #22] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z5.b, z10.b[5] +; CHECK-NEXT: mov z6.b, z10.b[3] +; CHECK-NEXT: mov z7.b, z10.b[1] +; CHECK-NEXT: fmov w15, s10 +; CHECK-NEXT: strb w10, [sp, #21] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strb w8, [sp, #20] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: strb w9, [sp, #19] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: strb w15, [sp] +; 
CHECK-NEXT: strb w12, [sp, #13] +; CHECK-NEXT: ldr q17, [sp, #32] +; CHECK-NEXT: strb w13, [sp, #12] +; CHECK-NEXT: ldr q0, [sp, #48] +; CHECK-NEXT: strb w14, [sp, #11] +; CHECK-NEXT: strb w11, [sp, #6] +; CHECK-NEXT: strb w10, [sp, #18] +; CHECK-NEXT: ldr q18, [sp] +; CHECK-NEXT: strb w8, [sp, #17] +; CHECK-NEXT: add z0.b, z17.b, z0.b +; CHECK-NEXT: strb w9, [sp, #16] +; CHECK-NEXT: ldr q1, [sp, #16] +; CHECK-NEXT: ldp d9, d8, [sp, #112] // 16-byte Folded Reload +; CHECK-NEXT: ldp d11, d10, [sp, #96] // 16-byte Folded Reload +; CHECK-NEXT: add z1.b, z18.b, z1.b +; CHECK-NEXT: ldp d13, d12, [sp, #80] // 16-byte Folded Reload +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp d15, d14, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #128 +; CHECK-NEXT: ret + %tmp1 = load <32 x i8>, <32 x i8>* %a + %tmp2 = load <32 x i8>, <32 x i8>* %b + %tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> + %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> + %tmp5 = add <32 x i8> %tmp3, %tmp4 + store <32 x i8> %tmp5, <32 x i8>* %a + ret void +} + +define void @uzp_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0{ +; CHECK-LABEL: uzp_v32i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #192 +; CHECK-NEXT: .cfi_def_cfa_offset 192 +; CHECK-NEXT: stp d15, d14, [sp, #128] // 16-byte Folded Spill +; CHECK-NEXT: stp d13, d12, [sp, #144] // 16-byte Folded Spill +; CHECK-NEXT: stp d11, d10, [sp, #160] // 16-byte Folded Spill +; CHECK-NEXT: stp d9, d8, [sp, #176] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_offset b8, -8 +; CHECK-NEXT: .cfi_offset b9, -16 +; CHECK-NEXT: .cfi_offset b10, -24 +; CHECK-NEXT: .cfi_offset b11, -32 +; CHECK-NEXT: .cfi_offset b12, -40 +; CHECK-NEXT: .cfi_offset b13, -48 +; CHECK-NEXT: .cfi_offset b14, -56 +; CHECK-NEXT: .cfi_offset b15, -64 +; CHECK-NEXT: ldp q4, q5, [x1] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z22.h, z4.h[4] +; CHECK-NEXT: mov z23.h, z4.h[2] +; CHECK-NEXT: mov z21.h, z4.h[6] +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: mov z7.h, z5.h[4] +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: mov z16.h, z5.h[2] +; CHECK-NEXT: mov z6.h, z5.h[6] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: mov z17.h, z5.h[7] +; CHECK-NEXT: mov z18.h, z5.h[5] +; CHECK-NEXT: mov z19.h, z5.h[3] +; CHECK-NEXT: mov z14.h, z0.h[4] +; CHECK-NEXT: mov z15.h, z0.h[2] +; CHECK-NEXT: mov z13.h, z0.h[6] +; CHECK-NEXT: mov z20.h, z5.h[1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: mov z5.h, z4.h[7] +; CHECK-NEXT: mov z6.h, z4.h[5] +; CHECK-NEXT: mov z12.h, z1.h[5] +; CHECK-NEXT: mov z10.h, z2.h[4] +; CHECK-NEXT: mov z11.h, z2.h[2] +; CHECK-NEXT: ldp q25, q24, [x1, #32] +; CHECK-NEXT: strh w8, [sp, #40] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: strh w9, [sp, #32] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: strh w10, [sp, #46] +; CHECK-NEXT: fmov w10, s21 +; CHECK-NEXT: strh w8, [sp, #44] +; CHECK-NEXT: fmov w8, s22 +; CHECK-NEXT: strh w9, [sp, #42] +; CHECK-NEXT: mov z29.h, z25.h[6] +; CHECK-NEXT: fmov w9, s24 +; CHECK-NEXT: mov z26.h, z24.h[6] +; CHECK-NEXT: strh w8, [sp, #36] +; CHECK-NEXT: fmov w8, s23 +; CHECK-NEXT: mov z27.h, z24.h[4] +; CHECK-NEXT: mov z30.h, z25.h[4] +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: fmov w9, s27 +; CHECK-NEXT: strh w8, [sp, #34] +; CHECK-NEXT: fmov w8, s26 +; CHECK-NEXT: strh w10, [sp, #38] +; CHECK-NEXT: mov z31.h, z25.h[2] +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: fmov w9, s30 +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s29 +; CHECK-NEXT: fmov w10, s25 +; CHECK-NEXT: mov z28.h, z24.h[2] +; CHECK-NEXT: strh 
w9, [sp, #4] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: fmov w8, s31 +; CHECK-NEXT: strh w10, [sp] +; CHECK-NEXT: fmov w10, s28 +; CHECK-NEXT: mov z28.h, z3.h[6] +; CHECK-NEXT: mov z29.h, z3.h[4] +; CHECK-NEXT: strh w8, [sp, #2] +; CHECK-NEXT: fmov w8, s28 +; CHECK-NEXT: strh w9, [sp, #104] +; CHECK-NEXT: fmov w9, s29 +; CHECK-NEXT: mov z30.h, z3.h[2] +; CHECK-NEXT: mov z31.h, z3.h[5] +; CHECK-NEXT: mov z8.h, z3.h[3] +; CHECK-NEXT: mov z9.h, z3.h[1] +; CHECK-NEXT: mov z3.h, z2.h[6] +; CHECK-NEXT: strh w8, [sp, #110] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w9, [sp, #108] +; CHECK-NEXT: fmov w9, s10 +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z10.h, z1.h[2] +; CHECK-NEXT: strh w8, [sp, #102] +; CHECK-NEXT: fmov w8, s11 +; CHECK-NEXT: strh w9, [sp, #100] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w10, [sp, #96] +; CHECK-NEXT: fmov w10, s30 +; CHECK-NEXT: mov z30.h, z1.h[4] +; CHECK-NEXT: strh w8, [sp, #98] +; CHECK-NEXT: strh w9, [sp, #72] +; CHECK-NEXT: fmov w8, s30 +; CHECK-NEXT: fmov w9, s10 +; CHECK-NEXT: strh w10, [sp, #106] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: mov z7.h, z4.h[3] +; CHECK-NEXT: strh w8, [sp, #76] +; CHECK-NEXT: fmov w8, s14 +; CHECK-NEXT: strh w9, [sp, #74] +; CHECK-NEXT: fmov w9, s15 +; CHECK-NEXT: strh w10, [sp, #64] +; CHECK-NEXT: fmov w10, s13 +; CHECK-NEXT: strh w8, [sp, #68] +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: strh w9, [sp, #66] +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: strh w10, [sp, #70] +; CHECK-NEXT: fmov w10, s19 +; CHECK-NEXT: strh w8, [sp, #62] +; CHECK-NEXT: fmov w8, s20 +; CHECK-NEXT: strh w9, [sp, #60] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: mov z4.h, z4.h[1] +; CHECK-NEXT: strh w10, [sp, #58] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: strh w8, [sp, #56] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: strh w9, [sp, #54] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z16.h, z24.h[7] +; CHECK-NEXT: mov z21.h, z24.h[5] +; CHECK-NEXT: mov z22.h, z24.h[3] +; CHECK-NEXT: strh w10, [sp, #52] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: strh w8, [sp, #50] +; CHECK-NEXT: fmov w8, s21 +; CHECK-NEXT: strh w9, [sp, #48] +; CHECK-NEXT: fmov w9, s22 +; CHECK-NEXT: mov z23.h, z24.h[1] +; CHECK-NEXT: mov z24.h, z25.h[7] +; CHECK-NEXT: mov z26.h, z25.h[5] +; CHECK-NEXT: strh w10, [sp, #30] +; CHECK-NEXT: fmov w10, s23 +; CHECK-NEXT: strh w8, [sp, #28] +; CHECK-NEXT: fmov w8, s24 +; CHECK-NEXT: strh w9, [sp, #26] +; CHECK-NEXT: fmov w9, s26 +; CHECK-NEXT: mov z27.h, z25.h[3] +; CHECK-NEXT: mov z25.h, z25.h[1] +; CHECK-NEXT: strh w10, [sp, #24] +; CHECK-NEXT: fmov w10, s27 +; CHECK-NEXT: strh w8, [sp, #22] +; CHECK-NEXT: fmov w8, s25 +; CHECK-NEXT: strh w9, [sp, #20] +; CHECK-NEXT: fmov w9, s31 +; CHECK-NEXT: mov z3.h, z2.h[5] +; CHECK-NEXT: strh w10, [sp, #18] +; CHECK-NEXT: fmov w10, s8 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: fmov w8, s9 +; CHECK-NEXT: strh w9, [sp, #124] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: mov z28.h, z2.h[3] +; CHECK-NEXT: mov z2.h, z2.h[1] +; CHECK-NEXT: mov z11.h, z1.h[7] +; CHECK-NEXT: strh w10, [sp, #122] +; CHECK-NEXT: fmov w10, s28 +; CHECK-NEXT: strh w8, [sp, #120] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strh w9, [sp, #116] +; CHECK-NEXT: fmov w9, s11 +; CHECK-NEXT: mov z29.h, z1.h[6] +; CHECK-NEXT: fmov w11, s29 +; CHECK-NEXT: mov z29.h, z1.h[3] +; CHECK-NEXT: mov z1.h, z1.h[1] +; CHECK-NEXT: strh w10, [sp, #114] +; CHECK-NEXT: fmov w10, s12 +; CHECK-NEXT: strh w8, [sp, #112] +; CHECK-NEXT: fmov w8, s29 +; CHECK-NEXT: strh w9, [sp, #94] +; 
CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z30.h, z0.h[7] +; CHECK-NEXT: mov z10.h, z0.h[5] +; CHECK-NEXT: mov z0.h, z0.h[3] +; CHECK-NEXT: strh w10, [sp, #92] +; CHECK-NEXT: fmov w10, s30 +; CHECK-NEXT: strh w8, [sp, #90] +; CHECK-NEXT: fmov w8, s10 +; CHECK-NEXT: strh w9, [sp, #88] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: ldr q13, [sp, #32] +; CHECK-NEXT: strh w11, [sp, #78] +; CHECK-NEXT: ldr q0, [sp, #48] +; CHECK-NEXT: strh w10, [sp, #86] +; CHECK-NEXT: ldr q17, [sp] +; CHECK-NEXT: strh w8, [sp, #84] +; CHECK-NEXT: strh w9, [sp, #82] +; CHECK-NEXT: ldr q1, [sp, #16] +; CHECK-NEXT: ldr q18, [sp, #96] +; CHECK-NEXT: add z0.h, z13.h, z0.h +; CHECK-NEXT: ldr q19, [sp, #64] +; CHECK-NEXT: ldr q2, [sp, #112] +; CHECK-NEXT: add z1.h, z17.h, z1.h +; CHECK-NEXT: ldr q3, [sp, #80] +; CHECK-NEXT: stp q0, q1, [x0, #32] +; CHECK-NEXT: ldp d9, d8, [sp, #176] // 16-byte Folded Reload +; CHECK-NEXT: add z0.h, z18.h, z2.h +; CHECK-NEXT: ldp d11, d10, [sp, #160] // 16-byte Folded Reload +; CHECK-NEXT: add z1.h, z19.h, z3.h +; CHECK-NEXT: ldp d13, d12, [sp, #144] // 16-byte Folded Reload +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp d15, d14, [sp, #128] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #192 +; CHECK-NEXT: ret + %tmp1 = load <32 x i16>, <32 x i16>* %a + %tmp2 = load <32 x i16>, <32 x i16>* %b + %tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> + %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> + %tmp5 = add <32 x i16> %tmp3, %tmp4 + store <32 x i16> %tmp5, <32 x i16>* %a + ret void +} + +define void @uzp_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0{ +; CHECK-LABEL: uzp_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov z17.h, z0.h[4] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z18.h, z0.h[2] +; CHECK-NEXT: mov z19.h, z0.h[7] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z3.h, z1.h[4] +; CHECK-NEXT: ldp q21, q22, [x1] +; CHECK-NEXT: mov z2.h, z1.h[6] +; CHECK-NEXT: mov z4.h, z1.h[2] +; CHECK-NEXT: strh w8, [sp, #40] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z5.h, z1.h[7] +; CHECK-NEXT: mov z6.h, z1.h[5] +; CHECK-NEXT: mov z7.h, z1.h[3] +; CHECK-NEXT: strh w8, [sp, #44] +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: mov z16.h, z1.h[1] +; CHECK-NEXT: mov z1.h, z0.h[6] +; CHECK-NEXT: strh w9, [sp, #32] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strh w10, [sp, #46] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #36] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z2.h, z22.h[6] +; CHECK-NEXT: strh w9, [sp, #42] +; CHECK-NEXT: strh w10, [sp, #38] +; CHECK-NEXT: fmov w9, s22 +; CHECK-NEXT: fmov w10, s21 +; CHECK-NEXT: strh w8, [sp, #34] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z3.h, z22.h[4] +; CHECK-NEXT: mov z4.h, z22.h[2] +; CHECK-NEXT: mov z17.h, z22.h[7] +; CHECK-NEXT: mov z18.h, z22.h[5] +; CHECK-NEXT: mov z23.h, z22.h[3] +; CHECK-NEXT: mov z24.h, z22.h[1] +; CHECK-NEXT: mov z22.h, z21.h[6] +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strh w10, [sp] +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s22 +; CHECK-NEXT: mov z25.h, z21.h[4] +; CHECK-NEXT: mov z26.h, z21.h[2] +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: fmov w9, s25 +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: fmov w10, s26 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: strh w9, [sp, #4] +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: strh w10, [sp, 
#2] +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: strh w8, [sp, #62] +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: mov z20.h, z0.h[5] +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: strh w9, [sp, #60] +; CHECK-NEXT: fmov w9, s19 +; CHECK-NEXT: strh w10, [sp, #58] +; CHECK-NEXT: fmov w10, s20 +; CHECK-NEXT: strh w8, [sp, #56] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: strh w9, [sp, #54] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strh w10, [sp, #52] +; CHECK-NEXT: fmov w10, s17 +; CHECK-NEXT: strh w8, [sp, #50] +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z27.h, z21.h[7] +; CHECK-NEXT: strh w9, [sp, #48] +; CHECK-NEXT: fmov w9, s23 +; CHECK-NEXT: strh w10, [sp, #30] +; CHECK-NEXT: fmov w10, s24 +; CHECK-NEXT: strh w8, [sp, #28] +; CHECK-NEXT: fmov w8, s27 +; CHECK-NEXT: mov z28.h, z21.h[5] +; CHECK-NEXT: mov z2.h, z21.h[3] +; CHECK-NEXT: mov z3.h, z21.h[1] +; CHECK-NEXT: strh w9, [sp, #26] +; CHECK-NEXT: fmov w9, s28 +; CHECK-NEXT: strh w10, [sp, #24] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #22] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: ldr q4, [sp, #32] +; CHECK-NEXT: strh w9, [sp, #20] +; CHECK-NEXT: ldr q5, [sp] +; CHECK-NEXT: strh w10, [sp, #18] +; CHECK-NEXT: ldr q0, [sp, #48] +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: ldr q1, [sp, #16] +; CHECK-NEXT: add z0.h, z4.h, z0.h +; CHECK-NEXT: add z1.h, z5.h, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: ret + %tmp1 = load <16 x i16>, <16 x i16>* %a + %tmp2 = load <16 x i16>, <16 x i16>* %b + %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> + %tmp4 = shufflevector <16 x i16> %tmp1, <16 x i16> %tmp2, <16 x i32> + %tmp5 = add <16 x i16> %tmp3, %tmp4 + store <16 x i16> %tmp5, <16 x i16>* %a + ret void +} + +define void @uzp_v8f32(<8 x float>* %a, <8 x float>* %b) #0{ +; CHECK-LABEL: uzp_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q3, q2, [x1] +; CHECK-NEXT: mov z4.s, z0.s[2] +; CHECK-NEXT: stp s0, s4, [sp, #24] +; CHECK-NEXT: mov z4.s, z3.s[2] +; CHECK-NEXT: mov z5.s, z2.s[2] +; CHECK-NEXT: stp s4, s2, [sp, #4] +; CHECK-NEXT: stp s5, s1, [sp, #12] +; CHECK-NEXT: mov z5.s, z0.s[3] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: stp s0, s5, [sp, #40] +; CHECK-NEXT: mov z0.s, z3.s[3] +; CHECK-NEXT: str s1, [sp, #32] +; CHECK-NEXT: mov z1.s, z3.s[1] +; CHECK-NEXT: stp s1, s0, [sp, #48] +; CHECK-NEXT: ldp q4, q2, [sp] +; CHECK-NEXT: ldp q0, q1, [sp, #32] +; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z4.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: ret + %tmp1 = load <8 x float>, <8 x float>* %a + %tmp2 = load <8 x float>, <8 x float>* %b + %tmp3 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> + %tmp4 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> + %tmp5 = fadd <8 x float> %tmp3, %tmp4 + store <8 x float> %tmp5, <8 x float>* %a + ret void +} + +define void @uzp_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0{ +; CHECK-LABEL: uzp_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldp q3, q2, [x1] +; CHECK-NEXT: zip1 z4.d, z1.d, z0.d +; CHECK-NEXT: trn2 z0.d, z1.d, z0.d +; CHECK-NEXT: add z0.d, z4.d, z0.d +; CHECK-NEXT: zip1 z5.d, z3.d, z2.d +; CHECK-NEXT: trn2 z1.d, z3.d, z2.d +; CHECK-NEXT: add z1.d, z5.d, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %tmp1 
= load <4 x i64>, <4 x i64>* %a + %tmp2 = load <4 x i64>, <4 x i64>* %b + %tmp3 = shufflevector <4 x i64> %tmp1, <4 x i64> %tmp2, <4 x i32> + %tmp4 = shufflevector <4 x i64> %tmp1, <4 x i64> %tmp2, <4 x i32> + %tmp5 = add <4 x i64> %tmp3, %tmp4 + store <4 x i64> %tmp5, <4 x i64>* %a + ret void +} + +define void @uzp_v8i16(<8 x i16>* %a, <8 x i16>* %b) #0{ +; CHECK-LABEL: uzp_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: mov z3.h, z0.h[4] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: mov z4.h, z0.h[2] +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z5.h, z0.h[7] +; CHECK-NEXT: mov z6.h, z0.h[5] +; CHECK-NEXT: mov z7.h, z0.h[3] +; CHECK-NEXT: mov z16.h, z0.h[1] +; CHECK-NEXT: mov z0.h, z1.h[6] +; CHECK-NEXT: mov z17.h, z1.h[4] +; CHECK-NEXT: strh w9, [sp] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strh w10, [sp, #14] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: mov z18.h, z1.h[2] +; CHECK-NEXT: strh w9, [sp, #10] +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: strh w10, [sp, #6] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strh w8, [sp, #4] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: mov z19.h, z1.h[7] +; CHECK-NEXT: strh w9, [sp, #2] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: strh w10, [sp, #30] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: strh w8, [sp, #28] +; CHECK-NEXT: fmov w8, s19 +; CHECK-NEXT: mov z20.h, z1.h[5] +; CHECK-NEXT: mov z21.h, z1.h[3] +; CHECK-NEXT: mov z0.h, z1.h[1] +; CHECK-NEXT: strh w9, [sp, #26] +; CHECK-NEXT: fmov w9, s20 +; CHECK-NEXT: strh w10, [sp, #24] +; CHECK-NEXT: fmov w10, s21 +; CHECK-NEXT: strh w8, [sp, #22] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #20] +; CHECK-NEXT: strh w10, [sp, #18] +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: add z0.h, z1.h, z0.h +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <8 x i16>, <8 x i16>* %a + %tmp2 = load <8 x i16>, <8 x i16>* %b + %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> + %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> + %tmp5 = add <8 x i16> %tmp3, %tmp4 + store <8 x i16> %tmp5, <8 x i16>* %a + ret void +} + +define void @uzp_v8i32_undef(<8 x i32>* %a) #0{ +; CHECK-LABEL: uzp_v8i32_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: mov z5.s, z1.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z3.s, z0.s[3] +; CHECK-NEXT: mov z4.s, z0.s[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z0.s, z1.s[2] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w11, s0 +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: fmov w12, s3 +; CHECK-NEXT: stp w8, w9, [sp, #8] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: stp w10, w11, [sp] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: stp w8, w12, [sp, #24] +; CHECK-NEXT: stp w10, w9, [sp, #16] +; CHECK-NEXT: ldp q0, q1, [sp] +; CHECK-NEXT: add z0.s, z0.s, z1.s +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %tmp1 = load <8 x i32>, <8 x i32>* %a + %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> + %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> + %tmp5 = add <8 x i32> 
%tmp3, %tmp4 + store <8 x i32> %tmp5, <8 x i32>* %a + ret void +} + +define void @zip_vscale2_4(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: zip_vscale2_4: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: zip1 z4.d, z1.d, z2.d +; CHECK-NEXT: trn2 z1.d, z1.d, z2.d +; CHECK-NEXT: zip1 z2.d, z0.d, z3.d +; CHECK-NEXT: trn2 z0.d, z0.d, z3.d +; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z4.d +; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: stp q2, q0, [x0] +; CHECK-NEXT: ret + %tmp1 = load <4 x double>, <4 x double>* %a + %tmp2 = load <4 x double>, <4 x double>* %b + %tmp3 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> + %tmp4 = shufflevector <4 x double> %tmp1, <4 x double> %tmp2, <4 x i32> + %tmp5 = fadd <4 x double> %tmp3, %tmp4 + store <4 x double> %tmp5, <4 x double>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll @@ -0,0 +1,320 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define i1 @ptest_v16i1_256bit_min_sve(float* %a, float * %b) #0 { +; CHECK-LABEL: ptest_v16i1_256bit_min_sve: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0, #32] +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: ldp q3, q4, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: fcmne p2.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z2.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z1.s, z0.s +; CHECK-NEXT: mov z1.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z4.s, z0.s +; CHECK-NEXT: fcmne p0.s, p0/z, z3.s, z0.s +; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: mov z3.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: splice z1.h, p1, z1.h, z2.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z2.h, z3.h, z3.h +; CHECK-NEXT: splice z2.h, p1, z2.h, z0.h +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z2.b, z2.b +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: orv b0, p0, z0.b +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: and w0, w8, #0x1 +; CHECK-NEXT: ret + %v0 = bitcast float* %a to <16 x float>* + %v1 = load <16 x float>, <16 x float>* %v0, align 4 + %v2 = fcmp une <16 x float> %v1, zeroinitializer + %v3 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v2) + ret i1 %v3 +} + +define i1 @ptest_v16i1_512bit_min_sve(float* %a, float * %b) #0 { +; CHECK-LABEL: ptest_v16i1_512bit_min_sve: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI1_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0, #32] +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: ldp q3, q4, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI1_0] +; CHECK-NEXT: fcmne p2.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z2.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z1.s, z0.s +; CHECK-NEXT: mov z1.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z4.s, z0.s +; CHECK-NEXT: fcmne p0.s, p0/z, z3.s, z0.s 
+; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: mov z3.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: splice z1.h, p1, z1.h, z2.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z2.h, z3.h, z3.h +; CHECK-NEXT: splice z2.h, p1, z2.h, z0.h +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z2.b, z2.b +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: orv b0, p0, z0.b +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: and w0, w8, #0x1 +; CHECK-NEXT: ret + %v0 = bitcast float* %a to <16 x float>* + %v1 = load <16 x float>, <16 x float>* %v0, align 4 + %v2 = fcmp une <16 x float> %v1, zeroinitializer + %v3 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v2) + ret i1 %v3 +} + +define i1 @ptest_v16i1_512bit_sve(float* %a, float * %b) #0 { +; CHECK-LABEL: ptest_v16i1_512bit_sve: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0, #32] +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: ldp q3, q4, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI2_0] +; CHECK-NEXT: fcmne p2.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z2.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z1.s, z0.s +; CHECK-NEXT: mov z1.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z4.s, z0.s +; CHECK-NEXT: fcmne p0.s, p0/z, z3.s, z0.s +; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: mov z3.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: splice z1.h, p1, z1.h, z2.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z2.h, z3.h, z3.h +; CHECK-NEXT: splice z2.h, p1, z2.h, z0.h +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z2.b, z2.b +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: orv b0, p0, z0.b +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: and w0, w8, #0x1 +; CHECK-NEXT: ret + %v0 = bitcast float* %a to <16 x float>* + %v1 = load <16 x float>, <16 x float>* %v0, align 4 + %v2 = fcmp une <16 x float> %v1, zeroinitializer + %v3 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v2) + ret i1 %v3 +} + +define i1 @ptest_or_v16i1_512bit_min_sve(float* %a, float * %b) #0 { +; CHECK-LABEL: ptest_or_v16i1_512bit_min_sve: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q0, q2, [x0, #32] +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: ldp q3, q4, [x0] +; CHECK-NEXT: fcmne p2.s, p0/z, z2.s, z1.s +; CHECK-NEXT: fcmne p3.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.s, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: splice z2.h, p1, z2.h, z0.h +; CHECK-NEXT: ldp q0, q5, [x1, #32] +; CHECK-NEXT: fcmne p2.s, p0/z, z4.s, z1.s +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b +; CHECK-NEXT: mov z4.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z3.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z4.h, z4.h, z4.h +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: fcmne p3.s, p0/z, z0.s, z1.s +; CHECK-NEXT: splice z3.h, p1, z3.h, z4.h +; CHECK-NEXT: fcmne p2.s, p0/z, z5.s, z1.s +; CHECK-NEXT: uzp1 z3.b, z3.b, z3.b +; 
CHECK-NEXT: ldp q4, q5, [x1] +; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: fcmne p2.s, p0/z, z5.s, z1.s +; CHECK-NEXT: fcmne p0.s, p0/z, z4.s, z1.s +; CHECK-NEXT: mov z5.s, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z4.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z5.h, z5.h, z5.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: uzp1 z4.h, z4.h, z4.h +; CHECK-NEXT: splice z5.h, p1, z5.h, z0.h +; CHECK-NEXT: splice z4.h, p1, z4.h, z1.h +; CHECK-NEXT: ptrue p3.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z5.b, z5.b +; CHECK-NEXT: uzp1 z1.b, z4.b, z4.b +; CHECK-NEXT: splice z3.b, p3, z3.b, z2.b +; CHECK-NEXT: splice z1.b, p3, z1.b, z0.b +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: orr z0.d, z3.d, z1.d +; CHECK-NEXT: orv b0, p0, z0.b +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: and w0, w8, #0x1 +; CHECK-NEXT: ret + %v0 = bitcast float* %a to <16 x float>* + %v1 = load <16 x float>, <16 x float>* %v0, align 4 + %v2 = fcmp une <16 x float> %v1, zeroinitializer + %v3 = bitcast float* %b to <16 x float>* + %v4 = load <16 x float>, <16 x float>* %v3, align 4 + %v5 = fcmp une <16 x float> %v4, zeroinitializer + %v6 = or <16 x i1> %v2, %v5 + %v7 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v6) + ret i1 %v7 +} + +declare i1 @llvm.vector.reduce.or.i1.v16i1(<16 x i1>) + +; +; AND reduction. +; + +define i1 @ptest_and_v16i1_512bit_sve(float* %a, float * %b) #0 { +; CHECK-LABEL: ptest_and_v16i1_512bit_sve: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q0, q2, [x0, #32] +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: ldp q3, q4, [x0] +; CHECK-NEXT: fcmne p2.s, p0/z, z2.s, z1.s +; CHECK-NEXT: fcmne p3.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.s, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: splice z2.h, p1, z2.h, z0.h +; CHECK-NEXT: ldp q0, q5, [x1, #32] +; CHECK-NEXT: fcmne p2.s, p0/z, z4.s, z1.s +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b +; CHECK-NEXT: mov z4.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z3.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z4.h, z4.h, z4.h +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: fcmne p3.s, p0/z, z0.s, z1.s +; CHECK-NEXT: splice z3.h, p1, z3.h, z4.h +; CHECK-NEXT: fcmne p2.s, p0/z, z5.s, z1.s +; CHECK-NEXT: uzp1 z3.b, z3.b, z3.b +; CHECK-NEXT: ldp q4, q5, [x1] +; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: fcmne p2.s, p0/z, z5.s, z1.s +; CHECK-NEXT: fcmne p0.s, p0/z, z4.s, z1.s +; CHECK-NEXT: mov z5.s, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z4.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z5.h, z5.h, z5.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: uzp1 z4.h, z4.h, z4.h +; CHECK-NEXT: splice z5.h, p1, z5.h, z0.h +; CHECK-NEXT: splice z4.h, p1, z4.h, z1.h +; CHECK-NEXT: ptrue p3.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z5.b, z5.b +; CHECK-NEXT: uzp1 z1.b, z4.b, z4.b +; CHECK-NEXT: splice z3.b, p3, z3.b, z2.b +; CHECK-NEXT: splice z1.b, p3, z1.b, z0.b +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: and z0.d, z3.d, z1.d +; CHECK-NEXT: andv b0, p0, z0.b +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: and w0, w8, 
#0x1 +; CHECK-NEXT: ret + %v0 = bitcast float* %a to <16 x float>* + %v1 = load <16 x float>, <16 x float>* %v0, align 4 + %v2 = fcmp une <16 x float> %v1, zeroinitializer + %v3 = bitcast float* %b to <16 x float>* + %v4 = load <16 x float>, <16 x float>* %v3, align 4 + %v5 = fcmp une <16 x float> %v4, zeroinitializer + %v6 = and <16 x i1> %v2, %v5 + %v7 = call i1 @llvm.vector.reduce.and.i1.v16i1 (<16 x i1> %v6) + ret i1 %v7 +} + +define i1 @ptest_and_v16i1_512bit_min_sve(float* %a, float * %b) #0 { +; CHECK-LABEL: ptest_and_v16i1_512bit_min_sve: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q0, q2, [x0, #32] +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: ldp q3, q4, [x0] +; CHECK-NEXT: fcmne p2.s, p0/z, z2.s, z1.s +; CHECK-NEXT: fcmne p3.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.s, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: splice z2.h, p1, z2.h, z0.h +; CHECK-NEXT: ldp q0, q5, [x1, #32] +; CHECK-NEXT: fcmne p2.s, p0/z, z4.s, z1.s +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b +; CHECK-NEXT: mov z4.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmne p2.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z3.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z4.h, z4.h, z4.h +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: fcmne p3.s, p0/z, z0.s, z1.s +; CHECK-NEXT: splice z3.h, p1, z3.h, z4.h +; CHECK-NEXT: fcmne p2.s, p0/z, z5.s, z1.s +; CHECK-NEXT: uzp1 z3.b, z3.b, z3.b +; CHECK-NEXT: ldp q4, q5, [x1] +; CHECK-NEXT: mov z0.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: fcmne p2.s, p0/z, z5.s, z1.s +; CHECK-NEXT: fcmne p0.s, p0/z, z4.s, z1.s +; CHECK-NEXT: mov z5.s, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z4.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z5.h, z5.h, z5.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: uzp1 z4.h, z4.h, z4.h +; CHECK-NEXT: splice z5.h, p1, z5.h, z0.h +; CHECK-NEXT: splice z4.h, p1, z4.h, z1.h +; CHECK-NEXT: ptrue p3.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z5.b, z5.b +; CHECK-NEXT: uzp1 z1.b, z4.b, z4.b +; CHECK-NEXT: splice z3.b, p3, z3.b, z2.b +; CHECK-NEXT: splice z1.b, p3, z1.b, z0.b +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: and z0.d, z3.d, z1.d +; CHECK-NEXT: andv b0, p0, z0.b +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: and w0, w8, #0x1 +; CHECK-NEXT: ret + %v0 = bitcast float* %a to <16 x float>* + %v1 = load <16 x float>, <16 x float>* %v0, align 4 + %v2 = fcmp une <16 x float> %v1, zeroinitializer + %v3 = bitcast float* %b to <16 x float>* + %v4 = load <16 x float>, <16 x float>* %v3, align 4 + %v5 = fcmp une <16 x float> %v4, zeroinitializer + %v6 = and <16 x i1> %v2, %v5 + %v7 = call i1 @llvm.vector.reduce.and.i1.v16i1 (<16 x i1> %v6) + ret i1 %v7 +} + +attributes #0 = { "target-features"="+sve" } + +declare i1 @llvm.vector.reduce.and.i1.v16i1(<16 x i1>) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll @@ -0,0 +1,331 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = 
"aarch64-unknown-linux-gnu" + +; i8 +define void @subvector_v4i8(<4 x i8> *%in, <4 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v4i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: st1b { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i8>, <4 x i8>* %in + br label %bb1 + +bb1: + store <4 x i8> %a, <4 x i8>* %out + ret void +} + +define void @subvector_v8i8(<8 x i8> *%in, <8 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v8i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <8 x i8>, <8 x i8>* %in + br label %bb1 + +bb1: + store <8 x i8> %a, <8 x i8>* %out + ret void +} + +define void @subvector_v16i8(<16 x i8> *%in, <16 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v16i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <16 x i8>, <16 x i8>* %in + br label %bb1 + +bb1: + store <16 x i8> %a, <16 x i8>* %out + ret void +} + +define void @subvector_v32i8(<32 x i8> *%in, <32 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v32i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <32 x i8>, <32 x i8>* %in + br label %bb1 + +bb1: + store <32 x i8> %a, <32 x i8>* %out + ret void +} + +; i16 +define void @subvector_v2i16(<2 x i16> *%in, <2 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v2i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrh w8, [x0, #2] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: str w8, [sp, #12] +; CHECK-NEXT: ldrh w8, [x0] +; CHECK-NEXT: str w8, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %a = load <2 x i16>, <2 x i16>* %in + br label %bb1 + +bb1: + store <2 x i16> %a, <2 x i16>* %out + ret void +} + +define void @subvector_v4i16(<4 x i16> *%in, <4 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v4i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i16>, <4 x i16>* %in + br label %bb1 + +bb1: + store <4 x i16> %a, <4 x i16>* %out + ret void +} + +define void @subvector_v8i16(<8 x i16> *%in, <8 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v8i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <8 x i16>, <8 x i16>* %in + br label %bb1 + +bb1: + store <8 x i16> %a, <8 x i16>* %out + ret void +} + +define void @subvector_v16i16(<16 x i16> *%in, <16 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v16i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <16 x i16>, <16 x i16>* %in + br label %bb1 + +bb1: + store <16 x i16> %a, <16 x i16>* %out + ret void +} + +; i32 +define void @subvector_v2i32(<2 x i32> *%in, <2 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v2i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <2 x i32>, <2 x i32>* %in + br label %bb1 + +bb1: + store <2 x i32> %a, <2 x i32>* %out + ret void +} + +define void @subvector_v4i32(<4 x i32> *%in, <4 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v4i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i32>, <4 x i32>* %in + br label %bb1 + +bb1: + 
store <4 x i32> %a, <4 x i32>* %out + ret void +} + +define void @subvector_v8i32(<8 x i32> *%in, <8 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v8i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <8 x i32>, <8 x i32>* %in + br label %bb1 + +bb1: + store <8 x i32> %a, <8 x i32>* %out + ret void +} + +; i64 +define void @subvector_v2i64(<2 x i64> *%in, <2 x i64>* %out) #0 { +; CHECK-LABEL: subvector_v2i64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <2 x i64>, <2 x i64>* %in + br label %bb1 + +bb1: + store <2 x i64> %a, <2 x i64>* %out + ret void +} + +define void @subvector_v4i64(<4 x i64> *%in, <4 x i64>* %out) #0 { +; CHECK-LABEL: subvector_v4i64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <4 x i64>, <4 x i64>* %in + br label %bb1 + +bb1: + store <4 x i64> %a, <4 x i64>* %out + ret void +} + +; f16 +define void @subvector_v2f16(<2 x half> *%in, <2 x half>* %out) #0 { +; CHECK-LABEL: subvector_v2f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr w8, [x0] +; CHECK-NEXT: str w8, [x1] +; CHECK-NEXT: ret + %a = load <2 x half>, <2 x half>* %in + br label %bb1 + +bb1: + store <2 x half> %a, <2 x half>* %out + ret void +} + +define void @subvector_v4f16(<4 x half> *%in, <4 x half>* %out) #0 { +; CHECK-LABEL: subvector_v4f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <4 x half>, <4 x half>* %in + br label %bb1 + +bb1: + store <4 x half> %a, <4 x half>* %out + ret void +} + +define void @subvector_v8f16(<8 x half> *%in, <8 x half>* %out) #0 { +; CHECK-LABEL: subvector_v8f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <8 x half>, <8 x half>* %in + br label %bb1 + +bb1: + store <8 x half> %a, <8 x half>* %out + ret void +} + +define void @subvector_v16f16(<16 x half> *%in, <16 x half>* %out) #0 { +; CHECK-LABEL: subvector_v16f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <16 x half>, <16 x half>* %in + br label %bb1 + +bb1: + store <16 x half> %a, <16 x half>* %out + ret void +} + +; f32 +define void @subvector_v2f32(<2 x float> *%in, <2 x float>* %out) #0 { +; CHECK-LABEL: subvector_v2f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <2 x float>, <2 x float>* %in + br label %bb1 + +bb1: + store <2 x float> %a, <2 x float>* %out + ret void +} + +define void @subvector_v4f32(<4 x float> *%in, <4 x float>* %out) #0 { +; CHECK-LABEL: subvector_v4f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <4 x float>, <4 x float>* %in + br label %bb1 + +bb1: + store <4 x float> %a, <4 x float>* %out + ret void +} + +define void @subvector_v8f32(<8 x float> *%in, <8 x float>* %out) #0 { +; CHECK-LABEL: subvector_v8f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <8 x float>, <8 x float>* %in + br label %bb1 + +bb1: + store <8 x float> %a, <8 x float>* %out + ret void +} + +; f64 +define void @subvector_v2f64(<2 x double> *%in, <2 x double>* %out) #0 { +; CHECK-LABEL: subvector_v2f64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; 
CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <2 x double>, <2 x double>* %in + br label %bb1 + +bb1: + store <2 x double> %a, <2 x double>* %out + ret void +} + +define void @subvector_v4f64(<4 x double> *%in, <4 x double>* %out) #0 { +; CHECK-LABEL: subvector_v4f64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <4 x double>, <4 x double>* %in + br label %bb1 + +bb1: + store <4 x double> %a, <4 x double>* %out + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll @@ -6,35 +6,9 @@ define void @store_trunc_v8i16i8(<8 x i16>* %ap, <8 x i8>* %dest) #0 { ; CHECK-LABEL: store_trunc_v8i16i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldr q0, [x0] -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z2.h, z0.h[6] -; CHECK-NEXT: mov z3.h, z0.h[5] -; CHECK-NEXT: mov z4.h, z0.h[4] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z5.h, z0.h[3] -; CHECK-NEXT: mov z6.h, z0.h[2] -; CHECK-NEXT: mov z0.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b ; CHECK-NEXT: str d0, [x1] -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %ap %val = trunc <8 x i16> %a to <8 x i8> @@ -45,24 +19,10 @@ define void @store_trunc_v4i32i8(<4 x i32>* %ap, <4 x i8>* %dest) #0 { ; CHECK-LABEL: store_trunc_v4i32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ptrue p0.h, vl4 -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: st1b { z0.h }, p0, [x1] -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %ap %val = trunc <4 x i32> %a to <4 x i8> @@ -73,23 +33,9 @@ define void @store_trunc_v4i32i16(<4 x i32>* %ap, <4 x i16>* %dest) #0 { ; CHECK-LABEL: store_trunc_v4i32i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: ldr q0, [x0] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: uzp1 
z0.h, z0.h, z0.h ; CHECK-NEXT: str d0, [x1] -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %ap %val = trunc <4 x i32> %a to <4 x i16> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll @@ -24,106 +24,18 @@ define void @trunc_v32i16_v32i8(<32 x i16>* %in, <32 x i8>* %out) #0 { ; CHECK-LABEL: trunc_v32i16_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: ldp q1, q0, [x0, #32] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z17.h, z1.h[6] -; CHECK-NEXT: mov z18.h, z1.h[5] -; CHECK-NEXT: mov z19.h, z1.h[4] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z2.h, z0.h[7] -; CHECK-NEXT: mov z3.h, z0.h[6] -; CHECK-NEXT: mov z4.h, z0.h[5] -; CHECK-NEXT: ldp q22, q23, [x0] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strb w8, [sp, #24] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #16] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z5.h, z0.h[4] -; CHECK-NEXT: mov z6.h, z0.h[3] -; CHECK-NEXT: mov z7.h, z0.h[2] -; CHECK-NEXT: strb w10, [sp, #31] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w8, [sp, #30] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: strb w9, [sp, #29] -; CHECK-NEXT: fmov w9, s7 -; CHECK-NEXT: mov z16.h, z0.h[1] -; CHECK-NEXT: mov z0.h, z1.h[7] -; CHECK-NEXT: strb w10, [sp, #28] -; CHECK-NEXT: fmov w10, s16 -; CHECK-NEXT: strb w8, [sp, #27] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strb w9, [sp, #26] -; CHECK-NEXT: fmov w9, s17 -; CHECK-NEXT: mov z20.h, z1.h[3] -; CHECK-NEXT: strb w10, [sp, #25] -; CHECK-NEXT: fmov w10, s18 -; CHECK-NEXT: strb w8, [sp, #23] -; CHECK-NEXT: fmov w8, s19 -; CHECK-NEXT: strb w9, [sp, #22] -; CHECK-NEXT: fmov w9, s20 -; CHECK-NEXT: mov z21.h, z1.h[2] -; CHECK-NEXT: mov z0.h, z1.h[1] -; CHECK-NEXT: strb w10, [sp, #21] -; CHECK-NEXT: fmov w10, s21 -; CHECK-NEXT: strb w8, [sp, #20] -; CHECK-NEXT: strb w9, [sp, #19] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: fmov w9, s23 -; CHECK-NEXT: mov z0.h, z23.h[7] -; CHECK-NEXT: mov z1.h, z23.h[6] -; CHECK-NEXT: strb w10, [sp, #18] -; CHECK-NEXT: fmov w10, s22 -; CHECK-NEXT: strb w8, [sp, #17] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strb w9, [sp, #8] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z2.h, z23.h[5] -; CHECK-NEXT: mov z3.h, z23.h[4] -; CHECK-NEXT: mov z4.h, z23.h[3] -; CHECK-NEXT: strb w10, [sp] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strb w8, [sp, #15] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #14] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z5.h, z23.h[2] -; CHECK-NEXT: mov z6.h, z23.h[1] -; CHECK-NEXT: mov z7.h, z22.h[7] -; CHECK-NEXT: strb w10, [sp, #13] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w8, [sp, #12] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: strb w9, [sp, #11] -; CHECK-NEXT: fmov w9, s7 -; CHECK-NEXT: mov z16.h, z22.h[6] -; CHECK-NEXT: mov z17.h, z22.h[5] -; CHECK-NEXT: mov z18.h, z22.h[4] -; CHECK-NEXT: strb w10, [sp, #10] -; CHECK-NEXT: fmov w10, s16 -; CHECK-NEXT: strb w8, [sp, #9] -; CHECK-NEXT: fmov w8, s17 -; CHECK-NEXT: strb w9, [sp, #7] -; CHECK-NEXT: fmov w9, s18 -; CHECK-NEXT: mov z19.h, z22.h[3] -; CHECK-NEXT: mov z20.h, z22.h[2] -; CHECK-NEXT: mov z21.h, z22.h[1] -; CHECK-NEXT: strb w10, [sp, #6] -; CHECK-NEXT: fmov w10, s19 -; CHECK-NEXT: strb w8, [sp, #5] -; CHECK-NEXT: fmov w8, s20 -; CHECK-NEXT: strb w9, [sp, #4] -; CHECK-NEXT: fmov w9, s21 -; CHECK-NEXT: strb 
w10, [sp, #3] -; CHECK-NEXT: strb w8, [sp, #2] -; CHECK-NEXT: strb w9, [sp, #1] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: add z1.b, z1.b, z1.b +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b ; CHECK-NEXT: add z0.b, z0.b, z0.b +; CHECK-NEXT: uzp1 z3.b, z3.b, z3.b +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b +; CHECK-NEXT: splice z3.b, p0, z3.b, z2.b +; CHECK-NEXT: add z1.b, z3.b, z3.b ; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %in %b = trunc <32 x i16> %a to <32 x i8> @@ -189,54 +101,20 @@ define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %in) #0 { ; CHECK-LABEL: trunc_v16i32_v16i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: ldp q1, q0, [x0, #32] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z7.s, z1.s[2] -; CHECK-NEXT: mov z16.s, z1.s[1] -; CHECK-NEXT: ldp q2, q3, [x0] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z4.s, z0.s[3] -; CHECK-NEXT: mov z5.s, z0.s[2] -; CHECK-NEXT: mov z6.s, z0.s[1] -; CHECK-NEXT: strb w9, [sp, #8] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: strb w8, [sp, #12] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z0.s, z1.s[3] -; CHECK-NEXT: mov z19.s, z2.s[2] -; CHECK-NEXT: fmov w10, s3 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: strb w8, [sp] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov z1.s, z3.s[3] -; CHECK-NEXT: strb w10, [sp, #4] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: mov z17.s, z3.s[2] -; CHECK-NEXT: mov z18.s, z3.s[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w9, [sp, #11] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strb w8, [sp, #9] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w10, [sp, #10] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z20.s, z2.s[1] -; CHECK-NEXT: strb w9, [sp, #7] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: strb w10, [sp, #6] -; CHECK-NEXT: fmov w10, s19 -; CHECK-NEXT: strb w8, [sp, #5] -; CHECK-NEXT: fmov w8, s20 -; CHECK-NEXT: strb w9, [sp, #3] -; CHECK-NEXT: strb w10, [sp, #2] -; CHECK-NEXT: strb w8, [sp, #1] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: splice z3.h, p0, z3.h, z2.h +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z3.b, z3.b +; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %in %b = trunc <16 x i32> %a to <16 x i8> @@ -302,58 +180,18 @@ define void @trunc_v16i32_v16i16(<16 x i32>* %in, <16 x i16>* %out) #0 { ; CHECK-LABEL: trunc_v16i32_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: ldp q1, q0, [x0, #32] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z5.s, z1.s[2] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z2.s, z0.s[3] -; CHECK-NEXT: mov z3.s, z0.s[2] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: ldp q6, q7, [x0] -; CHECK-NEXT: strh w8, [sp, #24] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: mov z4.s, z0.s[1] -; CHECK-NEXT: mov z0.s, z1.s[3] -; 
CHECK-NEXT: strh w9, [sp, #16] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: strh w10, [sp, #30] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strh w8, [sp, #28] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: mov z0.s, z1.s[1] -; CHECK-NEXT: strh w9, [sp, #26] -; CHECK-NEXT: strh w10, [sp, #22] -; CHECK-NEXT: fmov w9, s7 -; CHECK-NEXT: strh w8, [sp, #20] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: fmov w10, s6 -; CHECK-NEXT: mov z0.s, z7.s[3] -; CHECK-NEXT: mov z1.s, z7.s[2] -; CHECK-NEXT: mov z2.s, z7.s[1] -; CHECK-NEXT: strh w8, [sp, #18] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #8] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: strh w10, [sp] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: mov z3.s, z6.s[3] -; CHECK-NEXT: mov z4.s, z6.s[2] -; CHECK-NEXT: mov z5.s, z6.s[1] -; CHECK-NEXT: strh w8, [sp, #14] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strh w9, [sp, #12] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: strh w10, [sp, #10] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strh w8, [sp, #6] -; CHECK-NEXT: strh w9, [sp, #4] -; CHECK-NEXT: strh w10, [sp, #2] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: add z1.h, z1.h, z1.h +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h ; CHECK-NEXT: add z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: splice z3.h, p0, z3.h, z2.h +; CHECK-NEXT: add z1.h, z3.h, z3.h ; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %in %b = trunc <16 x i32> %a to <16 x i16> @@ -420,31 +258,21 @@ define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %in) #0 { ; CHECK-LABEL: trunc_v8i64_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: ldp q1, q0, [x0, #32] -; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: ldp q3, q2, [x0] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov x9, d4 -; CHECK-NEXT: mov z0.d, z1.d[1] -; CHECK-NEXT: strb w8, [sp, #14] -; CHECK-NEXT: fmov x8, d3 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov x10, d2 -; CHECK-NEXT: mov z1.d, z2.d[1] -; CHECK-NEXT: mov z2.d, z3.d[1] -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: strb w10, [sp, #10] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: strb w8, [sp, #11] -; CHECK-NEXT: strb w10, [sp, #13] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: splice z3.s, p0, z3.s, z2.s +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z1.h, z3.h, z3.h +; CHECK-NEXT: splice z1.h, p0, z1.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z1.b, z1.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %a = load <8 x i64>, <8 x i64>* %in %b = trunc <8 x i64> %a to <8 x i8> @@ -507,30 +335,20 @@ define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %in) #0 { ; CHECK-LABEL: trunc_v8i64_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: ldp q1, q0, [x0, #32] -; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 
z0.s, z0.s, z0.s ; CHECK-NEXT: ldp q3, q2, [x0] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: strh w9, [sp, #8] -; CHECK-NEXT: fmov x9, d4 -; CHECK-NEXT: mov z0.d, z1.d[1] -; CHECK-NEXT: strh w8, [sp, #12] -; CHECK-NEXT: fmov x8, d3 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: fmov x10, d2 -; CHECK-NEXT: mov z1.d, z2.d[1] -; CHECK-NEXT: mov z2.d, z3.d[1] -; CHECK-NEXT: strh w8, [sp] -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: strh w10, [sp, #4] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: strh w8, [sp, #6] -; CHECK-NEXT: strh w10, [sp, #10] -; CHECK-NEXT: strh w9, [sp, #2] -; CHECK-NEXT: ldr q0, [sp], #16 +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s +; CHECK-NEXT: uzp1 z1.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: splice z3.s, p0, z3.s, z2.s +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z0.h, z3.h, z3.h +; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret %a = load <8 x i64>, <8 x i64>* %in %b = trunc <8 x i64> %a to <8 x i16> @@ -596,30 +414,18 @@ define void @trunc_v8i64_v8i32(<8 x i64>* %in, <8 x i32>* %out) #0 { ; CHECK-LABEL: trunc_v8i64_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: ldp q1, q0, [x0, #32] -; CHECK-NEXT: fmov x9, d1 -; CHECK-NEXT: ldp q2, q3, [x0] -; CHECK-NEXT: mov z4.d, z0.d[1] -; CHECK-NEXT: fmov x8, d0 -; CHECK-NEXT: mov z0.d, z1.d[1] -; CHECK-NEXT: fmov x10, d4 -; CHECK-NEXT: fmov x12, d0 -; CHECK-NEXT: mov z0.d, z2.d[1] -; CHECK-NEXT: stp w8, w10, [sp, #24] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: mov z1.d, z3.d[1] -; CHECK-NEXT: fmov x11, d3 -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: stp w9, w12, [sp, #16] -; CHECK-NEXT: fmov x9, d2 -; CHECK-NEXT: stp w11, w8, [sp, #8] -; CHECK-NEXT: stp w9, w10, [sp] -; CHECK-NEXT: ldp q1, q0, [sp] -; CHECK-NEXT: add z1.s, z1.s, z1.s +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s ; CHECK-NEXT: add z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: splice z3.s, p0, z3.s, z2.s +; CHECK-NEXT: add z1.s, z3.s, z3.s ; CHECK-NEXT: stp q1, q0, [x1] -; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %a = load <8 x i64>, <8 x i64>* %in %b = trunc <8 x i64> %a to <8 x i32> diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll @@ -0,0 +1,356 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; define <4 x i8> @shuffle_ext_byone_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { +; %ret = shufflevector <4 x i8> %op1, <4 x i8> %op2, <4 x i32> +; ret <4 x i8> %ret +; } + +define <8 x i8> @shuffle_ext_byone_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.b, z0.b[7] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.b, w8 +; CHECK-NEXT: fmov d0, d1 +; 
CHECK-NEXT: ret + %ret = shufflevector <8 x i8> %op1, <8 x i8> %op2, <8 x i32> + ret <8 x i8> %ret +} + +define <16 x i8> @shuffle_ext_byone_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.b, z0.b[15] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.b, w8 +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <16 x i8> %op1, <16 x i8> %op2, <16 x i32> + ret <16 x i8> %ret +} + +define void @shuffle_ext_byone_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z0.b, z0.b[15] +; CHECK-NEXT: mov z2.b, z1.b[15] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: insr z1.b, w8 +; CHECK-NEXT: insr z0.b, w9 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %ret = shufflevector <32 x i8> %op1, <32 x i8> %op2, <32 x i32> + store <32 x i8> %ret, <32 x i8>* %a + ret void +} + +; define <2 x i16> @shuffle_ext_byone_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 { +; %ret = shufflevector <2 x i16> %op1, <2 x i16> %op2, <2 x i32> +; ret <2 x i16> %ret +; } + +define <4 x i16> @shuffle_ext_byone_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.h, z0.h[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.h, w8 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <4 x i16> %op1, <4 x i16> %op2, <4 x i32> + ret <4 x i16> %ret +} + +define <8 x i16> @shuffle_ext_byone_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.h, w8 +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <8 x i16> %op1, <8 x i16> %op2, <8 x i32> + ret <8 x i16> %ret +} + +define void @shuffle_ext_byone_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: mov z2.h, z1.h[7] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: insr z1.h, w8 +; CHECK-NEXT: insr z0.h, w9 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %ret = shufflevector <16 x i16> %op1, <16 x i16> %op2, <16 x i32> + store <16 x i16> %ret, <16 x i16>* %a + ret void +} + +define <2 x i32> @shuffle_ext_byone_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.s, w8 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <2 x i32> %op1, <2 x i32> %op2, <2 x i32> + ret <2 x i32> %ret +} + +define <4 x i32> @shuffle_ext_byone_v4i32(<4 x i32> %op1, <4 x 
i32> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.s, w8 +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <4 x i32> %op1, <4 x i32> %op2, <4 x i32> + ret <4 x i32> %ret +} + +define void @shuffle_ext_byone_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: insr z1.s, w8 +; CHECK-NEXT: insr z0.s, w9 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %ret = shufflevector <8 x i32> %op1, <8 x i32> %op2, <8 x i32> + store <8 x i32> %ret, <8 x i32>* %a + ret void +} + +define <2 x i64> @shuffle_ext_byone_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: insr z1.d, x8 +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <2 x i64> %op1, <2 x i64> %op2, <2 x i32> + ret <2 x i64> %ret +} + +define void @shuffle_ext_byone_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: mov z2.d, z1.d[1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: insr z1.d, x8 +; CHECK-NEXT: insr z0.d, x9 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %ret = shufflevector <4 x i64> %op1, <4 x i64> %op2, <4 x i32> + store <4 x i64> %ret, <4 x i64>* %a + ret void +} + + +define <4 x half> @shuffle_ext_byone_v4f16(<4 x half> %op1, <4 x half> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.h, z0.h[3] +; CHECK-NEXT: insr z1.h, h0 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <4 x half> %op1, <4 x half> %op2, <4 x i32> + ret <4 x half> %ret +} + +define <8 x half> @shuffle_ext_byone_v8f16(<8 x half> %op1, <8 x half> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: insr z1.h, h0 +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <8 x half> %op1, <8 x half> %op2, <8 x i32> + ret <8 x half> %ret +} + +define void @shuffle_ext_byone_v16f16(<16 x half>* %a, <16 x half>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x1] +; CHECK-NEXT: mov z3.h, z1.h[7] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: insr z2.h, h3 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: insr z1.h, h0 +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x half>, <16 x half>* %a + %op2 = load <16 x half>, <16 x half>* %b 
+ %ret = shufflevector <16 x half> %op1, <16 x half> %op2, <16 x i32> + store <16 x half> %ret, <16 x half>* %a + ret void +} + +define <2 x float> @shuffle_ext_byone_v2f32(<2 x float> %op1, <2 x float> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: insr z1.s, s0 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <2 x float> %op1, <2 x float> %op2, <2 x i32> + ret <2 x float> %ret +} + +define <4 x float> @shuffle_ext_byone_v4f32(<4 x float> %op1, <4 x float> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: insr z1.s, s0 +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <4 x float> %op1, <4 x float> %op2, <4 x i32> + ret <4 x float> %ret +} + +define void @shuffle_ext_byone_v8f32(<8 x float>* %a, <8 x float>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x1] +; CHECK-NEXT: mov z3.s, z1.s[3] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: insr z2.s, s3 +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: insr z1.s, s0 +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x float>, <8 x float>* %a + %op2 = load <8 x float>, <8 x float>* %b + %ret = shufflevector <8 x float> %op1, <8 x float> %op2, <8 x i32> + store <8 x float> %ret, <8 x float>* %a + ret void +} + +define <2 x double> @shuffle_ext_byone_v2f64(<2 x double> %op1, <2 x double> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: insr z1.d, d0 +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <2 x double> %op1, <2 x double> %op2, <2 x i32> + ret <2 x double> %ret +} + +define void @shuffle_ext_byone_v4f64(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x1] +; CHECK-NEXT: mov z3.d, z1.d[1] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: insr z2.d, d3 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: insr z1.d, d0 +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> + store <4 x double> %ret, <4 x double>* %a + ret void +} + +define void @shuffle_ext_byone_reverse(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_reverse: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: mov z3.d, z1.d[1] +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: insr z2.d, d3 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: insr z1.d, d0 +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> + store <4 x double> %ret, <4 x double>* %a + ret void +} + +define void @shuffle_ext_invalid(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_invalid: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = 
load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> + store <4 x double> %ret, <4 x double>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" }