diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -135,6 +135,11 @@
              cl::desc("DAG combiner enable load/<replace bytes>/store with "
                       "a narrower store"));

+static cl::opt<bool> EnableVectorFCopySignExtendRound(
+    "combiner-vector-fcopysign-extend-round", cl::Hidden, cl::init(false),
+    cl::desc(
+        "Enable merging extends and rounds into FCOPYSIGN on vector types"));
+
 namespace {

 class DAGCombiner {
@@ -15419,11 +15424,7 @@
     if (N1Op0VT == MVT::f128)
       return false;

-    // Avoid mismatched vector operand types, for better instruction selection.
-    if (N1Op0VT.isVector())
-      return false;
-
-    return true;
+    return !N1Op0VT.isVector() || EnableVectorFCopySignExtendRound;
   }
   return false;
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -3597,7 +3597,26 @@
 SDValue DAGTypeLegalizer::SplitVecOp_FCOPYSIGN(SDNode *N) {
   // The result (and the first input) has a legal vector type, but the second
   // input needs splitting.
-  return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements());
+
+  SDLoc DL(N);
+
+  EVT LHSLoVT, LHSHiVT;
+  std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+
+  if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
+    return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements());
+
+  SDValue LHSLo, LHSHi;
+  std::tie(LHSLo, LHSHi) =
+      DAG.SplitVector(N->getOperand(0), DL, LHSLoVT, LHSHiVT);
+
+  SDValue RHSLo, RHSHi;
+  std::tie(RHSLo, RHSHi) = DAG.SplitVector(N->getOperand(1), DL);
+
+  SDValue Lo = DAG.getNode(ISD::FCOPYSIGN, DL, LHSLoVT, LHSLo, RHSLo);
+  SDValue Hi = DAG.getNode(ISD::FCOPYSIGN, DL, LHSHiVT, LHSHi, RHSHi);
+
+  return DAG.getNode(ISD::CONCAT_VECTORS, DL, N->getValueType(0), Lo, Hi);
 }

 SDValue DAGTypeLegalizer::SplitVecOp_FP_TO_XINT_SAT(SDNode *N) {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1646,6 +1646,7 @@
     setOperationAction(ISD::FADD, VT, Custom);
     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
     setOperationAction(ISD::FCEIL, VT, Custom);
+    setOperationAction(ISD::FCOPYSIGN, VT, Custom);
     setOperationAction(ISD::FDIV, VT, Custom);
     setOperationAction(ISD::FFLOOR, VT, Custom);
     setOperationAction(ISD::FMA, VT, Custom);
@@ -7774,18 +7775,22 @@
   SDValue In2 = Op.getOperand(1);
   EVT SrcVT = In2.getValueType();

-  if (SrcVT.bitsLT(VT))
-    In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
-  else if (SrcVT.bitsGT(VT))
-    In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2,
-                      DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
+  if (!SrcVT.bitsEq(VT))
+    In2 = DAG.getFPExtendOrRound(In2, DL, VT);

   if (VT.isScalableVector())
     IntVT =
         getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());

-  if (VT != In2.getValueType())
-    return SDValue();
+  if (VT.isFixedLengthVector() && useSVEForFixedLengthVectorVT(VT)) {
+    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+
+    In1 = convertToScalableVector(DAG, ContainerVT, In1);
+    In2 = convertToScalableVector(DAG, ContainerVT, In2);
+
+    SDValue Res = DAG.getNode(ISD::FCOPYSIGN, DL, ContainerVT, In1, In2);
+    return convertFromScalableVector(DAG, VT, Res);
+  }
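+  // Note: convertToScalableVector/convertFromScalableVector are the existing
+  // AArch64ISelLowering helpers for moving a fixed-length value in and out of
+  // an SVE container register; the FCOPYSIGN built on the container type is
+  // then lowered again through the scalable-vector path below.

   auto BitCast =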
       [this](EVT VT, SDValue Op, SelectionDAG &DAG) {
         if (VT.isScalableVector())
@@ -19516,18 +19521,13 @@
 }

 static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
-                                      const AArch64Subtarget *Subtarget,
-                                      bool fixedSVEVectorVT) {
+                                      const AArch64Subtarget *Subtarget) {
   EVT VT = N->getValueType(0);

-  // Don't expand for SVE2
+  // Don't expand for NEON, SVE2 or SME
   if (!VT.isScalableVector() || Subtarget->hasSVE2() || Subtarget->hasSME())
     return SDValue();

-  // Don't expand for NEON
-  if (VT.isFixedLengthVector() && !fixedSVEVectorVT)
-    return SDValue();
-
   SDLoc DL(N);

   SDValue Mask = N->getOperand(0);
@@ -19699,8 +19699,7 @@
   case AArch64ISD::SUNPKLO:
     return performSunpkloCombine(N, DAG);
   case AArch64ISD::BSP:
-    return performBSPExpandForSVE(
-        N, DAG, Subtarget, useSVEForFixedLengthVectorVT(N->getValueType(0)));
+    return performBSPExpandForSVE(N, DAG, Subtarget);
   case ISD::INSERT_VECTOR_ELT:
     return performInsertVectorEltCombine(N, DCI);
   case ISD::EXTRACT_VECTOR_ELT:
diff --git a/llvm/test/CodeGen/AArch64/sve-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
--- a/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve -o - | FileCheck --check-prefixes=CHECK %s
-
+; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve -o - | FileCheck --check-prefixes=CHECK,CHECK-NO-EXTEND-ROUND %s
+; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve --combiner-vector-fcopysign-extend-round -o - | FileCheck --check-prefixes=CHECK,CHECK-EXTEND-ROUND %s
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"

 ;============ v2f32
@@ -47,16 +47,32 @@
 ; SplitVecOp #1
 define <vscale x 4 x float> @test_copysign_v4f32_v4f64(<vscale x 4 x float> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f32_v4f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: and z0.s, z0.s, #0x7fffffff
-; CHECK-NEXT: fcvt z2.s, p0/m, z2.d
-; CHECK-NEXT: fcvt z1.s, p0/m, z1.d
-; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s
-; CHECK-NEXT: and z1.s, z1.s, #0x80000000
-; CHECK-NEXT: orr z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK-NO-EXTEND-ROUND-LABEL: test_copysign_v4f32_v4f64:
+; CHECK-NO-EXTEND-ROUND: // %bb.0:
+; CHECK-NO-EXTEND-ROUND-NEXT: ptrue p0.d
+; CHECK-NO-EXTEND-ROUND-NEXT: and z0.s, z0.s, #0x7fffffff
+; CHECK-NO-EXTEND-ROUND-NEXT: fcvt z2.s, p0/m, z2.d
+; CHECK-NO-EXTEND-ROUND-NEXT: fcvt z1.s, p0/m, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT: uzp1 z1.s, z1.s, z2.s
+; CHECK-NO-EXTEND-ROUND-NEXT: and z1.s, z1.s, #0x80000000
+; CHECK-NO-EXTEND-ROUND-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT: ret
+;
+; CHECK-EXTEND-ROUND-LABEL: test_copysign_v4f32_v4f64:
+; CHECK-EXTEND-ROUND: // %bb.0:
+; CHECK-EXTEND-ROUND-NEXT: ptrue p0.d
+; CHECK-EXTEND-ROUND-NEXT: uunpkhi z3.d, z0.s
+; CHECK-EXTEND-ROUND-NEXT: fcvt z2.s, p0/m, z2.d
+; CHECK-EXTEND-ROUND-NEXT: fcvt z1.s, p0/m, z1.d
+; CHECK-EXTEND-ROUND-NEXT: uunpklo z0.d, z0.s
+; CHECK-EXTEND-ROUND-NEXT: and z2.s, z2.s, #0x80000000
+; CHECK-EXTEND-ROUND-NEXT: and z3.s, z3.s, #0x7fffffff
+; CHECK-EXTEND-ROUND-NEXT: and z1.s, z1.s, #0x80000000
+; CHECK-EXTEND-ROUND-NEXT: and z0.s, z0.s, #0x7fffffff
+; CHECK-EXTEND-ROUND-NEXT: orr z2.d, z3.d, z2.d
+; CHECK-EXTEND-ROUND-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-EXTEND-ROUND-NEXT: uzp1 z0.s, z0.s, z2.s
+; CHECK-EXTEND-ROUND-NEXT: ret
   %tmp0 = fptrunc <vscale x 4 x double> %b to <vscale x 4 x float>
   %r = call <vscale x 4 x float> @llvm.copysign.v4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %tmp0)
   ret <vscale x 4 x float> %r
@@ -161,16 +177,32 @@
 }

 define <vscale x 4 x half> @test_copysign_v4f16_v4f64(<vscale x 4 x half> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f16_v4f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: and z0.h, z0.h, #0x7fff
-; CHECK-NEXT: fcvt z2.h, p0/m, z2.d
-; CHECK-NEXT: fcvt z1.h, p0/m, z1.d
-; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s
-; CHECK-NEXT: and z1.h, z1.h, #0x8000
-; CHECK-NEXT: orr z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK-NO-EXTEND-ROUND-LABEL: test_copysign_v4f16_v4f64:
+; CHECK-NO-EXTEND-ROUND: // %bb.0:
+; CHECK-NO-EXTEND-ROUND-NEXT: ptrue p0.d
+; CHECK-NO-EXTEND-ROUND-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-NO-EXTEND-ROUND-NEXT: fcvt z2.h, p0/m, z2.d
+; CHECK-NO-EXTEND-ROUND-NEXT: fcvt z1.h, p0/m, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT: uzp1 z1.s, z1.s, z2.s
+; CHECK-NO-EXTEND-ROUND-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-NO-EXTEND-ROUND-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT: ret
+;
+; CHECK-EXTEND-ROUND-LABEL: test_copysign_v4f16_v4f64:
+; CHECK-EXTEND-ROUND: // %bb.0:
+; CHECK-EXTEND-ROUND-NEXT: ptrue p0.d
+; CHECK-EXTEND-ROUND-NEXT: uunpkhi z3.d, z0.s
+; CHECK-EXTEND-ROUND-NEXT: fcvt z2.h, p0/m, z2.d
+; CHECK-EXTEND-ROUND-NEXT: fcvt z1.h, p0/m, z1.d
+; CHECK-EXTEND-ROUND-NEXT: uunpklo z0.d, z0.s
+; CHECK-EXTEND-ROUND-NEXT: and z2.h, z2.h, #0x8000
+; CHECK-EXTEND-ROUND-NEXT: and z3.h, z3.h, #0x7fff
+; CHECK-EXTEND-ROUND-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-EXTEND-ROUND-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-EXTEND-ROUND-NEXT: orr z2.d, z3.d, z2.d
+; CHECK-EXTEND-ROUND-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-EXTEND-ROUND-NEXT: uzp1 z0.s, z0.s, z2.s
+; CHECK-EXTEND-ROUND-NEXT: ret
   %tmp0 = fptrunc <vscale x 4 x double> %b to <vscale x 4 x half>
   %r = call <vscale x 4 x half> @llvm.copysign.v4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %tmp0)
   ret <vscale x 4 x half> %r
@@ -192,16 +224,32 @@
 }

 define <vscale x 8 x half> @test_copysign_v8f16_v8f32(<vscale x 8 x half> %a, <vscale x 8 x float> %b) #0 {
-; CHECK-LABEL: test_copysign_v8f16_v8f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: and z0.h, z0.h, #0x7fff
-; CHECK-NEXT: fcvt z2.h, p0/m, z2.s
-; CHECK-NEXT: fcvt z1.h, p0/m, z1.s
-; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
-; CHECK-NEXT: and z1.h, z1.h, #0x8000
-; CHECK-NEXT: orr z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK-NO-EXTEND-ROUND-LABEL: test_copysign_v8f16_v8f32:
+; CHECK-NO-EXTEND-ROUND: // %bb.0:
+; CHECK-NO-EXTEND-ROUND-NEXT: ptrue p0.s
+; CHECK-NO-EXTEND-ROUND-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-NO-EXTEND-ROUND-NEXT: fcvt z2.h, p0/m, z2.s
+; CHECK-NO-EXTEND-ROUND-NEXT: fcvt z1.h, p0/m, z1.s
+; CHECK-NO-EXTEND-ROUND-NEXT: uzp1 z1.h, z1.h, z2.h
+; CHECK-NO-EXTEND-ROUND-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-NO-EXTEND-ROUND-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT: ret
+;
+; CHECK-EXTEND-ROUND-LABEL: test_copysign_v8f16_v8f32:
+; CHECK-EXTEND-ROUND: // %bb.0:
+; CHECK-EXTEND-ROUND-NEXT: ptrue p0.s
+; CHECK-EXTEND-ROUND-NEXT: uunpkhi z3.s, z0.h
+; CHECK-EXTEND-ROUND-NEXT: fcvt z2.h, p0/m, z2.s
+; CHECK-EXTEND-ROUND-NEXT: fcvt z1.h, p0/m, z1.s
+; CHECK-EXTEND-ROUND-NEXT: uunpklo z0.s, z0.h
+; CHECK-EXTEND-ROUND-NEXT: and z2.h, z2.h, #0x8000
+; CHECK-EXTEND-ROUND-NEXT: and z3.h, z3.h, #0x7fff
+; CHECK-EXTEND-ROUND-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-EXTEND-ROUND-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-EXTEND-ROUND-NEXT: orr z2.d, z3.d, z2.d
+; CHECK-EXTEND-ROUND-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-EXTEND-ROUND-NEXT: uzp1 z0.h, z0.h, z2.h
+; CHECK-EXTEND-ROUND-NEXT: ret
   %tmp0 = fptrunc <vscale x 8 x float> %b to <vscale x 8 x half>
   %r = call <vscale x 8 x half> @llvm.copysign.v8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %tmp0)
   ret <vscale x 8 x half> %r
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
@@ -0,0 +1,558 @@
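+; Fixed-length FCOPYSIGN tests: vectors of 128 bits or fewer lower to NEON
+; (mvni/bif/bsl), while wider fixed-length vectors take the SVE container
+; path added in this patch.
+; NOTE: Assertions have been 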
autogenerated by utils/update_llc_test_checks.py +; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,CHECK_NO_EXTEND_ROUND +; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_NO_EXTEND_ROUND +; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_NO_EXTEND_ROUND +; RUN: llc -aarch64-sve-vector-bits-min=256 --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,CHECK_EXTEND_ROUND +; RUN: llc -aarch64-sve-vector-bits-min=512 --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_EXTEND_ROUND +; RUN: llc -aarch64-sve-vector-bits-min=2048 --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_EXTEND_ROUND + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" + +target triple = "aarch64-unknown-linux-gnu" + +;============ f16 + +define void @test_copysign_v4f16_v4f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f16_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: mvni v2.4h, #128, lsl #8 +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <4 x half>, ptr %ap + %b = load <4 x half>, ptr %bp + %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) + store <4 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v8f16_v8f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v8f16_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mvni v2.8h, #128, lsl #8 +; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <8 x half>, ptr %ap + %b = load <8 x half>, ptr %bp + %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) + store <8 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v16f16_v16f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v16f16_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: and z1.h, z1.h, #0x8000 +; CHECK-NEXT: and z0.h, z0.h, #0x7fff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %a = load <16 x half>, ptr %ap + %b = load <16 x half>, ptr %bp + %r = call <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b) + store <16 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v32f16_v32f16(ptr %ap, ptr %bp) #0 { +; VBITS_GE_256-LABEL: test_copysign_v32f16_v32f16: +; VBITS_GE_256: // %bb.0: +; VBITS_GE_256-NEXT: mov x8, #16 +; VBITS_GE_256-NEXT: ptrue p0.h, vl16 +; VBITS_GE_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1] +; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x0] +; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x1, x8, lsl #1] +; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] +; VBITS_GE_256-NEXT: and z0.h, z0.h, #0x7fff +; VBITS_GE_256-NEXT: and z1.h, z1.h, #0x7fff +; VBITS_GE_256-NEXT: and z2.h, z2.h, #0x8000 +; VBITS_GE_256-NEXT: and z3.h, z3.h, #0x8000 +; VBITS_GE_256-NEXT: orr z0.d, z0.d, z2.d +; VBITS_GE_256-NEXT: orr z1.d, z1.d, z3.d +; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] +; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: ret +; +; VBITS_GE_512-LABEL: 
test_copysign_v32f16_v32f16: +; VBITS_GE_512: // %bb.0: +; VBITS_GE_512-NEXT: ptrue p0.h, vl32 +; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] +; VBITS_GE_512-NEXT: ld1h { z1.h }, p0/z, [x1] +; VBITS_GE_512-NEXT: and z1.h, z1.h, #0x8000 +; VBITS_GE_512-NEXT: and z0.h, z0.h, #0x7fff +; VBITS_GE_512-NEXT: orr z0.d, z0.d, z1.d +; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0] +; VBITS_GE_512-NEXT: ret + %a = load <32 x half>, ptr %ap + %b = load <32 x half>, ptr %bp + %r = call <32 x half> @llvm.copysign.v32f16(<32 x half> %a, <32 x half> %b) + store <32 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v64f16_v64f16(ptr %ap, ptr %bp) vscale_range(8,0) #0 { +; CHECK-LABEL: test_copysign_v64f16_v64f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl64 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: and z1.h, z1.h, #0x8000 +; CHECK-NEXT: and z0.h, z0.h, #0x7fff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %a = load <64 x half>, ptr %ap + %b = load <64 x half>, ptr %bp + %r = call <64 x half> @llvm.copysign.v64f16(<64 x half> %a, <64 x half> %b) + store <64 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v128f16_v128f16(ptr %ap, ptr %bp) vscale_range(16,0) #0 { +; CHECK-LABEL: test_copysign_v128f16_v128f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl128 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: and z1.h, z1.h, #0x8000 +; CHECK-NEXT: and z0.h, z0.h, #0x7fff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %a = load <128 x half>, ptr %ap + %b = load <128 x half>, ptr %bp + %r = call <128 x half> @llvm.copysign.v128f16(<128 x half> %a, <128 x half> %b) + store <128 x half> %r, ptr %ap + ret void +} + +;============ f32 + +define void @test_copysign_v2f32_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v2f32_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: mvni v2.2s, #128, lsl #24 +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <2 x float>, ptr %ap + %b = load <2 x float>, ptr %bp + %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) + store <2 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v4f32_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f32_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mvni v2.4s, #128, lsl #24 +; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <4 x float>, ptr %ap + %b = load <4 x float>, ptr %bp + %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) + store <4 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v8f32_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v8f32_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: and z1.s, z1.s, #0x80000000 +; CHECK-NEXT: and z0.s, z0.s, #0x7fffffff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %a = load <8 x float>, ptr %ap + %b = load <8 x float>, ptr %bp + %r = call <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b) + store <8 x float> %r, ptr %ap + ret void +} + +define 
void @test_copysign_v16f32_v16f32(ptr %ap, ptr %bp) #0 { +; VBITS_GE_256-LABEL: test_copysign_v16f32_v16f32: +; VBITS_GE_256: // %bb.0: +; VBITS_GE_256-NEXT: mov x8, #8 +; VBITS_GE_256-NEXT: ptrue p0.s, vl8 +; VBITS_GE_256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2] +; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x0] +; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x1, x8, lsl #2] +; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] +; VBITS_GE_256-NEXT: and z0.s, z0.s, #0x7fffffff +; VBITS_GE_256-NEXT: and z1.s, z1.s, #0x7fffffff +; VBITS_GE_256-NEXT: and z2.s, z2.s, #0x80000000 +; VBITS_GE_256-NEXT: and z3.s, z3.s, #0x80000000 +; VBITS_GE_256-NEXT: orr z0.d, z0.d, z2.d +; VBITS_GE_256-NEXT: orr z1.d, z1.d, z3.d +; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] +; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: ret +; +; VBITS_GE_512-LABEL: test_copysign_v16f32_v16f32: +; VBITS_GE_512: // %bb.0: +; VBITS_GE_512-NEXT: ptrue p0.s, vl16 +; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] +; VBITS_GE_512-NEXT: ld1w { z1.s }, p0/z, [x1] +; VBITS_GE_512-NEXT: and z1.s, z1.s, #0x80000000 +; VBITS_GE_512-NEXT: and z0.s, z0.s, #0x7fffffff +; VBITS_GE_512-NEXT: orr z0.d, z0.d, z1.d +; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0] +; VBITS_GE_512-NEXT: ret + %a = load <16 x float>, ptr %ap + %b = load <16 x float>, ptr %bp + %r = call <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b) + store <16 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v32f32_v32f32(ptr %ap, ptr %bp) vscale_range(8,0) #0 { +; CHECK-LABEL: test_copysign_v32f32_v32f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl32 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: and z1.s, z1.s, #0x80000000 +; CHECK-NEXT: and z0.s, z0.s, #0x7fffffff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %a = load <32 x float>, ptr %ap + %b = load <32 x float>, ptr %bp + %r = call <32 x float> @llvm.copysign.v32f32(<32 x float> %a, <32 x float> %b) + store <32 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v64f32_v64f32(ptr %ap, ptr %bp) vscale_range(16,0) #0 { +; CHECK-LABEL: test_copysign_v64f32_v64f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl64 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: and z1.s, z1.s, #0x80000000 +; CHECK-NEXT: and z0.s, z0.s, #0x7fffffff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %a = load <64 x float>, ptr %ap + %b = load <64 x float>, ptr %bp + %r = call <64 x float> @llvm.copysign.v64f32(<64 x float> %a, <64 x float> %b) + store <64 x float> %r, ptr %ap + ret void +} + +;============ f64 + +define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v2f64_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v0.2d, #0xffffffffffffffff +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: fneg v0.2d, v0.2d +; CHECK-NEXT: bsl v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <2 x double>, ptr %ap + %b = load <2 x double>, ptr %bp + %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) + store <2 x double> %r, ptr %ap + ret void +} + +define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f64_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] 
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: and z1.d, z1.d, #0x8000000000000000 +; CHECK-NEXT: and z0.d, z0.d, #0x7fffffffffffffff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %a = load <4 x double>, ptr %ap + %b = load <4 x double>, ptr %bp + %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) + store <4 x double> %r, ptr %ap + ret void +} + +define void @test_copysign_v8f64_v8f64(ptr %ap, ptr %bp) #0 { +; VBITS_GE_256-LABEL: test_copysign_v8f64_v8f64: +; VBITS_GE_256: // %bb.0: +; VBITS_GE_256-NEXT: mov x8, #4 +; VBITS_GE_256-NEXT: ptrue p0.d, vl4 +; VBITS_GE_256-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3] +; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x0] +; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x1, x8, lsl #3] +; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] +; VBITS_GE_256-NEXT: and z0.d, z0.d, #0x7fffffffffffffff +; VBITS_GE_256-NEXT: and z1.d, z1.d, #0x7fffffffffffffff +; VBITS_GE_256-NEXT: and z2.d, z2.d, #0x8000000000000000 +; VBITS_GE_256-NEXT: and z3.d, z3.d, #0x8000000000000000 +; VBITS_GE_256-NEXT: orr z0.d, z0.d, z2.d +; VBITS_GE_256-NEXT: orr z1.d, z1.d, z3.d +; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] +; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: ret +; +; VBITS_GE_512-LABEL: test_copysign_v8f64_v8f64: +; VBITS_GE_512: // %bb.0: +; VBITS_GE_512-NEXT: ptrue p0.d, vl8 +; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] +; VBITS_GE_512-NEXT: ld1d { z1.d }, p0/z, [x1] +; VBITS_GE_512-NEXT: and z1.d, z1.d, #0x8000000000000000 +; VBITS_GE_512-NEXT: and z0.d, z0.d, #0x7fffffffffffffff +; VBITS_GE_512-NEXT: orr z0.d, z0.d, z1.d +; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0] +; VBITS_GE_512-NEXT: ret + %a = load <8 x double>, ptr %ap + %b = load <8 x double>, ptr %bp + %r = call <8 x double> @llvm.copysign.v8f64(<8 x double> %a, <8 x double> %b) + store <8 x double> %r, ptr %ap + ret void +} + +define void @test_copysign_v16f64_v16f64(ptr %ap, ptr %bp) vscale_range(8,0) #0 { +; CHECK-LABEL: test_copysign_v16f64_v16f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl16 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: and z1.d, z1.d, #0x8000000000000000 +; CHECK-NEXT: and z0.d, z0.d, #0x7fffffffffffffff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %a = load <16 x double>, ptr %ap + %b = load <16 x double>, ptr %bp + %r = call <16 x double> @llvm.copysign.v16f64(<16 x double> %a, <16 x double> %b) + store <16 x double> %r, ptr %ap + ret void +} + +define void @test_copysign_v32f64_v32f64(ptr %ap, ptr %bp) vscale_range(16,0) #0 { +; CHECK-LABEL: test_copysign_v32f64_v32f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl32 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: and z1.d, z1.d, #0x8000000000000000 +; CHECK-NEXT: and z0.d, z0.d, #0x7fffffffffffffff +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %a = load <32 x double>, ptr %ap + %b = load <32 x double>, ptr %bp + %r = call <32 x double> @llvm.copysign.v32f64(<32 x double> %a, <32 x double> %b) + store <32 x double> %r, ptr %ap + ret void +} + +;============ v2f32 + +define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v2f32_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: mvni v2.2s, #128, lsl #24 +; CHECK-NEXT: ldr d1, [x0] +; CHECK-NEXT: 
fcvtn v0.2s, v0.2d +; CHECK-NEXT: bit v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <2 x float>, ptr %ap + %b = load <2 x double>, ptr %bp + %tmp0 = fptrunc <2 x double> %b to <2 x float> + %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %tmp0) + store <2 x float> %r, ptr %ap + ret void +} + +;============ v4f32 + +; SplitVecOp #1 +define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f32_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mvni v2.4s, #128, lsl #24 +; CHECK-NEXT: fcvt z1.s, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <4 x float>, ptr %ap + %b = load <4 x double>, ptr %bp + %tmp0 = fptrunc <4 x double> %b to <4 x float> + %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %tmp0) + store <4 x float> %r, ptr %ap + ret void +} + +;============ v2f64 + +define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v2f64_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v0.2d, #0xffffffffffffffff +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: ldr q2, [x0] +; CHECK-NEXT: fcvtl v1.2d, v1.2s +; CHECK-NEXT: fneg v0.2d, v0.2d +; CHECK-NEXT: bsl v0.16b, v2.16b, v1.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <2 x double>, ptr %ap + %b = load < 2 x float>, ptr %bp + %tmp0 = fpext <2 x float> %b to <2 x double> + %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %tmp0) + store <2 x double> %r, ptr %ap + ret void +} + +;============ v4f64 + +; SplitVecRes mismatched +define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32: +; CHECK_NO_EXTEND_ROUND: // %bb.0: +; CHECK_NO_EXTEND_ROUND-NEXT: ptrue p0.d, vl4 +; CHECK_NO_EXTEND_ROUND-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK_NO_EXTEND_ROUND-NEXT: ld1w { z1.d }, p0/z, [x1] +; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z1.d, p0/m, z1.s +; CHECK_NO_EXTEND_ROUND-NEXT: and z0.d, z0.d, #0x7fffffffffffffff +; CHECK_NO_EXTEND_ROUND-NEXT: and z1.d, z1.d, #0x8000000000000000 +; CHECK_NO_EXTEND_ROUND-NEXT: orr z0.d, z0.d, z1.d +; CHECK_NO_EXTEND_ROUND-NEXT: st1d { z0.d }, p0, [x0] +; CHECK_NO_EXTEND_ROUND-NEXT: ret +; +; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32: +; CHECK_EXTEND_ROUND: // %bb.0: +; CHECK_EXTEND_ROUND-NEXT: ptrue p0.d, vl4 +; CHECK_EXTEND_ROUND-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK_EXTEND_ROUND-NEXT: ldr q1, [x1] +; CHECK_EXTEND_ROUND-NEXT: uunpklo z1.d, z1.s +; CHECK_EXTEND_ROUND-NEXT: fcvt z1.d, p0/m, z1.s +; CHECK_EXTEND_ROUND-NEXT: and z0.d, z0.d, #0x7fffffffffffffff +; CHECK_EXTEND_ROUND-NEXT: and z1.d, z1.d, #0x8000000000000000 +; CHECK_EXTEND_ROUND-NEXT: orr z0.d, z0.d, z1.d +; CHECK_EXTEND_ROUND-NEXT: st1d { z0.d }, p0, [x0] +; CHECK_EXTEND_ROUND-NEXT: ret + %a = load <4 x double>, ptr %ap + %b = load <4 x float>, ptr %bp + %tmp0 = fpext <4 x float> %b to <4 x double> + %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %tmp0) + store <4 x double> %r, ptr %ap + ret void +} + +;============ v4f16 + +define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f16_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: mvni v2.4h, 
#128, lsl #8 +; CHECK-NEXT: ldr d1, [x0] +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: bit v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <4 x half>, ptr %ap + %b = load <4 x float>, ptr %bp + %tmp0 = fptrunc <4 x float> %b to <4 x half> + %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0) + store <4 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f16_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mvni v2.4h, #128, lsl #8 +; CHECK-NEXT: fcvt z1.h, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <4 x half>, ptr %ap + %b = load <4 x double>, ptr %bp + %tmp0 = fptrunc <4 x double> %b to <4 x half> + %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0) + store <4 x half> %r, ptr %ap + ret void +} + +declare <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) #0 + +;============ v8f16 + + +define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v8f16_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: mvni v2.8h, #128, lsl #8 +; CHECK-NEXT: fcvt z1.h, p0/m, z1.s +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <8 x half>, ptr %ap + %b = load <8 x float>, ptr %bp + %tmp0 = fptrunc <8 x float> %b to <8 x half> + %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %tmp0) + store <8 x half> %r, ptr %ap + ret void +} + +declare <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) #0 +declare <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b) #0 +declare <32 x half> @llvm.copysign.v32f16(<32 x half> %a, <32 x half> %b) #0 +declare <64 x half> @llvm.copysign.v64f16(<64 x half> %a, <64 x half> %b) #0 +declare <128 x half> @llvm.copysign.v128f16(<128 x half> %a, <128 x half> %b) #0 + +declare <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) #0 +declare <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) #0 +declare <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b) #0 +declare <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b) #0 +declare <32 x float> @llvm.copysign.v32f32(<32 x float> %a, <32 x float> %b) #0 +declare <64 x float> @llvm.copysign.v64f32(<64 x float> %a, <64 x float> %b) #0 + +declare <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) #0 +declare <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) #0 +declare <8 x double> @llvm.copysign.v8f64(<8 x double> %a, <8 x double> %b) #0 +declare <16 x double> @llvm.copysign.v16f64(<16 x double> %a, <16 x double> %b) #0 +declare <32 x double> @llvm.copysign.v32f64(<32 x double> %a, <32 x double> %b) #0 + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll --- a/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll +++ b/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve2 -o - | FileCheck --check-prefixes=CHECK %s
+; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve2 -o - | FileCheck --check-prefixes=CHECK,CHECK_NO_EXTEND_ROUND %s
+; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve2 --combiner-vector-fcopysign-extend-round -o - | FileCheck --check-prefixes=CHECK,CHECK_EXTEND_ROUND %s

 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -44,15 +45,28 @@
 ; SplitVecOp #1
 define <vscale x 4 x float> @test_copysign_v4f32_v4f64(<vscale x 4 x float> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f32_v4f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: fcvt z2.s, p0/m, z2.d
-; CHECK-NEXT: fcvt z1.s, p0/m, z1.d
-; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s
-; CHECK-NEXT: mov z2.s, #0x7fffffff
-; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f32_v4f64:
+; CHECK_NO_EXTEND_ROUND: // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT: ptrue p0.d
+; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z2.s, p0/m, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z1.s, p0/m, z1.d
+; CHECK_NO_EXTEND_ROUND-NEXT: uzp1 z1.s, z1.s, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT: mov z2.s, #0x7fffffff
+; CHECK_NO_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT: ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f32_v4f64:
+; CHECK_EXTEND_ROUND: // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT: ptrue p0.d
+; CHECK_EXTEND_ROUND-NEXT: mov z3.s, #0x7fffffff
+; CHECK_EXTEND_ROUND-NEXT: fcvt z2.s, p0/m, z2.d
+; CHECK_EXTEND_ROUND-NEXT: uunpkhi z4.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT: fcvt z1.s, p0/m, z1.d
+; CHECK_EXTEND_ROUND-NEXT: uunpklo z0.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT: bsl z4.d, z4.d, z2.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z1.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT: uzp1 z0.s, z0.s, z4.s
+; CHECK_EXTEND_ROUND-NEXT: ret
   %tmp0 = fptrunc <vscale x 4 x double> %b to <vscale x 4 x float>
   %r = call <vscale x 4 x float> @llvm.copysign.v4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %tmp0)
   ret <vscale x 4 x float> %r
@@ -91,17 +105,29 @@
 ; SplitVecRes mismatched
 define <vscale x 4 x double> @test_copysign_v4f64_v4f32(<vscale x 4 x double> %a, <vscale x 4 x float> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f64_v4f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpkhi z3.d, z2.s
-; CHECK-NEXT: uunpklo z2.d, z2.s
-; CHECK-NEXT: fcvt z3.d, p0/m, z3.s
-; CHECK-NEXT: fcvt z2.d, p0/m, z2.s
-; CHECK-NEXT: mov z4.d, #0x7fffffffffffffff
-; CHECK-NEXT: bsl z0.d, z0.d, z2.d, z4.d
-; CHECK-NEXT: bsl z1.d, z1.d, z3.d, z4.d
-; CHECK-NEXT: ret
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
+; CHECK_NO_EXTEND_ROUND: // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT: ptrue p0.d
+; CHECK_NO_EXTEND_ROUND-NEXT: uunpkhi z3.d, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT: uunpklo z2.d, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z3.d, p0/m, z3.s
+; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z2.d, p0/m, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT: mov z4.d, #0x7fffffffffffffff
+; CHECK_NO_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z2.d, z4.d
+; CHECK_NO_EXTEND_ROUND-NEXT: bsl z1.d, z1.d, z3.d, z4.d
+; CHECK_NO_EXTEND_ROUND-NEXT: ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
+; CHECK_EXTEND_ROUND: // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT: ptrue p0.d
+; CHECK_EXTEND_ROUND-NEXT: uunpklo z3.d, z2.s
+; CHECK_EXTEND_ROUND-NEXT: uunpkhi z2.d, z2.s
+; CHECK_EXTEND_ROUND-NEXT: fcvt z3.d, p0/m, z3.s
+; CHECK_EXTEND_ROUND-NEXT: mov z4.d, #0x7fffffffffffffff
+; CHECK_EXTEND_ROUND-NEXT: fcvt z2.d, p0/m, z2.s
+; CHECK_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z3.d, z4.d
+; CHECK_EXTEND_ROUND-NEXT: bsl z1.d, z1.d, z2.d, z4.d
+; CHECK_EXTEND_ROUND-NEXT: ret
   %tmp0 = fpext <vscale x 4 x float> %b to <vscale x 4 x double>
   %r = call <vscale x 4 x double> @llvm.copysign.v4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %tmp0)
   ret <vscale x 4 x double> %r
@@ -147,15 +173,28 @@
 }

 define <vscale x 4 x half> @test_copysign_v4f16_v4f64(<vscale x 4 x half> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f16_v4f64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: fcvt z2.h, p0/m, z2.d
-; CHECK-NEXT: fcvt z1.h, p0/m, z1.d
-; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s
-; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
-; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f16_v4f64:
+; CHECK_NO_EXTEND_ROUND: // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT: ptrue p0.d
+; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z2.h, p0/m, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z1.h, p0/m, z1.d
+; CHECK_NO_EXTEND_ROUND-NEXT: uzp1 z1.s, z1.s, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK_NO_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT: ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f16_v4f64:
+; CHECK_EXTEND_ROUND: // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT: ptrue p0.d
+; CHECK_EXTEND_ROUND-NEXT: mov z3.h, #32767 // =0x7fff
+; CHECK_EXTEND_ROUND-NEXT: fcvt z2.h, p0/m, z2.d
+; CHECK_EXTEND_ROUND-NEXT: uunpkhi z4.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT: fcvt z1.h, p0/m, z1.d
+; CHECK_EXTEND_ROUND-NEXT: uunpklo z0.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT: bsl z4.d, z4.d, z2.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z1.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT: uzp1 z0.s, z0.s, z4.s
+; CHECK_EXTEND_ROUND-NEXT: ret
   %tmp0 = fptrunc <vscale x 4 x double> %b to <vscale x 4 x half>
   %r = call <vscale x 4 x half> @llvm.copysign.v4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %tmp0)
   ret <vscale x 4 x half> %r
@@ -176,15 +215,28 @@
 }

 define <vscale x 8 x half> @test_copysign_v8f16_v8f32(<vscale x 8 x half> %a, <vscale x 8 x float> %b) #0 {
-; CHECK-LABEL: test_copysign_v8f16_v8f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: fcvt z2.h, p0/m, z2.s
-; CHECK-NEXT: fcvt z1.h, p0/m, z1.s
-; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
-; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
-; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v8f16_v8f32:
+; CHECK_NO_EXTEND_ROUND: // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT: ptrue p0.s
+; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z2.h, p0/m, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z1.h, p0/m, z1.s
+; CHECK_NO_EXTEND_ROUND-NEXT: uzp1 z1.h, z1.h, z2.h
+; CHECK_NO_EXTEND_ROUND-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK_NO_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT: ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v8f16_v8f32:
+; CHECK_EXTEND_ROUND: // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT: ptrue p0.s
+; CHECK_EXTEND_ROUND-NEXT: mov z3.h, #32767 // =0x7fff
+; CHECK_EXTEND_ROUND-NEXT: fcvt z2.h, p0/m, z2.s
+; CHECK_EXTEND_ROUND-NEXT: uunpkhi z4.s, z0.h
+; CHECK_EXTEND_ROUND-NEXT: fcvt z1.h, p0/m, z1.s
+; CHECK_EXTEND_ROUND-NEXT: uunpklo z0.s, z0.h
+; CHECK_EXTEND_ROUND-NEXT: bsl z4.d, z4.d, z2.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z1.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT: uzp1 z0.h, z0.h, z4.h
+; CHECK_EXTEND_ROUND-NEXT: ret
   %tmp0 = fptrunc <vscale x 8 x float> %b to <vscale x 8 x half>
   %r = call <vscale x 8 x half> @llvm.copysign.v8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %tmp0)
   ret <vscale x 8 x half> %r
diff --git a/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
@@ -0,0 +1,536 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,CHECK_NO_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_NO_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | 
FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_NO_EXTEND_ROUND +; RUN: llc -aarch64-sve-vector-bits-min=256 --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,CHECK_EXTEND_ROUND +; RUN: llc -aarch64-sve-vector-bits-min=512 --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_EXTEND_ROUND +; RUN: llc -aarch64-sve-vector-bits-min=2048 --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_EXTEND_ROUND + + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" + +target triple = "aarch64-unknown-linux-gnu" + +;============ f16 + +define void @test_copysign_v4f16_v4f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f16_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: mvni v2.4h, #128, lsl #8 +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <4 x half>, ptr %ap + %b = load <4 x half>, ptr %bp + %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) + store <4 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v8f16_v8f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v8f16_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mvni v2.8h, #128, lsl #8 +; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <8 x half>, ptr %ap + %b = load <8 x half>, ptr %bp + %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) + store <8 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v16f16_v16f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v16f16_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: mov z2.h, #32767 // =0x7fff +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %a = load <16 x half>, ptr %ap + %b = load <16 x half>, ptr %bp + %r = call <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b) + store <16 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v32f16_v32f16(ptr %ap, ptr %bp) #0 { +; VBITS_GE_256-LABEL: test_copysign_v32f16_v32f16: +; VBITS_GE_256: // %bb.0: +; VBITS_GE_256-NEXT: mov x8, #16 +; VBITS_GE_256-NEXT: ptrue p0.h, vl16 +; VBITS_GE_256-NEXT: mov z4.h, #32767 // =0x7fff +; VBITS_GE_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1] +; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x0] +; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x1, x8, lsl #1] +; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] +; VBITS_GE_256-NEXT: bsl z0.d, z0.d, z2.d, z4.d +; VBITS_GE_256-NEXT: bsl z1.d, z1.d, z3.d, z4.d +; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] +; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: ret +; +; VBITS_GE_512-LABEL: test_copysign_v32f16_v32f16: +; VBITS_GE_512: // %bb.0: +; VBITS_GE_512-NEXT: ptrue p0.h, vl32 +; VBITS_GE_512-NEXT: mov z2.h, #32767 // =0x7fff +; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] +; VBITS_GE_512-NEXT: ld1h { z1.h }, p0/z, [x1] +; VBITS_GE_512-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0] +; VBITS_GE_512-NEXT: ret + %a = load <32 x half>, ptr %ap + %b = load <32 x half>, ptr %bp + %r = call <32 x half> @llvm.copysign.v32f16(<32 x half> %a, <32 x half> %b) + store 
<32 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v64f16_v64f16(ptr %ap, ptr %bp) vscale_range(8,0) #0 { +; CHECK-LABEL: test_copysign_v64f16_v64f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl64 +; CHECK-NEXT: mov z2.h, #32767 // =0x7fff +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %a = load <64 x half>, ptr %ap + %b = load <64 x half>, ptr %bp + %r = call <64 x half> @llvm.copysign.v64f16(<64 x half> %a, <64 x half> %b) + store <64 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v128f16_v128f16(ptr %ap, ptr %bp) vscale_range(16,0) #0 { +; CHECK-LABEL: test_copysign_v128f16_v128f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl128 +; CHECK-NEXT: mov z2.h, #32767 // =0x7fff +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %a = load <128 x half>, ptr %ap + %b = load <128 x half>, ptr %bp + %r = call <128 x half> @llvm.copysign.v128f16(<128 x half> %a, <128 x half> %b) + store <128 x half> %r, ptr %ap + ret void +} + +;============ f32 + +define void @test_copysign_v2f32_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v2f32_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: mvni v2.2s, #128, lsl #24 +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <2 x float>, ptr %ap + %b = load <2 x float>, ptr %bp + %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) + store <2 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v4f32_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f32_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mvni v2.4s, #128, lsl #24 +; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <4 x float>, ptr %ap + %b = load <4 x float>, ptr %bp + %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) + store <4 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v8f32_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v8f32_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: mov z2.s, #0x7fffffff +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %a = load <8 x float>, ptr %ap + %b = load <8 x float>, ptr %bp + %r = call <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b) + store <8 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v16f32_v16f32(ptr %ap, ptr %bp) #0 { +; VBITS_GE_256-LABEL: test_copysign_v16f32_v16f32: +; VBITS_GE_256: // %bb.0: +; VBITS_GE_256-NEXT: mov x8, #8 +; VBITS_GE_256-NEXT: ptrue p0.s, vl8 +; VBITS_GE_256-NEXT: mov z4.s, #0x7fffffff +; VBITS_GE_256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2] +; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x0] +; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x1, x8, lsl #2] +; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] +; VBITS_GE_256-NEXT: bsl z0.d, z0.d, z2.d, z4.d +; VBITS_GE_256-NEXT: bsl z1.d, z1.d, z3.d, z4.d +; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] +; VBITS_GE_256-NEXT: st1w { 
z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: ret +; +; VBITS_GE_512-LABEL: test_copysign_v16f32_v16f32: +; VBITS_GE_512: // %bb.0: +; VBITS_GE_512-NEXT: ptrue p0.s, vl16 +; VBITS_GE_512-NEXT: mov z2.s, #0x7fffffff +; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] +; VBITS_GE_512-NEXT: ld1w { z1.s }, p0/z, [x1] +; VBITS_GE_512-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0] +; VBITS_GE_512-NEXT: ret + %a = load <16 x float>, ptr %ap + %b = load <16 x float>, ptr %bp + %r = call <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b) + store <16 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v32f32_v32f32(ptr %ap, ptr %bp) vscale_range(8,0) #0 { +; CHECK-LABEL: test_copysign_v32f32_v32f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl32 +; CHECK-NEXT: mov z2.s, #0x7fffffff +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %a = load <32 x float>, ptr %ap + %b = load <32 x float>, ptr %bp + %r = call <32 x float> @llvm.copysign.v32f32(<32 x float> %a, <32 x float> %b) + store <32 x float> %r, ptr %ap + ret void +} + +define void @test_copysign_v64f32_v64f32(ptr %ap, ptr %bp) vscale_range(16,0) #0 { +; CHECK-LABEL: test_copysign_v64f32_v64f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl64 +; CHECK-NEXT: mov z2.s, #0x7fffffff +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %a = load <64 x float>, ptr %ap + %b = load <64 x float>, ptr %bp + %r = call <64 x float> @llvm.copysign.v64f32(<64 x float> %a, <64 x float> %b) + store <64 x float> %r, ptr %ap + ret void +} + +;============ f64 + +define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v2f64_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v0.2d, #0xffffffffffffffff +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: fneg v0.2d, v0.2d +; CHECK-NEXT: bsl v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <2 x double>, ptr %ap + %b = load <2 x double>, ptr %bp + %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) + store <2 x double> %r, ptr %ap + ret void +} + +define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f64_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: mov z2.d, #0x7fffffffffffffff +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %a = load <4 x double>, ptr %ap + %b = load <4 x double>, ptr %bp + %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) + store <4 x double> %r, ptr %ap + ret void +} + +define void @test_copysign_v8f64_v8f64(ptr %ap, ptr %bp) #0 { +; VBITS_GE_256-LABEL: test_copysign_v8f64_v8f64: +; VBITS_GE_256: // %bb.0: +; VBITS_GE_256-NEXT: mov x8, #4 +; VBITS_GE_256-NEXT: ptrue p0.d, vl4 +; VBITS_GE_256-NEXT: mov z4.d, #0x7fffffffffffffff +; VBITS_GE_256-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3] +; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x0] +; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x1, x8, lsl #3] +; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] +; VBITS_GE_256-NEXT: bsl z0.d, z0.d, z2.d, z4.d +; VBITS_GE_256-NEXT: 
bsl z1.d, z1.d, z3.d, z4.d +; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] +; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: ret +; +; VBITS_GE_512-LABEL: test_copysign_v8f64_v8f64: +; VBITS_GE_512: // %bb.0: +; VBITS_GE_512-NEXT: ptrue p0.d, vl8 +; VBITS_GE_512-NEXT: mov z2.d, #0x7fffffffffffffff +; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] +; VBITS_GE_512-NEXT: ld1d { z1.d }, p0/z, [x1] +; VBITS_GE_512-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0] +; VBITS_GE_512-NEXT: ret + %a = load <8 x double>, ptr %ap + %b = load <8 x double>, ptr %bp + %r = call <8 x double> @llvm.copysign.v8f64(<8 x double> %a, <8 x double> %b) + store <8 x double> %r, ptr %ap + ret void +} + +define void @test_copysign_v16f64_v16f64(ptr %ap, ptr %bp) vscale_range(8,0) #0 { +; CHECK-LABEL: test_copysign_v16f64_v16f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl16 +; CHECK-NEXT: mov z2.d, #0x7fffffffffffffff +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %a = load <16 x double>, ptr %ap + %b = load <16 x double>, ptr %bp + %r = call <16 x double> @llvm.copysign.v16f64(<16 x double> %a, <16 x double> %b) + store <16 x double> %r, ptr %ap + ret void +} + +define void @test_copysign_v32f64_v32f64(ptr %ap, ptr %bp) vscale_range(16,0) #0 { +; CHECK-LABEL: test_copysign_v32f64_v32f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl32 +; CHECK-NEXT: mov z2.d, #0x7fffffffffffffff +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %a = load <32 x double>, ptr %ap + %b = load <32 x double>, ptr %bp + %r = call <32 x double> @llvm.copysign.v32f64(<32 x double> %a, <32 x double> %b) + store <32 x double> %r, ptr %ap + ret void +} + +;============ v2f32 + +define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v2f32_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: mvni v2.2s, #128, lsl #24 +; CHECK-NEXT: ldr d1, [x0] +; CHECK-NEXT: fcvtn v0.2s, v0.2d +; CHECK-NEXT: bit v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <2 x float>, ptr %ap + %b = load <2 x double>, ptr %bp + %tmp0 = fptrunc <2 x double> %b to <2 x float> + %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %tmp0) + store <2 x float> %r, ptr %ap + ret void +} + +;============ v4f32 + +; SplitVecOp #1 +define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f32_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mvni v2.4s, #128, lsl #24 +; CHECK-NEXT: fcvt z1.s, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <4 x float>, ptr %ap + %b = load <4 x double>, ptr %bp + %tmp0 = fptrunc <4 x double> %b to <4 x float> + %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %tmp0) + store <4 x float> %r, ptr %ap + ret void +} + +;============ v2f64 + +define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v2f64_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v0.2d, 
#0xffffffffffffffff +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: ldr q2, [x0] +; CHECK-NEXT: fcvtl v1.2d, v1.2s +; CHECK-NEXT: fneg v0.2d, v0.2d +; CHECK-NEXT: bsl v0.16b, v2.16b, v1.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <2 x double>, ptr %ap + %b = load < 2 x float>, ptr %bp + %tmp0 = fpext <2 x float> %b to <2 x double> + %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %tmp0) + store <2 x double> %r, ptr %ap + ret void +} + +;============ v4f64 + +; SplitVecRes mismatched +define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32: +; CHECK_NO_EXTEND_ROUND: // %bb.0: +; CHECK_NO_EXTEND_ROUND-NEXT: ptrue p0.d, vl4 +; CHECK_NO_EXTEND_ROUND-NEXT: mov z2.d, #0x7fffffffffffffff +; CHECK_NO_EXTEND_ROUND-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK_NO_EXTEND_ROUND-NEXT: ld1w { z1.d }, p0/z, [x1] +; CHECK_NO_EXTEND_ROUND-NEXT: fcvt z1.d, p0/m, z1.s +; CHECK_NO_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK_NO_EXTEND_ROUND-NEXT: st1d { z0.d }, p0, [x0] +; CHECK_NO_EXTEND_ROUND-NEXT: ret +; +; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32: +; CHECK_EXTEND_ROUND: // %bb.0: +; CHECK_EXTEND_ROUND-NEXT: ptrue p0.d, vl4 +; CHECK_EXTEND_ROUND-NEXT: mov z2.d, #0x7fffffffffffffff +; CHECK_EXTEND_ROUND-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK_EXTEND_ROUND-NEXT: ldr q1, [x1] +; CHECK_EXTEND_ROUND-NEXT: uunpklo z1.d, z1.s +; CHECK_EXTEND_ROUND-NEXT: fcvt z1.d, p0/m, z1.s +; CHECK_EXTEND_ROUND-NEXT: bsl z0.d, z0.d, z1.d, z2.d +; CHECK_EXTEND_ROUND-NEXT: st1d { z0.d }, p0, [x0] +; CHECK_EXTEND_ROUND-NEXT: ret + %a = load <4 x double>, ptr %ap + %b = load <4 x float>, ptr %bp + %tmp0 = fpext <4 x float> %b to <4 x double> + %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %tmp0) + store <4 x double> %r, ptr %ap + ret void +} + +;============ v4f16 + +define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f16_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: mvni v2.4h, #128, lsl #8 +; CHECK-NEXT: ldr d1, [x0] +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: bit v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <4 x half>, ptr %ap + %b = load <4 x float>, ptr %bp + %tmp0 = fptrunc <4 x float> %b to <4 x half> + %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0) + store <4 x half> %r, ptr %ap + ret void +} + +define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v4f16_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mvni v2.4h, #128, lsl #8 +; CHECK-NEXT: fcvt z1.h, p0/m, z1.d +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %a = load <4 x half>, ptr %ap + %b = load <4 x double>, ptr %bp + %tmp0 = fptrunc <4 x double> %b to <4 x half> + %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0) + store <4 x half> %r, ptr %ap + ret void +} + +declare <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) #0 + +;============ v8f16 + + +define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 { +; CHECK-LABEL: test_copysign_v8f16_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: 
ldr q0, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: mvni v2.8h, #128, lsl #8 +; CHECK-NEXT: fcvt z1.h, p0/m, z1.s +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %a = load <8 x half>, ptr %ap + %b = load <8 x float>, ptr %bp + %tmp0 = fptrunc <8 x float> %b to <8 x half> + %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %tmp0) + store <8 x half> %r, ptr %ap + ret void +} + +declare <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) #0 +declare <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b) #0 +declare <32 x half> @llvm.copysign.v32f16(<32 x half> %a, <32 x half> %b) #0 +declare <64 x half> @llvm.copysign.v64f16(<64 x half> %a, <64 x half> %b) #0 +declare <128 x half> @llvm.copysign.v128f16(<128 x half> %a, <128 x half> %b) #0 + +declare <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) #0 +declare <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) #0 +declare <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b) #0 +declare <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b) #0 +declare <32 x float> @llvm.copysign.v32f32(<32 x float> %a, <32 x float> %b) #0 +declare <64 x float> @llvm.copysign.v64f32(<64 x float> %a, <64 x float> %b) #0 + +declare <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) #0 +declare <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) #0 +declare <8 x double> @llvm.copysign.v8f64(<8 x double> %a, <8 x double> %b) #0 +declare <16 x double> @llvm.copysign.v16f64(<16 x double> %a, <16 x double> %b) #0 +declare <32 x double> @llvm.copysign.v32f64(<32 x double> %a, <32 x double> %b) #0 + +attributes #0 = { "target-features"="+sve2" }
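
; For reference (a sketch, not part of the patch itself): the new flag gates
; the existing copysign(x, fpext/fptrunc(y)) -> copysign(x, y) DAG combine on
; vector types. With --combiner-vector-fcopysign-extend-round, IR such as
;   %t = fptrunc <vscale x 4 x double> %s to <vscale x 4 x float>
;   %r = call <vscale x 4 x float> @llvm.copysign.v4f32(<vscale x 4 x float> %m, <vscale x 4 x float> %t)
; folds to a single FCOPYSIGN whose sign operand keeps the wider type; the
; SplitVecOp_FCOPYSIGN and AArch64 lowering changes above then handle the
; mismatched operand types.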