diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -223,6 +223,22 @@
     return false;
   }
 
+  template <signed Min, signed Max, signed Scale>
+  bool SelectImm(SDValue N, SDValue &Imm) {
+    if (!isa<ConstantSDNode>(N))
+      return false;
+
+    int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
+
+    MulImm *= Scale;
+    if ((MulImm >= Min * Scale) && (MulImm <= Max * Scale)) {
+      Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
+      return true;
+    }
+
+    return false;
+  }
+
   /// Form sequences of consecutive 64/128-bit registers for use in NEON
   /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
   /// between 1 and 4 elements. If it contains a single element that is returned
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -946,6 +946,7 @@
   SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG, unsigned NewOp,
                               bool OverrideNEON = false) const;
   SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1158,6 +1158,7 @@
       setOperationAction(ISD::MULHS, VT, Custom);
       setOperationAction(ISD::MULHU, VT, Custom);
       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
+      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
       setOperationAction(ISD::SELECT, VT, Custom);
       setOperationAction(ISD::SETCC, VT, Custom);
       setOperationAction(ISD::SDIV, VT, Custom);
@@ -1272,6 +1273,7 @@
       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
+      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
       setOperationAction(ISD::SELECT_CC, VT, Expand);
     }
 
@@ -1538,6 +1540,7 @@
   setOperationAction(ISD::SMAX, VT, Custom);
   setOperationAction(ISD::SMIN, VT, Custom);
   setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
+  setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
   setOperationAction(ISD::SRA, VT, Custom);
   setOperationAction(ISD::SRL, VT, Custom);
   setOperationAction(ISD::STORE, VT, Custom);
@@ -4886,6 +4889,8 @@
                                /*OverrideNEON=*/true);
   case ISD::CTTZ:
     return LowerCTTZ(Op, DAG);
+  case ISD::VECTOR_SPLICE:
+    return LowerVECTOR_SPLICE(Op, DAG);
   }
 }
 
@@ -7391,6 +7396,32 @@
   return CS1;
 }
 
+SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+
+  EVT Ty = Op.getValueType();
+  assert(Ty.isScalableVector() && "Expected Scalable Type");
+  EVT VTy = Ty.isFloatingPoint()
+                ? getPackedSVEVectorVT(Ty.getVectorElementCount())
+                : Ty;
+
+  SDValue LHS = Ty.isFloatingPoint() ? DAG.getBitcast(VTy, Op.getOperand(0))
+                                     : Op.getOperand(0);
+  SDValue RHS = Ty.isFloatingPoint() ? DAG.getBitcast(VTy, Op.getOperand(1))
+                                     : Op.getOperand(1);
+  SDValue Idx = Op.getOperand(2);
+  SDLoc DL(Op);
+  if ((!Op.getConstantOperandAPInt(2).isNegative() &&
+       Ty.getVectorMinNumElements() > Op.getConstantOperandVal(2)) ||
+      Op.getConstantOperandAPInt(2) == -1) {
+    SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, VTy, LHS, RHS, Idx);
+    if (Ty.isFloatingPoint())
+      return DAG.getNode(ISD::BITCAST, DL, Ty, Splice);
+    return Splice;
+  }
+  return SDValue();
+}
+
 SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
                                               SelectionDAG &DAG) const {
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1226,6 +1226,20 @@
   def : Pat<(nxv8bf16 (concat_vectors nxv4bf16:$v1, nxv4bf16:$v2)),
             (UZP1_ZZZ_H $v1, $v2)>;
 
+  // Splice with lane equal to -1
+  def : Pat<(nxv16i8 (vector_splice (nxv16i8 ZPR:$Z1), (nxv16i8 ZPR:$Z2), (i64 -1))),
+            (INSR_ZV_B (REV_ZZ_B ZPR:$Z2), (INSERT_SUBREG (IMPLICIT_DEF),
+            (LASTB_VPZ_B (PTRUE_B 31), ZPR:$Z1), bsub))>;
+  def : Pat<(nxv8i16 (vector_splice (nxv8i16 ZPR:$Z1), (nxv8i16 ZPR:$Z2), (i64 -1))),
+            (INSR_ZV_H (REV_ZZ_H ZPR:$Z2), (INSERT_SUBREG (IMPLICIT_DEF),
+            (LASTB_VPZ_H (PTRUE_H 31), ZPR:$Z1), hsub))>;
+  def : Pat<(nxv4i32 (vector_splice (nxv4i32 ZPR:$Z1), (nxv4i32 ZPR:$Z2), (i64 -1))),
+            (INSR_ZV_S (REV_ZZ_S ZPR:$Z2), (INSERT_SUBREG (IMPLICIT_DEF),
+            (LASTB_VPZ_S (PTRUE_S 31), ZPR:$Z1), ssub))>;
+  def : Pat<(nxv2i64 (vector_splice (nxv2i64 ZPR:$Z1), (nxv2i64 ZPR:$Z2), (i64 -1))),
+            (INSR_ZV_D (REV_ZZ_D ZPR:$Z2), (INSERT_SUBREG (IMPLICIT_DEF),
+            (LASTB_VPZ_D (PTRUE_D 31), ZPR:$Z1), dsub))>;
+
 defm CMPHS_PPzZZ : sve_int_cmp_0<0b000, "cmphs", SETUGE, SETULE>;
 defm CMPHI_PPzZZ : sve_int_cmp_0<0b001, "cmphi", SETUGT, SETULT>;
 defm CMPGE_PPzZZ : sve_int_cmp_0<0b100, "cmpge", SETGE, SETLE>;
@@ -2370,6 +2384,17 @@
   def : Pat<(vector_extract (nxv2f64 ZPR:$Zs), (i64 0)),
             (f64 (EXTRACT_SUBREG ZPR:$Zs, dsub))>;
   }
+
+  // Splice with lane bigger than or equal to 0
+  def : Pat<(nxv16i8 (vector_splice (nxv16i8 ZPR:$Z1), (nxv16i8 ZPR:$Z2), (i64 (sve_imm_0_16 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_imm_0_16:$index)>;
+  def : Pat<(nxv8i16 (vector_splice (nxv8i16 ZPR:$Z1), (nxv8i16 ZPR:$Z2), (i64 (sve_imm_0_8 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_imm_0_8:$index)>;
+  def : Pat<(nxv4i32 (vector_splice (nxv4i32 ZPR:$Z1), (nxv4i32 ZPR:$Z2), (i64 (sve_imm_0_4 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_imm_0_4:$index)>;
+  def : Pat<(nxv2i64 (vector_splice (nxv2i64 ZPR:$Z1), (nxv2i64 ZPR:$Z2), (i64 (sve_imm_0_2 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_imm_0_2:$index)>;
+
 }
 
 let Predicates = [HasSVE, HasMatMulInt8] in {
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -263,6 +263,11 @@
 def sve_cnt_mul_imm : ComplexPattern<i32, 1, "SelectCntImm<1, 16, 1, false>">;
 def sve_cnt_shl_imm : ComplexPattern<i32, 1, "SelectCntImm<1, 16, 1, true>">;
 
+def sve_imm_0_2  : ComplexPattern<i64, 1, "SelectImm<0, 2, 8>">;
+def sve_imm_0_4  : ComplexPattern<i64, 1, "SelectImm<0, 4, 4>">;
+def sve_imm_0_8  : ComplexPattern<i64, 1, "SelectImm<0, 8, 2>">;
+def sve_imm_0_16 : ComplexPattern<i64, 1, "SelectImm<0, 16, 1>">;
+
 def int_aarch64_sve_cntp_oneuse : PatFrag<(ops node:$pred, node:$src2),
                                           (int_aarch64_sve_cntp node:$pred, node:$src2), [{
   return N->hasOneUse();
diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
--- a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
+++ b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
@@ -10,15 +10,7 @@
 define <vscale x 16 x i8> @splice_nxv16i8_first_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 0)
   ret %res
@@ -27,16 +19,7 @@
 define <vscale x 16 x i8> @splice_nxv16i8_last_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xf
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #15
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 15)
   ret %res
@@ -68,15 +51,7 @@
 define <vscale x 8 x i16> @splice_nxv8i16_first_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i16_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 0)
   ret %res
@@ -85,16 +60,7 @@
 define <vscale x 8 x i16> @splice_nxv8i16_last_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i16_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xe
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #14
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 7)
   ret %res
@@ -126,15 +92,7 @@
 define <vscale x 4 x i32> @splice_nxv4i32_first_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 0)
   ret %res
@@ -143,16 +101,7 @@
 define <vscale x 4 x i32> @splice_nxv4i32_last_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xc
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #12
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 3)
   ret %res
@@ -184,15 +133,7 @@
 define <vscale x 2 x i64> @splice_nxv2i64_first_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 0)
   ret %res
@@ -201,16 +142,7 @@
 define <vscale x 2 x i64> @splice_nxv2i64_last_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 1)
   ret %res
@@ -242,15 +174,7 @@
 define <vscale x 8 x half> @splice_nxv8f16_first_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv8f16_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 0)
   ret %res
@@ -259,16 +183,7 @@
 define <vscale x 8 x half> @splice_nxv8f16_last_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv8f16_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xe
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #14
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 7)
   ret %res
@@ -300,15 +215,7 @@
 define <vscale x 4 x float> @splice_nxv4f32_first_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f32_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 0)
   ret %res
@@ -317,16 +224,7 @@
 define <vscale x 4 x float> @splice_nxv4f32_last_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f32_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xc
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #12
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 3)
   ret %res
@@ -358,15 +256,7 @@
 define <vscale x 2 x double> @splice_nxv2f64_first_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 0)
   ret %res
@@ -375,16 +265,7 @@
 define <vscale x 2 x double> @splice_nxv2f64_last_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 1)
   ret %res
@@ -417,20 +298,12 @@
 define <vscale x 2 x i1> @splice_nxv2i1_idx(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i1_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z0.d, p1/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.d, p0/z, #1 // =0x1
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT:    and z1.d, z1.d, #0x1
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov z1.d, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    and z0.d, z0.d, #0x1
-; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 1)
   ret %res
@@ -440,20 +313,12 @@
 define <vscale x 4 x i1> @splice_nxv4i1_idx(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i1_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z0.s, p1/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.s, p0/z, #1 // =0x1
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT:    and z1.s, z1.s, #0x1
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov z1.s, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
-; CHECK-NEXT:    and z0.s, z0.s, #0x1
-; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 2)
   ret %res
@@ -463,20 +328,12 @@
 define <vscale x 8 x i1> @splice_nxv8i1_idx(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i1_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z0.h, p1/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.h, p0/z, #1 // =0x1
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT:    and z1.h, z1.h, #0x1
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov z1.h, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8]
-; CHECK-NEXT:    and z0.h, z0.h, #0x1
-; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    cmpne p0.h, p0/z, z1.h, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 4)
   ret %res
@@ -486,20 +343,12 @@
 define <vscale x 16 x i1> @splice_nxv16i1_idx(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i1_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z0.b, p1/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT:    and z1.b, z1.b, #0x1
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mov z1.b, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
-; CHECK-NEXT:    and z0.b, z0.b, #0x1
-; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    cmpne p0.b, p0/z, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 8)
   ret %res
@@ -509,16 +358,7 @@
 define <vscale x 2 x i8> @splice_nxv2i8_idx(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i8_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 1)
   ret %res
@@ -605,17 +445,10 @@
 define <vscale x 16 x i8> @splice_nxv16i8_1(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-1
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8, x9]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb b2, p0, z0.b
+; CHECK-NEXT:    rev z0.b, z1.b
+; CHECK-NEXT:    insr z0.b, b2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -1)
   ret %res
@@ -668,18 +501,10 @@
 define <vscale x 8 x i16> @splice_nxv8i16_1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i16_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-2
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb h2, p0, z0.h
+; CHECK-NEXT:    rev z0.h, z1.h
+; CHECK-NEXT:    insr z0.h, h2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -1)
   ret %res
@@ -732,18 +557,10 @@
 define <vscale x 4 x i32> @splice_nxv4i32_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-4
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb s2, p0, z0.s
+; CHECK-NEXT:    rev z0.s, z1.s
+; CHECK-NEXT:    insr z0.s, s2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -1)
   ret %res
@@ -796,18 +613,10 @@
 define <vscale x 2 x i64> @splice_nxv2i64_1(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-8
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb d2, p0, z0.d
+; CHECK-NEXT:    rev z0.d, z1.d
+; CHECK-NEXT:    insr z0.d, d2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -1)
   ret %res
@@ -860,18 +669,10 @@
 define <vscale x 8 x half> @splice_nxv8f16_1(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv8f16_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-2
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb h2, p0, z0.h
+; CHECK-NEXT:    rev z0.h, z1.h
+; CHECK-NEXT:    insr z0.h, h2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -1)
   ret %res
@@ -924,18 +725,10 @@
 define <vscale x 4 x float> @splice_nxv4f32_1(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f32_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-4
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb s2, p0, z0.s
+; CHECK-NEXT:    rev z0.s, z1.s
+; CHECK-NEXT:    insr z0.s, s2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -1)
   ret %res
@@ -988,18 +781,10 @@
 define <vscale x 2 x double> @splice_nxv2f64_1(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-8
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb d2, p0, z0.d
+; CHECK-NEXT:    rev z0.d, z1.d
+; CHECK-NEXT:    insr z0.d, d2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -1)
   ret %res
@@ -1033,22 +818,14 @@
 define <vscale x 2 x i1> @splice_nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z1.d, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-8
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    and z0.d, z0.d, #0x1
-; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb d0, p0, z0.d
+; CHECK-NEXT:    rev z1.d, z1.d
+; CHECK-NEXT:    insr z1.d, d0
+; CHECK-NEXT:    and z1.d, z1.d, #0x1
+; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 -1)
   ret %res
@@ -1058,22 +835,14 @@
 define <vscale x 4 x i1> @splice_nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z1.s, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-4
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    and z0.s, z0.s, #0x1
-; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb s0, p0, z0.s
+; CHECK-NEXT:    rev z1.s, z1.s
+; CHECK-NEXT:    insr z1.s, s0
+; CHECK-NEXT:    and z1.s, z1.s, #0x1
+; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 -1)
   ret %res
@@ -1083,22 +852,14 @@
 define <vscale x 8 x i1> @splice_nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov z1.h, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-2
-; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT:    and z0.h, z0.h, #0x1
-; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb h0, p0, z0.h
+; CHECK-NEXT:    rev z1.h, z1.h
+; CHECK-NEXT:    insr z1.h, h0
+; CHECK-NEXT:    and z1.h, z1.h, #0x1
+; CHECK-NEXT:    cmpne p0.h, p0/z, z1.h, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 -1)
   ret %res
@@ -1108,21 +869,14 @@
 define <vscale x 16 x i1> @splice_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov z1.b, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    addvl x8, x8, #1
-; CHECK-NEXT:    mov x9, #-1
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8, x9]
-; CHECK-NEXT:    and z0.b, z0.b, #0x1
-; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    lastb b0, p0, z0.b
+; CHECK-NEXT:    rev z1.b, z1.b
+; CHECK-NEXT:    insr z1.b, b0
+; CHECK-NEXT:    and z1.b, z1.b, #0x1
+; CHECK-NEXT:    cmpne p0.b, p0/z, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 -1)
   ret %res
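
Usage note (not part of the patch): a minimal IR sketch of how the new lowering is exercised, assuming the patch is applied; the function name @splice_example and the expected-codegen comment are illustrative only. Index 2 is non-negative and below the minimum element count (4), so the splice should take the EXT path with byte offset 2 * 4 = 8 instead of the old stack-based expansion:

define <vscale x 4 x i32> @splice_example(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
  ; Expected codegen with this patch: ext z0.b, z0.b, z1.b, #8
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 2)
  ret <vscale x 4 x i32> %res
}

declare <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)

attributes #0 = { "target-features"="+sve" }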