diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -241,6 +241,22 @@
     return false;
   }
 
+  template <signed Max, unsigned Scale>
+  bool SelectEXTImm(SDValue N, SDValue &Imm) {
+    if (!isa<ConstantSDNode>(N))
+      return false;
+
+    int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
+
+    if (MulImm >= 0 && MulImm <= Max) {
+      MulImm *= Scale;
+      Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
+      return true;
+    }
+
+    return false;
+  }
+
   /// Form sequences of consecutive 64/128-bit registers for use in NEON
   /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
   /// between 1 and 4 elements. If it contains a single element that is returned
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7426,7 +7426,9 @@
 
 SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
                                                   SelectionDAG &DAG) const {
-  if (Op.getConstantOperandAPInt(2) == -1)
+  EVT Ty = Op.getValueType();
+  auto Idx = Op.getConstantOperandAPInt(2);
+  if (Idx.sge(-1) && Idx.slt(Ty.getVectorMinNumElements()))
     return Op;
   return SDValue();
 }
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2398,6 +2398,16 @@
             (i32 (UMOVvi32 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index))>;
   def : Pat<(i64 (vector_extract (nxv2i64 ZPR:$vec), VectorIndexD:$index)),
             (i64 (UMOVvi64 (v2i64 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexD:$index))>;
+
+  // Splice with lane greater than or equal to 0
+  def : Pat<(nxv16i8 (vector_splice (nxv16i8 ZPR:$Z1), (nxv16i8 ZPR:$Z2), (i64 (sve_ext_imm_0_15 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_15:$index)>;
+  def : Pat<(nxv8i16 (vector_splice (nxv8i16 ZPR:$Z1), (nxv8i16 ZPR:$Z2), (i64 (sve_ext_imm_0_7 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_7:$index)>;
+  def : Pat<(nxv4i32 (vector_splice (nxv4i32 ZPR:$Z1), (nxv4i32 ZPR:$Z2), (i64 (sve_ext_imm_0_3 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_3:$index)>;
+  def : Pat<(nxv2i64 (vector_splice (nxv2i64 ZPR:$Z1), (nxv2i64 ZPR:$Z2), (i64 (sve_ext_imm_0_1 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_1:$index)>;
 }
 
 // Extract first element from vector.
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -263,6 +263,12 @@
 def sve_cnt_mul_imm : ComplexPattern<i32, 1, "SelectCntImm<1, 16, 1, false>">;
 def sve_cnt_shl_imm : ComplexPattern<i32, 1, "SelectCntImm<1, 16, 1, true>">;
+
+def sve_ext_imm_0_1  : ComplexPattern<i32, 1, "SelectEXTImm<1, 8>">;
+def sve_ext_imm_0_3  : ComplexPattern<i32, 1, "SelectEXTImm<3, 4>">;
+def sve_ext_imm_0_7  : ComplexPattern<i32, 1, "SelectEXTImm<7, 2>">;
+def sve_ext_imm_0_15 : ComplexPattern<i32, 1, "SelectEXTImm<15, 1>">;
+
 def int_aarch64_sve_cntp_oneuse : PatFrag<(ops node:$pred, node:$src2),
                                           (int_aarch64_sve_cntp node:$pred, node:$src2), [{
   return N->hasOneUse();
diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
--- a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
+++ b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
@@ -10,15 +10,7 @@
 define <vscale x 16 x i8> @splice_nxv16i8_first_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 0)
   ret <vscale x 16 x i8> %res
@@ -27,16 +19,7 @@
 define <vscale x 16 x i8> @splice_nxv16i8_last_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xf
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #15
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 15)
   ret <vscale x 16 x i8> %res
@@ -68,15 +51,7 @@
 define <vscale x 8 x i16> @splice_nxv8i16_first_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i16_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 0)
   ret <vscale x 8 x i16> %res
@@ -85,16 +60,7 @@
 define <vscale x 8 x i16> @splice_nxv8i16_last_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i16_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xe
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #14
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 7)
   ret <vscale x 8 x i16> %res
@@ -126,15 +92,7 @@
 define <vscale x 4 x i32> @splice_nxv4i32_first_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 0)
   ret <vscale x 4 x i32> %res
@@ -143,16 +101,7 @@
 define <vscale x 4 x i32> @splice_nxv4i32_last_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xc
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #12
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 3)
   ret <vscale x 4 x i32> %res
@@ -184,15 +133,7 @@
 define <vscale x 2 x i64> @splice_nxv2i64_first_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 0)
   ret <vscale x 2 x i64> %res
@@ -201,16 +142,7 @@
 define <vscale x 2 x i64> @splice_nxv2i64_last_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 1)
   ret <vscale x 2 x i64> %res
@@ -271,6 +203,49 @@
   ret <vscale x 2 x half> %res
 }
 
+define <vscale x 2 x half> @splice_nxv2f16_first_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv2f16_first_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 0)
+  ret <vscale x 2 x half> %res
+}
+
+define <vscale x 2 x half> @splice_nxv2f16_1_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv2f16_1_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 1)
+  ret <vscale x 2 x half> %res
+}
+
+; Ensure index is clamped when we cannot prove it's less than VL-1.
+define <vscale x 2 x half> @splice_nxv2f16_last_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv2f16_last_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-2
+; CHECK-NEXT:    cntd x10
+; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    mov w9, #2
+; CHECK-NEXT:    cmp x10, #2 // =2
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    csel x9, x10, x9, lo
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
+; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    lsl x9, x9, #3
+; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
+; CHECK-NEXT:    addvl sp, sp, #2
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 2)
+  ret <vscale x 2 x half> %res
+}
+
 define <vscale x 4 x half> @splice_nxv4f16_neg_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f16_neg_idx:
 ; CHECK:       // %bb.0:
@@ -303,18 +278,54 @@
   ret <vscale x 4 x half> %res
 }
 
-define <vscale x 8 x half> @splice_nxv8f16_first_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv8f16_first_idx:
+define <vscale x 4 x half> @splice_nxv4f16_first_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv4f16_first_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 0)
+  ret <vscale x 4 x half> %res
+}
+
+define <vscale x 4 x half> @splice_nxv4f16_3_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv4f16_3_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #12
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 3)
+  ret <vscale x 4 x half> %res
+}
+
+; Ensure index is clamped when we cannot prove it's less than VL-1.
+define <vscale x 4 x half> @splice_nxv4f16_last_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv4f16_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
+; CHECK-NEXT:    cntw x10
+; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    mov w9, #4
+; CHECK-NEXT:    cmp x10, #4 // =4
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    csel x9, x10, x9, lo
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [sp]
+; CHECK-NEXT:    lsl x9, x9, #2
+; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 4)
+  ret <vscale x 4 x half> %res
+}
+
+
+define <vscale x 8 x half> @splice_nxv8f16_first_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv8f16_first_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 0)
   ret <vscale x 8 x half> %res
@@ -323,16 +334,7 @@
 define <vscale x 8 x half> @splice_nxv8f16_last_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv8f16_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xe
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #14
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 7)
   ret <vscale x 8 x half> %res
@@ -393,18 +395,53 @@
   ret <vscale x 2 x float> %res
 }
 
-define <vscale x 4 x float> @splice_nxv4f32_first_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv4f32_first_idx:
+define <vscale x 2 x float> @splice_nxv2f32_first_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
+; CHECK-LABEL: splice_nxv2f32_first_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 0)
+  ret <vscale x 2 x float> %res
+}
+
+define <vscale x 2 x float> @splice_nxv2f32_1_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
+; CHECK-LABEL: splice_nxv2f32_1_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 1)
+  ret <vscale x 2 x float> %res
+}
+
+; Ensure index is clamped when we cannot prove it's less than VL-1.
+define <vscale x 2 x float> @splice_nxv2f32_last_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
+; CHECK-LABEL: splice_nxv2f32_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
+; CHECK-NEXT:    cntd x10
+; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    mov w9, #2
+; CHECK-NEXT:    cmp x10, #2 // =2
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    csel x9, x10, x9, lo
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [sp]
+; CHECK-NEXT:    lsl x9, x9, #3
+; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x8, x9]
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 2)
+  ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @splice_nxv4f32_first_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
+; CHECK-LABEL: splice_nxv4f32_first_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 0)
   ret <vscale x 4 x float> %res
@@ -413,16 +450,7 @@
 define <vscale x 4 x float> @splice_nxv4f32_last_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f32_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0xc
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #12
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 3)
   ret <vscale x 4 x float> %res
@@ -454,15 +482,7 @@
 define <vscale x 2 x double> @splice_nxv2f64_first_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 0)
   ret <vscale x 2 x double> %res
@@ -471,16 +491,7 @@
 define <vscale x 2 x double> @splice_nxv2f64_last_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 1)
   ret <vscale x 2 x double> %res
@@ -513,20 +524,12 @@
 define <vscale x 2 x i1> @splice_nxv2i1_idx(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i1_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z0.d, p1/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.d, p0/z, #1 // =0x1
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT:    and z1.d, z1.d, #0x1
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov z1.d, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    and z0.d, z0.d, #0x1
-; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 1)
   ret <vscale x 2 x i1> %res
@@ -536,20 +539,12 @@
 define <vscale x 4 x i1> @splice_nxv4i1_idx(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i1_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z0.s, p1/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.s, p0/z, #1 // =0x1
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT:    and z1.s, z1.s, #0x1
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    mov z1.s, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8]
-; CHECK-NEXT:    and z0.s, z0.s, #0x1
-; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 2)
   ret <vscale x 4 x i1> %res
@@ -559,20 +554,12 @@
 define <vscale x 8 x i1> @splice_nxv8i1_idx(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i1_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z0.h, p1/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.h, p0/z, #1 // =0x1
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT:    and z1.h, z1.h, #0x1
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    mov z1.h, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x8]
-; CHECK-NEXT:    and z0.h, z0.h, #0x1
-; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    cmpne p0.h, p0/z, z1.h, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 4)
   ret <vscale x 8 x i1> %res
@@ -582,20 +569,12 @@
 define <vscale x 16 x i1> @splice_nxv16i1_idx(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i1_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z0.b, p1/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT:    and z1.b, z1.b, #0x1
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    mov z1.b, p1/z, #1 // =0x1
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
-; CHECK-NEXT:    and z0.b, z0.b, #0x1
-; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #0
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    cmpne p0.b, p0/z, z1.b, #0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 8)
   ret <vscale x 16 x i1> %res
@@ -605,16 +584,7 @@
 define <vscale x 2 x i8> @splice_nxv2i8_idx(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i8_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT:    orr x8, x8, #0x8
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 1)
   ret <vscale x 2 x i8> %res