diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6157,6 +6157,11 @@
     break;
   case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
+  case ISD::VECTOR_SPLICE: {
+    if (cast<ConstantSDNode>(N3)->isNullValue())
+      return N1;
+    break;
+  }
   case ISD::INSERT_VECTOR_ELT: {
     ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
     // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7630,11 +7630,14 @@
 SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
                                                   SelectionDAG &DAG) const {
-
   EVT Ty = Op.getValueType();
   auto Idx = Op.getConstantOperandAPInt(2);
-  if (Idx.sge(-1) && Idx.slt(Ty.getVectorMinNumElements()))
+
+  // This will select to an EXT instruction, which has a maximum immediate
+  // value of 255, hence 2048-bits is the maximum value we can lower.
+  if (Idx.sge(-1) && Idx.slt(2048 / Ty.getVectorElementType().getSizeInBits()))
     return Op;
+
   return SDValue();
 }
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2598,14 +2598,14 @@
   }

   // Splice with lane bigger or equal to 0
-  def : Pat<(nxv16i8 (vector_splice (nxv16i8 ZPR:$Z1), (nxv16i8 ZPR:$Z2), (i64 (sve_ext_imm_0_15 i32:$index)))),
-            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_15:$index)>;
-  def : Pat<(nxv8i16 (vector_splice (nxv8i16 ZPR:$Z1), (nxv8i16 ZPR:$Z2), (i64 (sve_ext_imm_0_7 i32:$index)))),
-            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_7:$index)>;
-  def : Pat<(nxv4i32 (vector_splice (nxv4i32 ZPR:$Z1), (nxv4i32 ZPR:$Z2), (i64 (sve_ext_imm_0_3 i32:$index)))),
-            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_3:$index)>;
-  def : Pat<(nxv2i64 (vector_splice (nxv2i64 ZPR:$Z1), (nxv2i64 ZPR:$Z2), (i64 (sve_ext_imm_0_1 i32:$index)))),
-            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_1:$index)>;
+  def : Pat<(nxv16i8 (vector_splice (nxv16i8 ZPR:$Z1), (nxv16i8 ZPR:$Z2), (i64 (sve_ext_imm_0_255 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_255:$index)>;
+  def : Pat<(nxv8i16 (vector_splice (nxv8i16 ZPR:$Z1), (nxv8i16 ZPR:$Z2), (i64 (sve_ext_imm_0_127 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_127:$index)>;
+  def : Pat<(nxv4i32 (vector_splice (nxv4i32 ZPR:$Z1), (nxv4i32 ZPR:$Z2), (i64 (sve_ext_imm_0_63 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_63:$index)>;
+  def : Pat<(nxv2i64 (vector_splice (nxv2i64 ZPR:$Z1), (nxv2i64 ZPR:$Z2), (i64 (sve_ext_imm_0_31 i32:$index)))),
+            (EXT_ZZI ZPR:$Z1, ZPR:$Z2, sve_ext_imm_0_31:$index)>;

 } // End HasSVEorStreamingSVE
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -264,10 +264,10 @@
 def sve_cnt_shl_imm : ComplexPattern<i32, 1, "SelectCntImm<1, 16, 1, true>">;

-def sve_ext_imm_0_1  : ComplexPattern<i32, 1, "SelectEXTImm<1, 8>">;
-def sve_ext_imm_0_3  : ComplexPattern<i32, 1, "SelectEXTImm<3, 4>">;
-def sve_ext_imm_0_7  : ComplexPattern<i32, 1, "SelectEXTImm<7, 2>">;
-def sve_ext_imm_0_15 : ComplexPattern<i32, 1, "SelectEXTImm<15, 1>">;
+def sve_ext_imm_0_31  : ComplexPattern<i32, 1, "SelectEXTImm<31, 8>">;
+def sve_ext_imm_0_63  : ComplexPattern<i32, 1, "SelectEXTImm<63, 4>">;
+def sve_ext_imm_0_127 : ComplexPattern<i32, 1, "SelectEXTImm<127, 2>">;
+def sve_ext_imm_0_255 : ComplexPattern<i32, 1, "SelectEXTImm<255, 1>">;

 def int_aarch64_sve_cntp_oneuse : PatFrag<(ops node:$pred, node:$src2),
                                           (int_aarch64_sve_cntp node:$pred, node:$src2), [{
diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
--- a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
+++ b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
@@ -7,25 +7,33 @@
 ; VECTOR_SPLICE (index)
 ;

+define <vscale x 16 x i8> @splice_nxv16i8_zero_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
+; CHECK-LABEL: splice_nxv16i8_zero_idx:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 0)
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 16 x i8> @splice_nxv16i8_first_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #1
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 0)
+  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 1)
   ret <vscale x 16 x i8> %res
 }

 define <vscale x 16 x i8> @splice_nxv16i8_last_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #15
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #255
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 15)
+  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 255)
   ret <vscale x 16 x i8> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
 define <vscale x 16 x i8> @splice_nxv16i8_clamped_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_clamped_idx:
 ; CHECK:       // %bb.0:
@@ -35,8 +43,8 @@
 ; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    mov w10, #16
-; CHECK-NEXT:    cmp x9, #16
+; CHECK-NEXT:    mov w10, #256
+; CHECK-NEXT:    cmp x9, #256
 ; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
 ; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -44,29 +52,29 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 16)
+  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 256)
   ret <vscale x 16 x i8> %res
 }

 define <vscale x 8 x i16> @splice_nxv8i16_first_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i16_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #2
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 0)
+  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 1)
   ret <vscale x 8 x i16> %res
 }

 define <vscale x 8 x i16> @splice_nxv8i16_last_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i16_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #14
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #254
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 7)
+  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 127)
   ret <vscale x 8 x i16> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
 define <vscale x 8 x i16> @splice_nxv8i16_clamped_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv8i16_clamped_idx:
 ; CHECK:       // %bb.0:
@@ -76,8 +84,8 @@
 ; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    mov w9, #8
-; CHECK-NEXT:    cmp x10, #8
+; CHECK-NEXT:    mov w9, #128
+; CHECK-NEXT:    cmp x10, #128
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -85,29 +93,29 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 8)
+  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 128)
   ret <vscale x 8 x i16> %res
 }

 define <vscale x 4 x i32> @splice_nxv4i32_first_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #4
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 0)
+  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 1)
   ret <vscale x 4 x i32> %res
 }

 define <vscale x 4 x i32> @splice_nxv4i32_last_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #12
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #252
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 3)
+  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 63)
   ret <vscale x 4 x i32> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
 define <vscale x 4 x i32> @splice_nxv4i32_clamped_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_clamped_idx:
 ; CHECK:       // %bb.0:
@@ -117,8 +125,8 @@
 ; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    mov w9, #4
-; CHECK-NEXT:    cmp x10, #4
+; CHECK-NEXT:    mov w9, #64
+; CHECK-NEXT:    cmp x10, #64
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -126,29 +134,29 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 4)
+  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 64)
   ret <vscale x 4 x i32> %res
 }

 define <vscale x 2 x i64> @splice_nxv2i64_first_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 0)
+  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 1)
   ret <vscale x 2 x i64> %res
 }

 define <vscale x 2 x i64> @splice_nxv2i64_last_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #248
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 1)
+  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 31)
   ret <vscale x 2 x i64> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
 define <vscale x 2 x i64> @splice_nxv2i64_clamped_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_clamped_idx:
 ; CHECK:       // %bb.0:
@@ -158,8 +166,8 @@
 ; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x10, #2
+; CHECK-NEXT:    mov w9, #32
+; CHECK-NEXT:    cmp x10, #32
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -167,7 +175,7 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 2)
+  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 32)
   ret <vscale x 2 x i64> %res
 }

@@ -205,31 +213,31 @@
 define <vscale x 2 x half> @splice_nxv2f16_first_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f16_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 0)
+  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 1)
   ret <vscale x 2 x half> %res
 }

-define <vscale x 2 x half> @splice_nxv2f16_1_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv2f16_1_idx:
+define <vscale x 2 x half> @splice_nxv2f16_last_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv2f16_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #248
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 1)
+  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 31)
   ret <vscale x 2 x half> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
-define <vscale x 2 x half> @splice_nxv2f16_last_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv2f16_last_idx:
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
+define <vscale x 2 x half> @splice_nxv2f16_clamped_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv2f16_clamped_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntd x10
 ; CHECK-NEXT:    sub x10, x10, #1
-; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x10, #2
+; CHECK-NEXT:    mov w9, #32
+; CHECK-NEXT:    cmp x10, #32
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -241,7 +249,7 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 2)
+  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 32)
   ret <vscale x 2 x half> %res
 }

@@ -279,31 +287,31 @@
 define <vscale x 4 x half> @splice_nxv4f16_first_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f16_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #4
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 0)
+  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 1)
   ret <vscale x 4 x half> %res
 }

-define <vscale x 4 x half> @splice_nxv4f16_3_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv4f16_3_idx:
+define <vscale x 4 x half> @splice_nxv4f16_last_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv4f16_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #12
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #252
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 3)
+  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 63)
   ret <vscale x 4 x half> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
-define <vscale x 4 x half> @splice_nxv4f16_last_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv4f16_last_idx:
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
+define <vscale x 4 x half> @splice_nxv4f16_clamped_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv4f16_clamped_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntw x10
 ; CHECK-NEXT:    sub x10, x10, #1
-; CHECK-NEXT:    mov w9, #4
-; CHECK-NEXT:    cmp x10, #4
+; CHECK-NEXT:    mov w9, #64
+; CHECK-NEXT:    cmp x10, #64
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -315,30 +323,29 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 4)
+  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 64)
   ret <vscale x 4 x half> %res
 }
-

 define <vscale x 8 x half> @splice_nxv8f16_first_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv8f16_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #2
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 0)
+  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 1)
   ret <vscale x 8 x half> %res
 }

 define <vscale x 8 x half> @splice_nxv8f16_last_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv8f16_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #14
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #254
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 7)
+  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 127)
   ret <vscale x 8 x half> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
 define <vscale x 8 x half> @splice_nxv8f16_clamped_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv8f16_clamped_idx:
 ; CHECK:       // %bb.0:
@@ -348,8 +355,8 @@
 ; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    mov w9, #8
-; CHECK-NEXT:    cmp x10, #8
+; CHECK-NEXT:    mov w9, #128
+; CHECK-NEXT:    cmp x10, #128
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -357,7 +364,7 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 8)
+  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 128)
   ret <vscale x 8 x half> %res
 }

@@ -395,31 +402,31 @@
 define <vscale x 2 x float> @splice_nxv2f32_first_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f32_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 0)
+  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 1)
   ret <vscale x 2 x float> %res
 }

-define <vscale x 2 x float> @splice_nxv2f32_1_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv2f32_1_idx:
+define <vscale x 2 x float> @splice_nxv2f32_last_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
+; CHECK-LABEL: splice_nxv2f32_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #248
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 1)
+  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 31)
   ret <vscale x 2 x float> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
-define <vscale x 2 x float> @splice_nxv2f32_last_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv2f32_last_idx:
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
+define <vscale x 2 x float> @splice_nxv2f32_clamped_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
+; CHECK-LABEL: splice_nxv2f32_clamped_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntd x10
 ; CHECK-NEXT:    sub x10, x10, #1
-; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x10, #2
+; CHECK-NEXT:    mov w9, #32
+; CHECK-NEXT:    cmp x10, #32
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -431,29 +438,29 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 2)
+  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 32)
   ret <vscale x 2 x float> %res
 }

 define <vscale x 4 x float> @splice_nxv4f32_first_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f32_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #4
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 0)
+  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 1)
   ret <vscale x 4 x float> %res
 }

 define <vscale x 4 x float> @splice_nxv4f32_last_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f32_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #12
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #252
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 3)
+  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 63)
   ret <vscale x 4 x float> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
 define <vscale x 4 x float> @splice_nxv4f32_clamped_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f32_clamped_idx:
 ; CHECK:       // %bb.0:
@@ -463,8 +470,8 @@
 ; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    mov w9, #4
-; CHECK-NEXT:    cmp x10, #4
+; CHECK-NEXT:    mov w9, #64
+; CHECK-NEXT:    cmp x10, #64
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -472,29 +479,29 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 4)
+  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 64)
   ret <vscale x 4 x float> %res
 }

 define <vscale x 2 x double> @splice_nxv2f64_first_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_first_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #0
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 0)
+  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 1)
   ret <vscale x 2 x double> %res
 }

 define <vscale x 2 x double> @splice_nxv2f64_last_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_last_idx:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #248
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 1)
+  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 31)
   ret <vscale x 2 x double> %res
 }

-; Ensure index is clamped when we cannot prove it's less than VL-1.
+; Ensure index is clamped when we cannot prove it's less than 2048-bit.
 define <vscale x 2 x double> @splice_nxv2f64_clamped_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_clamped_idx:
 ; CHECK:       // %bb.0:
@@ -504,8 +511,8 @@
 ; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, sp
-; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x10, #2
+; CHECK-NEXT:    mov w9, #32
+; CHECK-NEXT:    cmp x10, #32
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -513,7 +520,7 @@
 ; CHECK-NEXT:    addvl sp, sp, #2
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 2)
+  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 32)
   ret <vscale x 2 x double> %res
 }
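
Note (illustration, not part of the patch): the relationship between the 2048-bit bound used in LowerVECTOR_SPLICE, the per-element-type sve_ext_imm_0_* limits, and the EXT byte immediates expected by the tests above can be reproduced with a small standalone C++ sketch. The element widths below are assumed to be the four SVE integer widths covered by the patterns; nothing here is taken from the LLVM sources beyond the 2048-bit formula and the 255 immediate limit already quoted in the patch.

#include <cstdio>

int main() {
  // For each element width, LowerVECTOR_SPLICE accepts indices strictly less
  // than 2048 / element-bits (one full 2048-bit vector's worth of elements),
  // because the SVE EXT instruction encodes the shift as a byte offset in
  // the range [0, 255].
  const unsigned ElementBits[] = {8, 16, 32, 64};
  for (unsigned Bits : ElementBits) {
    unsigned MaxIndex = 2048 / Bits - 1;         // largest index that still lowers to EXT
    unsigned ExtByteImm = MaxIndex * (Bits / 8); // byte immediate emitted for that index
    std::printf("i%-2u: max splice index %3u -> ext #%u\n", Bits, MaxIndex, ExtByteImm);
  }
  return 0;
}

The printed immediates (255, 254, 252, 248) match the ext values checked in the *_last_idx tests, and the first out-of-range indices (256, 128, 64, 32) are exactly the ones exercised by the *_clamped_idx tests.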