diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9728,6 +9728,10 @@
   if (ExtIdxInBits % CastedEltBitWidth != 0)
     return false;
 
+  // Can't handle cases where vector size is greater than 128 bits.
+  if (Extract.getOperand(0).getValueSizeInBits() > 128)
+    return false;
+
   // Update the lane value by offsetting with the scaled extract index.
   LaneC += ExtIdxInBits / CastedEltBitWidth;
 
diff --git a/llvm/test/CodeGen/AArch64/sve-shuffle-crash.ll b/llvm/test/CodeGen/AArch64/sve-shuffle-crash.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-shuffle-crash.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @foo(<32 x i32>* %dst, i1 %cond) #0 {
+  %broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer
+  br i1 %cond, label %exit, label %vector.body
+
+vector.body:
+  %1 = load <32 x i32>, <32 x i32>* %dst, align 16
+  %predphi = select <32 x i1> %broadcast.splat, <32 x i32> zeroinitializer, <32 x i32> %1
+  store <32 x i32> %predphi, <32 x i32>* %dst, align 16
+  br label %exit
+
+exit:
+  ret void
+}
+
+attributes #0 = { vscale_range(2,2) "target-features"="+sve" }