diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13333,6 +13333,9 @@
 
     SDLoc DL(N);
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0));
+    if (!C)
+      return SDValue();
+
     uint64_t ExtVal = C->getZExtValue();
 
     // If the mask is fully covered by the unpack, we don't need to push
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-and-combine-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-and-combine-crash.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-and-combine-crash.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+; Check that this test does not crash at performSVEAndCombine.
+
+define <vscale x 4 x i32> @test(<vscale x 8 x i16> %in1, <vscale x 4 x i32> %in2) {
+; CHECK-LABEL: test:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    mov z1.s, s1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %i1 = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpkhi.nxv4i32(<vscale x 8 x i16> %in1)
+  %i2 = shufflevector <vscale x 4 x i32> %in2, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %i3 = and <vscale x 4 x i32> %i1, %i2
+  ret <vscale x 4 x i32> %i3
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uunpkhi.nxv4i32(<vscale x 8 x i16>)
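
Explanatory note (not part of the patch): the combine assumed the DUP node's
splatted operand was always a ConstantSDNode, but a splat of a non-constant
scalar (as in the test above, where the splatted value comes from the function
argument %in2) makes dyn_cast<ConstantSDNode> return null, and the old code
then dereferenced that null pointer via getZExtValue(). Below is a minimal
sketch of the checked-cast idiom the fix applies; the surrounding combine
logic is elided, and only dyn_cast<>/cast<> and getZExtValue() are real LLVM
APIs here:

  // dyn_cast<T> returns nullptr when the value is not a T (unlike cast<T>,
  // which asserts), so the result must be tested before use.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0))) {
    uint64_t ExtVal = C->getZExtValue();
    // ... fold the AND mask into the unpack using ExtVal ...
  }
  // Non-constant splat: bail out of the combine without touching the DAG.
  return SDValue();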