diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11452,6 +11452,9 @@
     }
   }
 
+  if (!Imm)
+    return SDValue();
+
   SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
   return DAG.getNode(AArch64ISD::SETCC_PRED, DL, VT, Pred, N->getOperand(2),
                      Splat, DAG.getCondCode(CC));
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll
@@ -940,6 +940,30 @@
   ret <vscale x 4 x i1> %out
 }
 
+
+define <vscale x 16 x i1> @cmpgt_wide_splat_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, i64 %b) {
+; CHECK-LABEL: cmpgt_wide_splat_b:
+; CHECK: cmpgt p0.b, p0/z, z0.b, z1.d
+; CHECK-NEXT: ret
+  %splat = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %b)
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 4 x i1> @cmpls_wide_splat_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, i64 %b) {
+; CHECK-LABEL: cmpls_wide_splat_s:
+; CHECK: cmpls p0.s, p0/z, z0.s, z1.d
+; CHECK-NEXT: ret
+  %splat = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %b)
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
+
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
@@ -1003,3 +1027,5 @@
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
+
+declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)