Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2162,6 +2162,16 @@
     switch (IntNo) {
     default:
       break;
+    case Intrinsic::aarch64_neon_uaddlv: {
+      MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
+      unsigned BitWidth = Known.getBitWidth();
+      if (VT == MVT::v8i8) {
+        assert(BitWidth >= 16 && "Unexpected width!");
+        APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
+        Known.Zero |= Mask;
+      }
+      break;
+    }
     case Intrinsic::aarch64_neon_umaxv:
     case Intrinsic::aarch64_neon_uminv: {
       // Figure out the datatype of the vector operand. The UMINV instruction
Index: llvm/test/CodeGen/AArch64/neon-addlv.ll
===================================================================
--- llvm/test/CodeGen/AArch64/neon-addlv.ll
+++ llvm/test/CodeGen/AArch64/neon-addlv.ll
@@ -150,3 +150,16 @@
   %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3)
   ret i32 %tmp5
 }
+
+declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>) nounwind readnone
+
+define i32 @uaddlv_known_bits(<8 x i8> %a) {
+; CHECK-LABEL: uaddlv_known_bits:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uaddlv h0, v0.8b
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+  %tmp1 = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
+  %tmp2 = and i32 %tmp1, 65535
+  ret i32 %tmp2
+}
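
Note on the arithmetic the patch relies on: UADDLV over a v8i8 vector sums eight unsigned bytes, so the widened result can never exceed 8 * 255 = 2040, which fits comfortably in 16 bits. That is why the patch may conservatively mark the high (BitWidth - 16) bits of the i32 result as known zero, and why the `and i32 %tmp1, 65535` in the test folds away, leaving only the uaddlv and the fmov in the CHECK lines. The following is a minimal standalone sketch of that bound, not part of the patch; it uses plain C++ arithmetic in place of LLVM's APInt.

// Standalone sketch (not in the patch): why bits [16, 32) of the
// uaddlv.i32.v8i8 result are known zero, mirroring the mask built by
// APInt::getHighBitsSet(32, 32 - 16).
#include <cassert>
#include <cstdint>

int main() {
  // Worst case for uaddlv on v8i8: all eight unsigned byte lanes are 0xFF.
  uint32_t MaxSum = 8u * 0xFFu; // 2040, representable in 11 bits
  assert(MaxSum <= 0xFFFFu);    // so the top 16 bits of the i32 result are 0

  // The known-zero mask for a 32-bit result: the high 16 bits set.
  uint32_t KnownZero = ~uint32_t{0} << 16; // bits [16, 32)
  assert((MaxSum & KnownZero) == 0); // hence (x & 65535) simplifies to x
  return 0;
}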