Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5753,7 +5753,7 @@
                (v4i16 (AArch64uaddv (v4i16 (AArch64uaddlp (v8i8 V64:$op))))),
                (i64 0))), (i64 0))),
           (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
-            (UADDLVv4i16v V64:$op), ssub), ssub)>;
+            (UADDLVv8i8v V64:$op), hsub), ssub)>;
 def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (AArch64uaddlp (v16i8 V128:$op))))), (i64 0))),
           (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
Index: llvm/test/CodeGen/AArch64/neon-uaddlv.ll
===================================================================
--- llvm/test/CodeGen/AArch64/neon-uaddlv.ll
+++ llvm/test/CodeGen/AArch64/neon-uaddlv.ll
@@ -17,7 +17,7 @@
 ; CHECK-LABEL: uaddlv4h_from_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    uaddlv s0, v0.4h
+; CHECK-NEXT:    uaddlv h0, v0.8b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %tmp1 = load <8 x i8>, <8 x i8>* %A
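
For context (not part of the patch itself): the DAG being matched sums the eight i8 lanes of a v8i8 source, and uaddlv over a .8b vector writes its widened result to an H register, whereas the previously selected UADDLVv4i16v consumes a .4h source and writes an S register. The fix therefore both selects UADDLVv8i8v, which takes the v8i8 operand directly, and inserts its result at hsub rather than ssub before extracting the i32. A minimal LLVM IR sketch of the kind of function that exercises the corrected pattern is below; the function name and exact body are assumptions for illustration, not copied from the updated test file.

declare <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16>)

; Pairwise-widen eight i8 lanes to four i16 lanes, then horizontally add them.
; With the corrected pattern, llc should emit "uaddlv h0, v0.8b" followed by
; "fmov w0, s0", matching the updated CHECK lines above.
define i16 @sum_v8i8(<8 x i8>* %A) nounwind {
  %v = load <8 x i8>, <8 x i8>* %A
  %widened = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %v)
  %sum = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %widened)
  %res = trunc i32 %sum to i16
  ret i16 %res
}

Running something like "llc -mtriple=aarch64 -o - sketch.ll" (file name hypothetical) should show the single widening reduction from the CHECK lines above rather than a separate pairwise-add plus reduction sequence.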