Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1229,6 +1229,13 @@
       }
     }
 
+    // SVE supports unpklo/hi instructions to reduce the number of loads.
+    for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
+      setLoadExtAction(Op, MVT::nxv16i64, MVT::nxv16i8, Expand);
+      setLoadExtAction(Op, MVT::nxv8i64, MVT::nxv8i16, Expand);
+      setLoadExtAction(Op, MVT::nxv4i64, MVT::nxv4i32, Expand);
+    }
+
     // SVE supports truncating stores of 64 and 128-bit vectors
     setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);
Index: llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -79,17 +79,14 @@
 ; Return type requires splitting
 define <vscale x 8 x i64> @masked_zload_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv8i16:
-; CHECK: punpklo p1.h, p0.b
-; CHECK-NEXT: punpkhi p0.h, p0.b
-; CHECK-NEXT: punpklo p2.h, p1.b
-; CHECK-NEXT: punpkhi p1.h, p1.b
-; CHECK-NEXT: ld1h { z0.d }, p2/z, [x0]
-; CHECK-NEXT: punpklo p2.h, p0.b
-; CHECK-NEXT: punpkhi p0.h, p0.b
-; CHECK-NEXT: ld1h { z1.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1h { z2.d }, p2/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1h { z3.d }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT: ret
+; CHECK: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z1.s, z0.h
+; CHECK-NEXT: uunpkhi z3.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uunpklo z2.d, z3.s
+; CHECK-NEXT: uunpkhi z3.d, z3.s
+; CHECK-NEXT: ret
   %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  %ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %ext
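
Note (not part of the patch above): marking these extending masked-load combinations as Expand lets type legalisation keep a single full-width predicated load and widen the result with uunpklo/uunpkhi, instead of splitting the predicate and issuing one narrow load per sub-vector, as the updated CHECK lines show. A minimal standalone reproducer, assuming an llc build with SVE enabled, could look like the sketch below; the function name is illustrative, the intrinsic naming follows the convention of the test file above, and the expected codegen in the comments is an assumption based on this patch rather than taken from it.

; Hypothetical reproducer for the nxv4i32 -> nxv4i64 case covered by the new
; setLoadExtAction entries; run with something like:
;   llc -mtriple=aarch64-linux-gnu -mattr=+sve < reproducer.ll
define <vscale x 4 x i64> @zload_nxv4i32(<vscale x 4 x i32>* %a, <vscale x 4 x i1> %mask) {
  ; Masked load of 32-bit elements, zero-extended to 64-bit elements.
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %ext = zext <vscale x 4 x i32> %load to <vscale x 4 x i64>
  ; With the Expand action above, this should lower to one ld1w followed by
  ; uunpklo/uunpkhi rather than two predicated ld1w extending loads.
  ret <vscale x 4 x i64> %ext
}

declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)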