Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1229,6 +1229,13 @@
       }
     }
 
+    // SVE supports unpklo/hi instructions to reduce the number of loads.
+    for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
+      setLoadExtAction(Op, MVT::nxv16i64, MVT::nxv16i8, Expand);
+      setLoadExtAction(Op, MVT::nxv8i64, MVT::nxv8i16, Expand);
+      setLoadExtAction(Op, MVT::nxv4i64, MVT::nxv4i32, Expand);
+    }
+
     // SVE supports truncating stores of 64 and 128-bit vectors
     setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-mask-loads.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-mask-loads.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s
+
+define <vscale x 16 x i64> @masked_ld1b_i8_sext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_ld1b_i8_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z1.h, z0.b
+; CHECK-NEXT:    sunpkhi z0.h, z0.b
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    sunpkhi z3.s, z1.h
+; CHECK-NEXT:    sunpklo z5.s, z0.h
+; CHECK-NEXT:    sunpkhi z7.s, z0.h
+; CHECK-NEXT:    sunpklo z0.d, z2.s
+; CHECK-NEXT:    sunpkhi z1.d, z2.s
+; CHECK-NEXT:    sunpklo z2.d, z3.s
+; CHECK-NEXT:    sunpkhi z3.d, z3.s
+; CHECK-NEXT:    sunpklo z4.d, z5.s
+; CHECK-NEXT:    sunpkhi z5.d, z5.s
+; CHECK-NEXT:    sunpklo z6.d, z7.s
+; CHECK-NEXT:    sunpkhi z7.d, z7.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+define <vscale x 16 x i64> @masked_ld1b_i8_zext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_ld1b_i8_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z1.h, z0.b
+; CHECK-NEXT:    uunpkhi z0.h, z0.b
+; CHECK-NEXT:    uunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpkhi z3.s, z1.h
+; CHECK-NEXT:    uunpklo z5.s, z0.h
+; CHECK-NEXT:    uunpkhi z7.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z2.s
+; CHECK-NEXT:    uunpkhi z1.d, z2.s
+; CHECK-NEXT:    uunpklo z2.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    uunpklo z4.d, z5.s
+; CHECK-NEXT:    uunpkhi z5.d, z5.s
+; CHECK-NEXT:    uunpklo z6.d, z7.s
+; CHECK-NEXT:    uunpkhi z7.d, z7.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+define <vscale x 8 x i64> @masked_ld1h_i16_sext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_ld1h_i16_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z1.s, z0.h
+; CHECK-NEXT:    sunpkhi z3.s, z0.h
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    sunpkhi z1.d, z1.s
+; CHECK-NEXT:    sunpklo z2.d, z3.s
+; CHECK-NEXT:    sunpkhi z3.d, z3.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %res = sext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x i64> @masked_ld1h_i16_zext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_ld1h_i16_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    uunpkhi z3.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z2.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %res = zext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 4 x i64> @masked_ld1w_i32_sext(<vscale x 4 x i32> *%base, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_ld1w_i32_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    sunpkhi z1.d, z1.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %res = sext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @masked_ld1w_i32_zext(<vscale x 4 x i32> *%base, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_ld1w_i32_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %res = zext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
+
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>*, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>*, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)
+
Index: llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -79,17 +79,14 @@
 ; Return type requires splitting
 define <vscale x 8 x i64> @masked_zload_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv8i16:
-; CHECK: punpklo p1.h, p0.b
-; CHECK-NEXT: punpkhi p0.h, p0.b
-; CHECK-NEXT: punpklo p2.h, p1.b
-; CHECK-NEXT: punpkhi p1.h, p1.b
-; CHECK-NEXT: ld1h { z0.d }, p2/z, [x0]
-; CHECK-NEXT: punpklo p2.h, p0.b
-; CHECK-NEXT: punpkhi p0.h, p0.b
-; CHECK-NEXT: ld1h { z1.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1h { z2.d }, p2/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1h { z3.d }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT: ret
+; CHECK: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z1.s, z0.h
+; CHECK-NEXT: uunpkhi z3.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uunpklo z2.d, z3.s
+; CHECK-NEXT: uunpkhi z3.d, z3.s
+; CHECK-NEXT: ret
   %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   %ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %ext
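
For reference, the IR shape the new Expand entries target is a masked load whose result is immediately sign- or zero-extended to a wider scalable type. Below is a minimal sketch of such input (the file, function, and value names are illustrative assumptions, not taken from the patch); with the entries above, the load keeps its natural width and the extension is lowered to unpklo/unpkhi, as the updated masked_zload_nxv8i16 checks show, instead of the load being split into several extending ld1h loads.

; Illustrative input, using typed pointers as in the tests above; can be fed to
; the same llc invocation as the RUN lines, e.g.:
;   llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s
define <vscale x 8 x i64> @zext_masked_load_sketch(<vscale x 8 x i16>* %p, <vscale x 8 x i1> %m) {
  ; A single nxv8i16 masked load (one ld1h { z.h })...
  %v = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %p, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> undef)
  ; ...followed by a separate widening step, lowered to two levels of
  ; uunpklo/uunpkhi rather than being folded back into the load.
  %e = zext <vscale x 8 x i16> %v to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %e
}

declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)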