diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11513,9 +11513,12 @@
                                   bool LegalOperations, SDNode *N, SDValue N0,
                                   ISD::LoadExtType ExtLoadType,
                                   ISD::NodeType ExtOpc) {
+  // TODO: isFixedLengthVector() should be removed, with any negative effects
+  // on code generation being the result of that target's implementation of
+  // isVectorLoadExtDesirable().
   if (!ISD::isNON_EXTLoad(N0.getNode()) ||
       !ISD::isUNINDEXEDLoad(N0.getNode()) ||
-      ((LegalOperations || VT.isVector() ||
+      ((LegalOperations || VT.isFixedLengthVector() ||
         !cast<LoadSDNode>(N0)->isSimple()) &&
        !TLI.isLoadExtLegal(ExtLoadType, VT, N0.getValueType())))
     return {};
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1231,6 +1231,13 @@
       }
     }
 
+    // SVE supports unpklo/hi instructions to reduce the number of loads.
+    for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) {
+      setLoadExtAction(Op, MVT::nxv16i64, MVT::nxv16i8, Expand);
+      setLoadExtAction(Op, MVT::nxv8i64, MVT::nxv8i16, Expand);
+      setLoadExtAction(Op, MVT::nxv4i64, MVT::nxv4i32, Expand);
+    }
+
     // SVE supports truncating stores of 64 and 128-bit vectors
     setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s
+
+;
+; LD1B
+;
+
+define <vscale x 16 x i64> @ld1b_i8_sext(<vscale x 16 x i8> *%base) {
+; CHECK-LABEL: ld1b_i8_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1sb { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1sb { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1sb { z4.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT:    ld1sb { z5.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT:    ld1sb { z6.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT:    ld1sb { z7.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %res = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+define <vscale x 16 x i64> @ld1b_i8_zext(<vscale x 16 x i8> *%base) {
+; CHECK-LABEL: ld1b_i8_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1b { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1b { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1b { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1b { z4.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT:    ld1b { z5.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT:    ld1b { z6.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT:    ld1b { z7.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %res = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+;
+; LD1H
+;
+
+define <vscale x 8 x i64> @ld1h_i16_sext(<vscale x 8 x i16> *%base) {
+; CHECK-LABEL: ld1h_i16_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1sh { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1sh { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1sh { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %base
+  %res = sext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x i64> @ld1h_i16_zext(<vscale x 8 x i16> *%base) {
+; CHECK-LABEL: ld1h_i16_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %base
+  %res = zext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+;
+; LD1W
+;
+
+define <vscale x 4 x i64> @ld1w_i32_sext(<vscale x 4 x i32> *%base) {
+; CHECK-LABEL: ld1w_i32_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1sw { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %base
+  %res = sext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @ld1w_i32_zext(<vscale x 4 x i32> *%base) {
+; CHECK-LABEL: ld1w_i32_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %base
+  %res = zext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s
+
+;
+; LD1B
+;
+
+define <vscale x 16 x i64> @masked_ld1b_i8_sext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_ld1b_i8_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z1.h, z0.b
+; CHECK-NEXT:    sunpkhi z0.h, z0.b
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    sunpkhi z3.s, z1.h
+; CHECK-NEXT:    sunpklo z5.s, z0.h
+; CHECK-NEXT:    sunpkhi z7.s, z0.h
+; CHECK-NEXT:    sunpklo z0.d, z2.s
+; CHECK-NEXT:    sunpkhi z1.d, z2.s
+; CHECK-NEXT:    sunpklo z2.d, z3.s
+; CHECK-NEXT:    sunpkhi z3.d, z3.s
+; CHECK-NEXT:    sunpklo z4.d, z5.s
+; CHECK-NEXT:    sunpkhi z5.d, z5.s
+; CHECK-NEXT:    sunpklo z6.d, z7.s
+; CHECK-NEXT:    sunpkhi z7.d, z7.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+define <vscale x 16 x i64> @masked_ld1b_i8_zext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_ld1b_i8_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z1.h, z0.b
+; CHECK-NEXT:    uunpkhi z0.h, z0.b
+; CHECK-NEXT:    uunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpkhi z3.s, z1.h
+; CHECK-NEXT:    uunpklo z5.s, z0.h
+; CHECK-NEXT:    uunpkhi z7.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z2.s
+; CHECK-NEXT:    uunpkhi z1.d, z2.s
+; CHECK-NEXT:    uunpklo z2.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    uunpklo z4.d, z5.s
+; CHECK-NEXT:    uunpkhi z5.d, z5.s
+; CHECK-NEXT:    uunpklo z6.d, z7.s
+; CHECK-NEXT:    uunpkhi z7.d, z7.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+;
+; LD1H
+;
+
+define <vscale x 8 x i64> @masked_ld1h_i16_sext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_ld1h_i16_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z1.s, z0.h
+; CHECK-NEXT:    sunpkhi z3.s, z0.h
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    sunpkhi z1.d, z1.s
+; CHECK-NEXT:    sunpklo z2.d, z3.s
+; CHECK-NEXT:    sunpkhi z3.d, z3.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %res = sext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x i64> @masked_ld1h_i16_zext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_ld1h_i16_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    uunpkhi z3.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z2.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %res = zext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+;
+; LD1W
+;
+
+define <vscale x 4 x i64> @masked_ld1w_i32_sext(<vscale x 4 x i32> *%base, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_ld1w_i32_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    sunpkhi z1.d, z1.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %res = sext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @masked_ld1w_i32_zext(<vscale x 4 x i32> *%base, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_ld1w_i32_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %res = zext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
+
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>*, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>*, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)
+
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -79,17 +79,14 @@
 ; Return type requires splitting
 define <vscale x 8 x i64> @masked_zload_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv8i16:
-; CHECK: punpklo p1.h, p0.b
-; CHECK-NEXT: punpkhi p0.h, p0.b
-; CHECK-NEXT: punpklo p2.h, p1.b
-; CHECK-NEXT: punpkhi p1.h, p1.b
-; CHECK-NEXT: ld1h { z0.d }, p2/z, [x0]
-; CHECK-NEXT: punpklo p2.h, p0.b
-; CHECK-NEXT: punpkhi p0.h, p0.b
-; CHECK-NEXT: ld1h { z1.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1h { z2.d }, p2/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1h { z3.d }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT: ret
+; CHECK: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z1.s, z0.h
+; CHECK-NEXT: uunpkhi z3.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z1.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uunpklo z2.d, z3.s
+; CHECK-NEXT: uunpkhi z3.d, z3.s
+; CHECK-NEXT: ret
   %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   %ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %ext