diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1231,11 +1231,25 @@
       }
     }
 
-    // SVE supports unpklo/hi instructions to reduce the number of loads.
-    for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) {
-      setLoadExtAction(Op, MVT::nxv16i64, MVT::nxv16i8, Expand);
-      setLoadExtAction(Op, MVT::nxv8i64, MVT::nxv8i16, Expand);
-      setLoadExtAction(Op, MVT::nxv4i64, MVT::nxv4i32, Expand);
+    // Firstly, exclude all scalable vector extending loads/truncating stores.
+    for (MVT VT : MVT::integer_scalable_vector_valuetypes()) {
+      for (MVT InnerVT : MVT::integer_scalable_vector_valuetypes()) {
+        // TODO: truncating stores should also be excluded
+        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
+      }
+    }
+
+    // Then, selectively enable those which we directly support.
+    for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
+      setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i8, Legal);
+      setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i16, Legal);
+      setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i32, Legal);
+      setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i8, Legal);
+      setLoadExtAction(Op, MVT::nxv2i32, MVT::nxv2i16, Legal);
+      setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i16, Legal);
+      setLoadExtAction(Op, MVT::nxv8i16, MVT::nxv8i8, Legal);
     }
 
     // SVE supports truncating stores of 64 and 128-bit vectors
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
@@ -2,9 +2,37 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s
 
 ;
-; LD1B
+; LD1SB/LD1B
 ;
 
+define <vscale x 16 x i32> @ld1b_i8_sext_i32(<vscale x 16 x i8> *%base) {
+; CHECK-LABEL: ld1b_i8_sext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z2.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1sb { z3.s }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %res = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x i32> @ld1b_i8_zext_i32(<vscale x 16 x i8> *%base) {
+; CHECK-LABEL: ld1b_i8_zext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z2.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z3.s }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %res = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %res
+}
+
 define <vscale x 16 x i16> @ld1b_i8_sext(<vscale x 16 x i8> *%base) {
 ; CHECK-LABEL: ld1b_i8_sext:
 ; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
@@ -5,6 +5,38 @@
 ; LD1B
 ;
 
+define <vscale x 16 x i32> @masked_ld1b_i8_sext_i32(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_ld1b_i8_sext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: sunpklo z1.h, z0.b
+; CHECK-NEXT: sunpkhi z3.h, z0.b
+; CHECK-NEXT: sunpklo z0.s, z1.h
+; CHECK-NEXT: sunpkhi z1.s, z1.h
+; CHECK-NEXT: sunpklo z2.s, z3.h
+; CHECK-NEXT: sunpkhi z3.s, z3.h
+; CHECK-NEXT: ret
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x i32> @masked_ld1b_i8_zext_i32(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_ld1b_i8_zext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z1.h, z0.b
+; CHECK-NEXT: uunpkhi z3.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpklo z2.s, z3.h
+; CHECK-NEXT: uunpkhi z3.s, z3.h
+; CHECK-NEXT: ret
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %res
+}
+
 define <vscale x 16 x i16> @masked_ld1b_i8_sext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_i8_sext:
 ; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
@@ -73,16 +73,13 @@
 ; Return type requires splitting
 define <vscale x 16 x i32> @masked_sload_nxv16i8(<vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_nxv16i8:
-; CHECK: punpklo p1.h, p0.b
-; CHECK-NEXT: punpkhi p0.h, p0.b
-; CHECK-NEXT: punpklo p2.h, p1.b
-; CHECK-NEXT: punpkhi p1.h, p1.b
-; CHECK-NEXT: ld1sb { z0.s }, p2/z, [x0]
-; CHECK-NEXT: punpklo p2.h, p0.b
-; CHECK-NEXT: punpkhi p0.h, p0.b
-; CHECK-NEXT: ld1sb { z1.s }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1sb { z2.s }, p2/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1sb { z3.s }, p0/z, [x0, #3, mul vl]
+; CHECK: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: sunpklo z1.h, z0.b
+; CHECK-NEXT: sunpkhi z3.h, z0.b
+; CHECK-NEXT: sunpklo z0.s, z1.h
+; CHECK-NEXT: sunpkhi z1.s, z1.h
+; CHECK-NEXT: sunpklo z2.s, z3.h
+; CHECK-NEXT: sunpkhi z3.s, z3.h
 ; CHECK-NEXT: ret
   %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %ext = sext <vscale x 16 x i8> %load to <vscale x 16 x i32>
@@ -92,14 +89,13 @@
 ; Masked load requires promotion
 define <vscale x 4 x double> @masked_sload_4i8_4f32(<vscale x 4 x i8>* noalias %in, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_4i8_4f32:
-; CHECK: punpkhi p2.h, p0.b
-; CHECK-NEXT: punpklo p0.h, p0.b
-; CHECK-NEXT: ld1sb { z1.d }, p2/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: scvtf z0.d, p1/m, z0.d
-; CHECK-NEXT: scvtf z1.d, p1/m, z1.d
-; CHECK-NEXT: ret
+; CHECK: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: sunpkhi z1.d, z0.s
+; CHECK-NEXT: sunpklo z0.d, z0.s
+; CHECK-NEXT: scvtf z0.d, p1/m, z0.d
+; CHECK-NEXT: scvtf z1.d, p1/m, z1.d
+; CHECK-NEXT: ret
   %wide.load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %in, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %sext = sext <vscale x 4 x i8> %wide.load to <vscale x 4 x i64>
  %res = sitofp <vscale x 4 x i64> %sext to <vscale x 4 x double>
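
Note on the ISelLowering hunk: the patch replaces the old per-type Expand list with a two-step scheme, first marking every scalable-vector extending load as Expand and then re-enabling only the combinations that map onto a single SVE extending load (the LD1B/LD1SB, LD1H/LD1SH and LD1W/LD1SW forms). The sketch below is not part of the patch; it only illustrates how such a table is consulted through the standard TargetLowering API, and the helper name isExtLoadDirectlySupported is made up for illustration.

// Sketch only (not part of the patch): querying the load-extend legalization
// table configured by the hunk above via the standard TargetLowering API.
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

// Hypothetical helper: true when the target reports the extending load as
// Legal, i.e. it can be selected as a single SVE extending load instead of
// being expanded into a plain load plus [s|u]unpklo/[s|u]unpkhi.
static bool isExtLoadDirectlySupported(const TargetLowering &TLI,
                                       unsigned ExtType, MVT ValVT,
                                       MVT MemVT) {
  return TLI.getLoadExtAction(ExtType, ValVT, MemVT) == TargetLowering::Legal;
}

// With the table above one would expect, for example:
//   isExtLoadDirectlySupported(TLI, ISD::SEXTLOAD, MVT::nxv4i32, MVT::nxv4i8)
//     -> true   (covered by the Legal list: a single LD1SB per part)
//   isExtLoadDirectlySupported(TLI, ISD::SEXTLOAD, MVT::nxv16i32, MVT::nxv16i8)
//     -> false  (not re-enabled, so it remains Expand)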