Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11129,6 +11129,22 @@
     return SDValue(N, 0); // Return N so it doesn't get rechecked!
   }
 
+  // fold (sext_inreg (masked_load x)) -> (sext_masked_load x)
+  // ignore it if the masked load is already sign extended
+  if (MaskedLoadSDNode *Ld = dyn_cast<MaskedLoadSDNode>(N0)) {
+    if (ExtVT == Ld->getMemoryVT() && N0.hasOneUse() &&
+        Ld->getExtensionType() != ISD::LoadExtType::NON_EXTLOAD &&
+        TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, ExtVT)) {
+      SDValue ExtMaskedLoad = DAG.getMaskedLoad(
+          VT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(), Ld->getOffset(),
+          Ld->getMask(), Ld->getPassThru(), ExtVT, Ld->getMemOperand(),
+          Ld->getAddressingMode(), ISD::SEXTLOAD, Ld->isExpandingLoad());
+      CombineTo(N, ExtMaskedLoad);
+      CombineTo(N0.getNode(), ExtMaskedLoad, ExtMaskedLoad.getValue(1));
+      return SDValue(N, 0); // Return N so it doesn't get rechecked!
+    }
+  }
+
   // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
   if (ExtVTBits <= 16 && N0.getOpcode() == ISD::OR) {
     if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
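A note on the DAGCombiner change above (commentary, not part of the patch): the tests below never contain an explicit sext. Type legalization promotes the narrow vector types (<4 x i16>, <8 x i8>, <4 x i8>), turning the masked load into an extending masked load and the sign extension required by sitofp into a SIGN_EXTEND_INREG of its result; the new fold then collapses that pair into a single sign-extending masked load. A minimal standalone sketch of the same pattern, with a hypothetical function name, assuming the fold applies equally to an explicit sext after promotion:

  declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)

  ; After type legalization this is (sext_inreg (masked_load)), which the new
  ; fold should select as a single vldrht.s32 rather than a zero-extending
  ; vldrht.u32 followed by a vmovlb.s16.
  define arm_aapcs_vfpcc <4 x i32> @sext_of_masked_load(<4 x i16>* %p, <4 x i1> %m) {
  entry:
    %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %p, i32 2, <4 x i1> %m, <4 x i16> undef)
    %e = sext <4 x i16> %l to <4 x i32>
    ret <4 x i32> %e
  }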
Index: llvm/test/CodeGen/Thumb2/mve-sext-masked-load.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-sext-masked-load.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs -o - %s | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x float> @foo_v4i16(<4 x i16>* nocapture readonly %pSrc, i32 %blockSize, <4 x i16> %a) {
+; CHECK-LABEL: foo_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vpt.s32 lt, q0, zr
+; CHECK-NEXT:    vldrht.s32 q0, [r0]
+; CHECK-NEXT:    vcvt.f32.s32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %active.lane.mask = icmp slt <4 x i16> %a, zeroinitializer
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %pSrc, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
+  %0 = sitofp <4 x i16> %wide.masked.load to <4 x float>
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <8 x half> @foo_v8i8(<8 x i8>* nocapture readonly %pSrc, i32 %blockSize, <8 x i8> %a) {
+; CHECK-LABEL: foo_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vpt.s16 lt, q0, zr
+; CHECK-NEXT:    vldrbt.s16 q0, [r0]
+; CHECK-NEXT:    vcvt.f16.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %active.lane.mask = icmp slt <8 x i8> %a, zeroinitializer
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %pSrc, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
+  %0 = sitofp <8 x i8> %wide.masked.load to <8 x half>
+  ret <8 x half> %0
+}
+
+define arm_aapcs_vfpcc <4 x float> @foo_v4i8(<4 x i8>* nocapture readonly %pSrc, i32 %blockSize, <4 x i8> %a) {
+; CHECK-LABEL: foo_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vpt.s32 lt, q0, zr
+; CHECK-NEXT:    vldrbt.s32 q0, [r0]
+; CHECK-NEXT:    vcvt.f32.s32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %active.lane.mask = icmp slt <4 x i8> %a, zeroinitializer
+  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %pSrc, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
+  %0 = sitofp <4 x i8> %wide.masked.load to <4 x float>
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <4 x double> @foo_v4i32(<4 x i32>* nocapture readonly %pSrc, i32 %blockSize, <4 x i32> %a) {
+; CHECK-LABEL: foo_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpt.s32 lt, q0, zr
+; CHECK-NEXT:    vldrwt.u32 q5, [r0]
+; CHECK-NEXT:    vmov.f64 d8, d10
+; CHECK-NEXT:    vmov.f32 s18, s21
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    asrs r1, r0, #31
+; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov d9, r0, r1
+; CHECK-NEXT:    asrs r3, r2, #31
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    vmov.f64 d12, d11
+; CHECK-NEXT:    vmov.f32 s26, s23
+; CHECK-NEXT:    vmov d8, r0, r1
+; CHECK-NEXT:    vmov r2, s26
+; CHECK-NEXT:    asrs r3, r2, #31
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    vmov r2, s24
+; CHECK-NEXT:    vmov d11, r0, r1
+; CHECK-NEXT:    asrs r3, r2, #31
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    vmov d10, r0, r1
+; CHECK-NEXT:    vmov q0, q4
+; CHECK-NEXT:    vmov q1, q5
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %active.lane.mask = icmp slt <4 x i32> %a, zeroinitializer
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %pSrc, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %0 = sitofp <4 x i32> %wide.masked.load to <4 x double>
+  ret <4 x double> %0
+}
+
+declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+
+declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
+
+declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
+
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
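Usage note (commentary, not part of the patch): the CHECK lines were generated with utils/update_llc_test_checks.py, as the NOTE at the top of the test says, and the file can be run on its own with llvm-lit. The first three functions show the fold firing: each predicated load selects directly as a sign-extending vldrht.s32/vldrbt.s16/vldrbt.s32 (the vmovlb instructions before the vpt sign-extend the compare operand %a, not the loaded data). foo_v4i32 is deliberately left alone by the extending-load guard: a <4 x i32> masked load needs no extension, so it stays a plain vldrwt.u32, and each lane is sign-extended to i64 (asrs) and converted to double through the __aeabi_l2d libcall.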