Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -15324,6 +15324,9 @@
     return false;
   }
 
+  if (Subtarget->hasMVEIntegerOps())
+    return true;
+
   // Don't create a loadext if we can fold the extension into a wide/long
   // instruction.
   // If there's more than one user instruction, the loadext is desirable no
Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll
@@ -0,0 +1,161 @@
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -disable-mve-tail-predication=false %s -o - | FileCheck %s
+
+; CHECK-NOT: vmovlb
+define dso_local arm_aapcs_vfpcc void @sext_i8(i16* noalias nocapture %a, i8* nocapture readonly %b, i32 %N) {
+entry:
+  %cmp8 = icmp eq i32 %N, 0
+  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert10 = insertelement <8 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat11 = shufflevector <8 x i32> %broadcast.splatinsert10, <8 x i32> undef, <8 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <8 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <8 x i32> %broadcast.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
+  %induction = or <8 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %0 = getelementptr inbounds i8, i8* %b, i32 %index
+  %1 = icmp ule <8 x i32> %induction, %broadcast.splat11
+  %2 = bitcast i8* %0 to <8 x i8>*
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %3 = sext <8 x i8> %wide.masked.load to <8 x i16>
+  %4 = getelementptr inbounds i16, i16* %a, i32 %index
+  %5 = bitcast i16* %4 to <8 x i16>*
+  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
+  %6 = add <8 x i16> %wide.masked.load12, %3
+  %7 = bitcast i16* %4 to <8 x i16>*
+  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)
+  %index.next = add i32 %index, 8
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+; Function Attrs: nofree norecurse nounwind
+define dso_local arm_aapcs_vfpcc void @zext_i8(i16* noalias nocapture %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+entry:
+  %cmp8 = icmp eq i32 %N, 0
+  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert10 = insertelement <8 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat11 = shufflevector <8 x i32> %broadcast.splatinsert10, <8 x i32> undef, <8 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <8 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <8 x i32> %broadcast.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
+  %induction = or <8 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %0 = getelementptr inbounds i8, i8* %b, i32 %index
+  %1 = icmp ule <8 x i32> %induction, %broadcast.splat11
+  %2 = bitcast i8* %0 to <8 x i8>*
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %3 = zext <8 x i8> %wide.masked.load to <8 x i16>
+  %4 = getelementptr inbounds i16, i16* %a, i32 %index
+  %5 = bitcast i16* %4 to <8 x i16>*
+  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
+  %6 = add <8 x i16> %wide.masked.load12, %3
+  %7 = bitcast i16* %4 to <8 x i16>*
+  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)
+  %index.next = add i32 %index, 8
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+; Function Attrs: nofree norecurse nounwind
+define dso_local arm_aapcs_vfpcc void @sext_i16(i32* noalias nocapture %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+entry:
+  %cmp6 = icmp eq i32 %N, 0
+  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert8 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat9 = shufflevector <4 x i32> %broadcast.splatinsert8, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = or <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i16, i16* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat9
+  %2 = bitcast i16* %0 to <4 x i16>*
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = sext <4 x i16> %wide.masked.load to <4 x i32>
+  %4 = getelementptr inbounds i32, i32* %a, i32 %index
+  %5 = bitcast i32* %4 to <4 x i32>*
+  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %5, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %6 = add nsw <4 x i32> %wide.masked.load10, %3
+  %7 = bitcast i32* %4 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %7, i32 4, <4 x i1> %1)
+  %index.next = add i32 %index, 4
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+; Function Attrs: nofree norecurse nounwind
+define dso_local arm_aapcs_vfpcc void @zext_i16(i32* noalias nocapture %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+entry:
+  %cmp6 = icmp eq i32 %N, 0
+  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert8 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat9 = shufflevector <4 x i32> %broadcast.splatinsert8, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = or <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i16, i16* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat9
+  %2 = bitcast i16* %0 to <4 x i16>*
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = zext <4 x i16> %wide.masked.load to <4 x i32>
+  %4 = getelementptr inbounds i32, i32* %a, i32 %index
+  %5 = bitcast i32* %4 to <4 x i32>*
+  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %5, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %6 = add <4 x i32> %wide.masked.load10, %3
+  %7 = bitcast i32* %4 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %7, i32 4, <4 x i1> %1)
+  %index.next = add i32 %index, 4
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
+declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+
+attributes #0 = { nofree norecurse nounwind }
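
For reference, all four tests follow the same widen-and-accumulate shape. Below is a minimal C++ sketch of the kind of source they plausibly derive from; it is an illustration, not part of the patch (function names mirror the test functions, and signed vs. unsigned element types select sext vs. zext). With hasMVEIntegerOps() returning true from the hook above, the extension stays folded into the masked load and can be selected as an MVE extending load (e.g. vldrb.s16, vldrb.u16, vldrh.s32, vldrh.u32) rather than a plain load followed by a vmovlb.

// Illustrative only: plausible source for the tests above, not taken from
// the patch. Each loop loads narrow elements, widens them, and accumulates
// into a wider array; the vectorizer produces the masked-load / extend /
// add / masked-store IR shown in the test file.
#include <cstdint>

void sext_i8(int16_t *a, const int8_t *b, uint32_t N) {
  for (uint32_t i = 0; i < N; ++i)
    a[i] += b[i]; // i8 sign-extended to i16
}

void zext_i8(int16_t *a, const uint8_t *b, uint32_t N) {
  for (uint32_t i = 0; i < N; ++i)
    a[i] += b[i]; // i8 zero-extended to i16
}

void sext_i16(int32_t *a, const int16_t *b, uint32_t N) {
  for (uint32_t i = 0; i < N; ++i)
    a[i] += b[i]; // i16 sign-extended to i32
}

void zext_i16(int32_t *a, const uint16_t *b, uint32_t N) {
  for (uint32_t i = 0; i < N; ++i)
    a[i] += b[i]; // i16 zero-extended to i32
}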