Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -14234,21 +14234,44 @@
 /// sext/zext can be folded into vsubl.
 bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
                                            SmallVectorImpl<Use> &Ops) const {
-  if (!Subtarget->hasNEON() || !I->getType()->isVectorTy())
+  if (!I->getType()->isVectorTy())
     return false;
 
-  switch (I->getOpcode()) {
-  case Instruction::Sub:
-  case Instruction::Add: {
-    if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
+  if (Subtarget->hasNEON()) {
+    switch (I->getOpcode()) {
+    case Instruction::Sub:
+    case Instruction::Add: {
+      if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
+        return false;
+      Ops.push_back(&I->getOperandUse(0));
+      Ops.push_back(&I->getOperandUse(1));
+      return true;
+    }
+    default:
       return false;
-    Ops.push_back(&I->getOperandUse(0));
-    Ops.push_back(&I->getOperandUse(1));
-    return true;
+    }
   }
-  default:
-    return false;
+
+  if (Subtarget->hasMVEIntegerOps()) {
+    switch (I->getOpcode()) {
+    case Instruction::Add:
+    case Instruction::Mul: {
+      int Op = 0;
+      if (!isa<ShuffleVectorInst>(I->getOperand(Op)))
+        Op = 1;
+      if (match(I->getOperand(Op),
+                m_ShuffleVector(
+                    m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
+                    m_Undef(), m_Zero()))) {
+        Instruction *Shuffle = dyn_cast<Instruction>(I->getOperand(Op));
+        Ops.push_back(&Shuffle->getOperandUse(0));
+        Ops.push_back(&I->getOperandUse(Op));
+        return true;
+      }
+    }
+    }
   }
+
+  return false;
 }
Index: llvm/test/Transforms/CodeGenPrepare/ARM/sink-add-mul-shufflevector.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/CodeGenPrepare/ARM/sink-add-mul-shufflevector.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp < %s -codegenprepare -S | FileCheck -check-prefix=CHECK %s
+
+define void @sink_add_mul(i32* %s1, i32 %x, i32* %d, i32 %n) {
+; CHECK-LABEL: @sink_add_mul(
+; CHECK:       vector.ph:
+; CHECK-NOT:     [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <4 x i32> undef, i32 [[X:%.*]], i32 0
+; CHECK-NOT:     [[BROADCAST_SPLAT9:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT8]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK:       vector.body:
+; CHECK:         [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[X:%.*]], i32 0
+; CHECK:         [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer
+;
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %vector.ph, label %for.cond.cleanup
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  %broadcast.splatinsert8 = insertelement <4 x i32> undef, i32 %x, i32 0
+  %broadcast.splat9 = shufflevector <4 x i32> %broadcast.splatinsert8, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %s1, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = mul nsw <4 x i32> %wide.load, %broadcast.splat9
+  %3 = getelementptr inbounds i32, i32* %d, i32 %index
+  %4 = bitcast i32* %3 to <4 x i32>*
+  %wide.load10 = load <4 x i32>, <4 x i32>* %4, align 4
+  %5 = add nsw <4 x i32> %wide.load10, %2
+  %6 = bitcast i32* %3 to <4 x i32>*
+  store <4 x i32> %5, <4 x i32>* %6, align 4
+  %index.next = add i32 %index, 4
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  ret void
+}