diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13358,14 +13358,14 @@
   // to make better use of vaddva style instructions.
   if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) &&
       IsVecReduce(N1.getOperand(0)) && IsVecReduce(N1.getOperand(1)) &&
-      !isa<ConstantSDNode>(N0)) {
+      !isa<ConstantSDNode>(N0) && N1->hasOneUse()) {
     SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, N0, N1.getOperand(0));
     return DAG.getNode(ISD::ADD, dl, VT, Add0, N1.getOperand(1));
   }
   // And turn add(add(A, reduce(B)), add(C, reduce(D))) ->
   //   add(add(add(A, C), reduce(B)), reduce(D))
   if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD &&
-      N1.getOpcode() == ISD::ADD) {
+      N1.getOpcode() == ISD::ADD && N0->hasOneUse() && N1->hasOneUse()) {
     unsigned N0RedOp = 0;
     if (!IsVecReduce(N0.getOperand(N0RedOp))) {
       N0RedOp = 1;
@@ -13432,7 +13432,7 @@
   };
 
   SDValue X;
-  if (N0.getOpcode() == ISD::ADD) {
+  if (N0.getOpcode() == ISD::ADD && N0->hasOneUse()) {
     if (IsVecReduce(N0.getOperand(0)) && IsVecReduce(N0.getOperand(1))) {
       int IsBefore = IsKnownOrderedLoad(N0.getOperand(0).getOperand(0),
                                         N0.getOperand(1).getOperand(0));
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc i32 @test1(ptr %ptr, i32 %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
+; CHECK-LABEL: test1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vaddv.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r2, q0
+; CHECK-NEXT:    str r2, [r0]
+; CHECK-NEXT:    adds r0, r2, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg2)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %add1 = add i32 %reduce1, %reduce2
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %add1, %arg1
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test2(ptr %ptr, i32 %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
+; CHECK-LABEL: test2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vaddv.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r2, q0
+; CHECK-NEXT:    str r2, [r0]
+; CHECK-NEXT:    adds r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg2)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %add1 = add i32 %reduce1, %reduce2
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %arg1, %add1
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test3(ptr %ptr, i32 %arg1, i32 %arg2, <4 x i32> %arg3, <4 x i32> %arg4) {
+; CHECK-LABEL: test3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    add.w r0, r12, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg4)
+  %add1 = add i32 %arg1, %reduce1
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %arg2, %reduce2
+  %add3 = add i32 %add1, %add2
+  ret i32 %add3
+}
+
+define arm_aapcs_vfpcc i32 @test4(ptr %ptr, i32 %arg1, ptr %arg2) {
+; CHECK-LABEL: test4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    vldrw.u32 q0, [r2, #4]
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    bx lr
+entry:
+  %load1 = load <4 x i32>, ptr %arg2, align 4
+  %gep = getelementptr inbounds i32, ptr %arg2, i32 1
+  %load2 = load <4 x i32>, ptr %gep, align 4
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load1)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load2)
+  %add1 = add i32 %arg1, %reduce1
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %add1, %reduce2
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test5(ptr %ptr, i32 %arg1, ptr %arg2) {
+; CHECK-LABEL: test5:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r2, #4]
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    bx lr
+entry:
+  %load1 = load <4 x i32>, ptr %arg2, align 4
+  %gep = getelementptr inbounds i32, ptr %arg2, i32 1
+  %load2 = load <4 x i32>, ptr %gep, align 4
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load1)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load2)
+  %add1 = add i32 %arg1, %reduce2
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %add1, %reduce1
+  ret i32 %add2
+}
+
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)