Index: lib/Target/ARM/ARMISelDAGToDAG.cpp
===================================================================
--- lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2765,7 +2765,7 @@
     }
   }
  case ARMISD::SUBE: {
-    if (!Subtarget->hasV6Ops())
+    if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
      break;
    // Look for a pattern to match SMMLS
    // (sube a, (smul_loHi a, b), (subc 0, (smul_LOhi(a, b))))
Index: lib/Target/ARM/ARMISelLowering.h
===================================================================
--- lib/Target/ARM/ARMISelLowering.h
+++ lib/Target/ARM/ARMISelLowering.h
@@ -203,6 +203,8 @@
       SMLALDX,      // Signed multiply accumulate long dual exchange
       SMLSLD,       // Signed multiply subtract long dual
       SMLSLDX,      // Signed multiply subtract long dual exchange
+      MULHS_AR,     // Signed multiply long, round and add
+      MULHS_SR,     // Signed multiply long, subtract and round

       // Operands of the standard BUILD_VECTOR node are not legalized, which
       // is fine if BUILD_VECTORs are always lowered to shuffles or other
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp
+++ lib/Target/ARM/ARMISelLowering.cpp
@@ -1337,6 +1337,8 @@
   case ARMISD::SMLALDX:       return "ARMISD::SMLALDX";
   case ARMISD::SMLSLD:        return "ARMISD::SMLSLD";
   case ARMISD::SMLSLDX:       return "ARMISD::SMLSLDX";
+  case ARMISD::MULHS_AR:      return "ARMISD::MULHS_AR";
+  case ARMISD::MULHS_SR:      return "ARMISD::MULHS_SR";
   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
   case ARMISD::BFI:           return "ARMISD::BFI";
   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
@@ -9877,7 +9879,14 @@
   //                  V     V
   //                    ADDE   <- hiAdd
   //
-  assert(AddeNode->getOpcode() == ARMISD::ADDE && "Expect an ADDE");
+  // In the special case where only the higher part of a signed result is used
+  // and the add to the low part of the result of ISD::SMUL_LOHI adds or
+  // subtracts a constant with the exact value of 0x80000000, we recognize we
+  // are dealing with a "rounded multiply and add" (or subtract) and transform
+  // it into either an ARMISD::MULHS_AR or ARMISD::MULHS_SR respectively.
+  assert((AddeNode->getOpcode() == ARMISD::ADDE ||
+          AddeNode->getOpcode() == ARMISD::SUBE) &&
+         "Expect an ADDE or SUBE");

   assert(AddeNode->getNumOperands() == 3 &&
          AddeNode->getOperand(2).getValueType() == MVT::i32 &&
@@ -9885,7 +9894,10 @@
   // Check that we are chained to the right ADDC node.
   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
-  if (AddcNode->getOpcode() != ARMISD::ADDC)
+  if ((AddeNode->getOpcode() == ARMISD::ADDE &&
+       AddcNode->getOpcode() != ARMISD::ADDC) ||
+      (AddeNode->getOpcode() == ARMISD::SUBE &&
+       AddcNode->getOpcode() != ARMISD::SUBC))
     return SDValue();

   SDValue AddcOp0 = AddcNode->getOperand(0);
@@ -9968,10 +9980,38 @@
   // Create the merged node.
   SelectionDAG &DAG = DCI.DAG;

-  // Build operand list.
+  // Start building operand list.
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(LoMul->getOperand(0));
   Ops.push_back(LoMul->getOperand(1));
+
+  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
+  // the case, we must be doing signed multiplication and only use the higher
+  // part of the result of the MLAL, furthermore the LowAdd must be a constant
+  // addition or subtraction with the value of 0x80000000.
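+  //
+  // For example (cf. the SMMULR test in test/CodeGen/ARM/dsp-mlal.ll below),
+  // IR such as
+  //   %mul = mul nsw i64 %conv1, %conv     ; operands sign-extended from i32
+  //   %add = add nsw i64 %mul, 2147483648  ; 2147483648 == 0x80000000
+  //   %0   = lshr i64 %add, 32
+  // only uses the low half of the product to apply the rounding constant, so
+  // it can be selected to SMMULR (or SMMLAR/SMMLSR when an accumulator is
+  // present) instead of a long multiply-accumulate sequence.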
+  if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
+      FinalOpc == ARMISD::SMLAL && !AddeNode->hasAnyUseOfValue(1) &&
+      LowAdd->getNode()->getOpcode() == ISD::Constant &&
+      static_cast<ConstantSDNode *>(LowAdd->getNode())->getZExtValue() ==
+          0x80000000) {
+    Ops.push_back(*HiAdd);
+    if (AddcNode->getOpcode() == ARMISD::SUBC) {
+      FinalOpc = ARMISD::MULHS_SR;
+    } else {
+      FinalOpc = ARMISD::MULHS_AR;
+    }
+    SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcNode), MVT::i32, Ops);
+    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), NewNode);
+
+    return SDValue(AddeNode, 0);
+  } else if (AddcNode->getOpcode() == ARMISD::SUBC)
+    // SMMLS is generated during instruction selection and the rest of this
+    // function cannot handle the case where AddcNode is a SUBC.
+    return SDValue();
+
+  // Finish building the operand list for {U/S}MLAL
   Ops.push_back(*LowAdd);
   Ops.push_back(*HiAdd);
@@ -10003,16 +10043,19 @@

   // Check that we have a glued ADDC node.
   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
-  if (AddcNode->getOpcode() != ARMISD::ADDC)
+  if (AddcNode->getOpcode() != ARMISD::ADDC &&
+      AddcNode->getOpcode() != ARMISD::SUBC)
     return SDValue();

   // Find the converted UMAAL or quit if it doesn't exist.
   SDNode *UmlalNode = nullptr;
   SDValue AddHi;
-  if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
+  if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL &&
+      AddcNode->getOpcode() != ARMISD::SUBC) {
     UmlalNode = AddcNode->getOperand(0).getNode();
     AddHi = AddcNode->getOperand(1);
-  } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
+  } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL &&
+             AddcNode->getOpcode() != ARMISD::SUBC) {
     UmlalNode = AddcNode->getOperand(1).getNode();
     AddHi = AddcNode->getOperand(0);
   } else {
@@ -10098,9 +10141,11 @@
   return SDValue();
 }

-static SDValue PerformAddeSubeCombine(SDNode *N, SelectionDAG &DAG,
+static SDValue PerformAddeSubeCombine(SDNode *N,
+                                      TargetLowering::DAGCombinerInfo &DCI,
                                       const ARMSubtarget *Subtarget) {
   if (Subtarget->isThumb1Only()) {
+    SelectionDAG &DAG = DCI.DAG;
     SDValue RHS = N->getOperand(1);
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
       int64_t imm = C->getSExtValue();
@@ -10118,6 +10163,8 @@
                          N->getOperand(0), RHS, N->getOperand(2));
       }
     }
+  } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
+    return AddCombineTo64bitMLAL(N, DCI, Subtarget);
   }
   return SDValue();
 }
@@ -10130,7 +10177,7 @@
                                       const ARMSubtarget *Subtarget) {
   // Only ARM and Thumb2 support UMLAL/SMLAL.
   if (Subtarget->isThumb1Only())
-    return PerformAddeSubeCombine(N, DCI.DAG, Subtarget);
+    return PerformAddeSubeCombine(N, DCI, Subtarget);

   // Only perform the checks after legalize when the pattern is available.
   if (DCI.isBeforeLegalize())
     return SDValue();
@@ -12338,7 +12385,7 @@
   case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
   case ARMISD::ADDC:
   case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI, Subtarget);
-  case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI.DAG, Subtarget);
+  case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI, Subtarget);
   case ARMISD::BFI:     return PerformBFICombine(N, DCI);
   case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
   case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
Index: lib/Target/ARM/ARMInstrInfo.td
===================================================================
--- lib/Target/ARM/ARMInstrInfo.td
+++ lib/Target/ARM/ARMInstrInfo.td
@@ -105,6 +105,14 @@
 def ARMSmlsld        : SDNode<"ARMISD::SMLSLD", SDT_LongMac>;
 def ARMSmlsldx       : SDNode<"ARMISD::SMLSLDX", SDT_LongMac>;

+def SDT_MulHSR : SDTypeProfile<1, 3, [SDTCisVT<0, i32>,
+                                      SDTCisSameAs<0, 1>,
+                                      SDTCisSameAs<0, 2>,
+                                      SDTCisSameAs<0, 3>]>;
+
+def ARMmulhs_ar : SDNode<"ARMISD::MULHS_AR", SDT_MulHSR>;
+def ARMmulhs_sr : SDNode<"ARMISD::MULHS_SR", SDT_MulHSR>;
+
 // Node definitions.
 def ARMWrapper       : SDNode<"ARMISD::Wrapper",     SDTIntUnaryOp>;
 def ARMWrapperPIC    : SDNode<"ARMISD::WrapperPIC",  SDTIntUnaryOp>;
@@ -4143,7 +4151,8 @@
 }

 def SMMULR : AMul2I <0b0111010, 0b0011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
-               IIC_iMUL32, "smmulr", "\t$Rd, $Rn, $Rm", []>,
+               IIC_iMUL32, "smmulr", "\t$Rd, $Rn, $Rm",
+               [(set GPR:$Rd, (ARMmulhs_ar GPR:$Rn, GPR:$Rm, (i32 0)))]>,
             Requires<[IsARM, HasV6]>,
             Sched<[WriteMUL32, ReadMUL, ReadMUL]> {
   let Inst{15-12} = 0b1111;
@@ -4158,7 +4167,8 @@

 def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd),
                (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
-               IIC_iMAC32, "smmlar", "\t$Rd, $Rn, $Rm, $Ra", []>,
+               IIC_iMAC32, "smmlar", "\t$Rd, $Rn, $Rm, $Ra",
+               [(set GPR:$Rd, (ARMmulhs_ar GPR:$Rn, GPR:$Rm, GPR:$Ra))]>,
             Requires<[IsARM, HasV6]>,
             Sched<[WriteMAC32, ReadMUL, ReadMUL, ReadMAC]>;

@@ -4170,7 +4180,8 @@

 def SMMLSR : AMul2Ia <0b0111010, 0b1111, (outs GPR:$Rd),
                (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
-               IIC_iMAC32, "smmlsr", "\t$Rd, $Rn, $Rm, $Ra", []>,
+               IIC_iMAC32, "smmlsr", "\t$Rd, $Rn, $Rm, $Ra",
+               [(set GPR:$Rd, (ARMmulhs_sr GPR:$Rn, GPR:$Rm, GPR:$Ra))]>,
             Requires<[IsARM, HasV6]>,
             Sched<[WriteMAC32, ReadMUL, ReadMUL, ReadMAC]>;
Index: lib/Target/ARM/ARMInstrThumb2.td
===================================================================
--- lib/Target/ARM/ARMInstrThumb2.td
+++ lib/Target/ARM/ARMInstrThumb2.td
@@ -2661,7 +2661,9 @@
 }

 def t2SMMUL : T2SMMUL<0b0000, "smmul",
                       [(set rGPR:$Rd, (mulhs rGPR:$Rn, rGPR:$Rm))]>;
-def t2SMMULR : T2SMMUL<0b0001, "smmulr", []>;
+def t2SMMULR :
+  T2SMMUL<0b0001, "smmulr",
+          [(set rGPR:$Rd, (ARMmulhs_ar rGPR:$Rn, rGPR:$Rm, (i32 0)))]>;

 class T2FourRegSMMLA<bits<3> op22_20, bits<4> op7_4, string opc,
                      list<dag> pattern>
@@ -2677,9 +2679,11 @@

 def t2SMMLA : T2FourRegSMMLA<0b101, 0b0000, "smmla",
                 [(set rGPR:$Rd, (add (mulhs rGPR:$Rm, rGPR:$Rn), rGPR:$Ra))]>;
-def t2SMMLAR: T2FourRegSMMLA<0b101, 0b0001, "smmlar", []>;
+def t2SMMLAR: T2FourRegSMMLA<0b101, 0b0001, "smmlar",
+                [(set rGPR:$Rd, (ARMmulhs_ar rGPR:$Rn, rGPR:$Rm, rGPR:$Ra))]>;
 def t2SMMLS:  T2FourRegSMMLA<0b110, 0b0000, "smmls", []>;
-def t2SMMLSR: T2FourRegSMMLA<0b110, 0b0001, "smmlsr", []>;
+def t2SMMLSR: T2FourRegSMMLA<0b110, 0b0001, "smmlsr",
+                [(set rGPR:$Rd, (ARMmulhs_sr rGPR:$Rn, rGPR:$Rm, rGPR:$Ra))]>;

 class T2ThreeRegSMUL<bits<3> op22_20, bits<2> op5_4, string opc,
                      list<dag> pattern>
Index: test/CodeGen/ARM/dsp-mlal.ll
===================================================================
--- /dev/null
+++ test/CodeGen/ARM/dsp-mlal.ll
@@ -0,0 +1,171 @@
+; RUN: llc -mtriple=thumbv7m -mattr=+dsp %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv7a %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv7m -mattr=-dsp %s -o - | FileCheck --check-prefix=NODSP %s
+
+define hidden i32 @SMMULR_SMMLAR(i32 %a, i32 %b0, i32 %b1, i32 %Xn, i32 %Xn1) local_unnamed_addr {
+entry:
+; CHECK-LABEL: SMMULR_SMMLAR:
+; CHECK: ldr r0, [sp]
+; CHECK-NEXT: smmulr
+; CHECK-NEXT: smmlar
+; NODSP-LABEL: SMMULR_SMMLAR:
+; NODSP-NOT: smmulr
+; NODSP-NOT: smmlar
+  %conv = sext i32 %b1 to i64
+  %conv1 = sext i32 %Xn1 to i64
+  %mul = mul nsw i64 %conv1, %conv
+  %add = add nsw i64 %mul, 2147483648
+  %0 = and i64 %add, -4294967296
+  %conv4 = sext i32 %b0 to i64
+  %conv5 = sext i32 %Xn to i64
+  %mul6 = mul nsw i64 %conv5, %conv4
+  %add7 = add i64 %mul6, 2147483648
+  %add8 = add i64 %add7, %0
+  %1 = lshr i64 %add8, 32
+  %conv10 = trunc i64 %1 to i32
+  ret i32 %conv10
+}
+
+define hidden i32 @SMMULR(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: SMMULR:
+; CHECK: smmulr
+; NODSP-LABEL: SMMULR:
+; NODSP-NOT: smmulr
+  %conv = sext i32 %b to i64
+  %conv1 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv1, %conv
+  %add = add nsw i64 %mul, 2147483648
+  %0 = lshr i64 %add, 32
+  %conv2 = trunc i64 %0 to i32
+  ret i32 %conv2
+}
+
+define hidden i32 @SMMUL(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: SMMUL:
+; CHECK: smmul
+; NODSP-LABEL: SMMUL:
+; NODSP-NOT: smmul
+  %conv = sext i32 %b to i64
+  %conv1 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv1, %conv
+  %0 = lshr i64 %mul, 32
+  %conv2 = trunc i64 %0 to i32
+  ret i32 %conv2
+}
+
+define hidden i32 @SMMLSR(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: SMMLSR:
+; CHECK: smmlsr
+; NODSP-LABEL: SMMLSR:
+; NODSP-NOT: smmlsr
+  %conv6 = zext i32 %a to i64
+  %shl = shl nuw i64 %conv6, 32
+  %conv1 = sext i32 %b to i64
+  %conv2 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv2, %conv1
+  %sub = or i64 %shl, 2147483648
+  %add = sub i64 %sub, %mul
+  %0 = lshr i64 %add, 32
+  %conv3 = trunc i64 %0 to i32
+  ret i32 %conv3
+}
+
+define hidden i32 @NOT_SMMLSR(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: NOT_SMMLSR:
+; CHECK-NOT: smmlsr
+; NODSP-LABEL: NOT_SMMLSR:
+; NODSP-NOT: smmlsr
+  %conv = sext i32 %b to i64
+  %conv1 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv1, %conv
+  %add = add nsw i64 %mul, 2147483648
+  %0 = lshr i64 %add, 32
+  %conv2 = trunc i64 %0 to i32
+  %sub = sub nsw i32 %a, %conv2
+  ret i32 %sub
+}
+
+define hidden i32 @SMMLS(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: SMMLS:
+; CHECK: smmls
+; NODSP-LABEL: SMMLS:
+; NODSP-NOT: smmls
+  %conv5 = zext i32 %a to i64
+  %shl = shl nuw i64 %conv5, 32
+  %conv1 = sext i32 %b to i64
+  %conv2 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv2, %conv1
+  %sub = sub nsw i64 %shl, %mul
+  %0 = lshr i64 %sub, 32
+  %conv3 = trunc i64 %0 to i32
+  ret i32 %conv3
+}
+
+define hidden i32 @NOT_SMMLS(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: NOT_SMMLS:
+; CHECK-NOT: smmls
+; NODSP-LABEL: NOT_SMMLS:
+; NODSP-NOT: smmls
+  %conv = sext i32 %b to i64
+  %conv1 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv1, %conv
+  %0 = lshr i64 %mul, 32
+  %conv2 = trunc i64 %0 to i32
+  %sub = sub nsw i32 %a, %conv2
+  ret i32 %sub
+}
+
+define hidden i32 @SMMLA(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: SMMLA:
+; CHECK: smmla
+; NODSP-LABEL: SMMLA:
+; NODSP-NOT: smmla
+  %conv = sext i32 %b to i64
+  %conv1 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv1, %conv
+  %0 = lshr i64 %mul, 32
+  %conv2 = trunc i64 %0 to i32
+  %add = add nsw i32 %conv2, %a
+  ret i32 %add
+}
+
+define hidden i32 @SMMLAR(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: SMMLAR:
+; CHECK: smmlar
+; NODSP-LABEL: SMMLAR:
+; NODSP-NOT: smmlar
+  %conv7 = zext i32 %a to i64
+  %shl = shl nuw i64 %conv7, 32
+  %conv1 = sext i32 %b to i64
+  %conv2 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv2, %conv1
+  %add = or i64 %shl, 2147483648
+  %add3 = add i64 %add, %mul
+  %0 = lshr i64 %add3, 32
+  %conv4 = trunc i64 %0 to i32
+  ret i32 %conv4
+}
+
+define hidden i32 @NOT_SMMLA(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
+entry:
+; CHECK-LABEL: NOT_SMMLA:
+; CHECK-NOT: smmla
+; NODSP-LABEL: NOT_SMMLA:
+; NODSP-NOT: smmla
+  %conv = sext i32 %b to i64
+  %conv1 = sext i32 %c to i64
+  %mul = mul nsw i64 %conv1, %conv
+  %0 = lshr i64 %mul, 32
+  %conv2 = trunc i64 %0 to i32
+  %add = xor i32 %conv2, -2147483648
+  %add3 = add i32 %add, %a
+  ret i32 %add3
+}
Index: test/CodeGen/ARM/smml.ll
===================================================================
--- test/CodeGen/ARM/smml.ll
+++ test/CodeGen/ARM/smml.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V4
-; RUN: llc -mtriple=armv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6
+; RUN: llc -mtriple=armv6t2-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6
 ; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6
 ; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMB
 ; RUN: llc -mtriple=thumbv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6