Index: lib/Target/ARM/ARMParallelDSP.cpp
===================================================================
--- lib/Target/ARM/ARMParallelDSP.cpp
+++ lib/Target/ARM/ARMParallelDSP.cpp
@@ -165,9 +165,14 @@
   };
 }
 
-template<unsigned BitWidth>
+// MaxBitwidth: the maximum supported bitwidth of the elements in the DSP
+// instructions, which is set to 16. So here we should collect all i8 and i16
+// narrow operations.
+// TODO: we currently only collect i16, and will support i8 later, so that's
+// why we check that types are equal to MaxBitWidth, and not <= MaxBitWidth.
+template<unsigned MaxBitWidth>
 static bool IsNarrowSequence(Value *V, ValueList &VL) {
-  LLVM_DEBUG(dbgs() << "Is narrow sequence: "; V->dump());
+  LLVM_DEBUG(dbgs() << "Is narrow sequence? "; V->dump());
   ConstantInt *CInt;
 
   if (match(V, m_ConstantInt(CInt))) {
@@ -180,38 +185,30 @@
     return false;
 
   Value *Val, *LHS, *RHS;
-  bool isNarrow = false;
-
   if (match(V, m_Trunc(m_Value(Val)))) {
-    if (cast<TruncInst>(I)->getDestTy()->getIntegerBitWidth() == BitWidth)
-      isNarrow = IsNarrowSequence<BitWidth>(Val, VL);
+    if (cast<TruncInst>(I)->getDestTy()->getIntegerBitWidth() == MaxBitWidth)
+      return IsNarrowSequence<MaxBitWidth>(Val, VL);
   } else if (match(V, m_Add(m_Value(LHS), m_Value(RHS)))) {
     // TODO: we need to implement sadd16/sadd8 for this, which enables to
     // also do the rewrite for smlad8.ll, but it is unsupported for now.
-    isNarrow = false;
+    LLVM_DEBUG(dbgs() << "No, unsupported Op:\t"; I->dump());
+    return false;
   } else if (match(V, m_ZExtOrSExt(m_Value(Val)))) {
-    if (cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth() == BitWidth)
-      isNarrow = true;
-    else
-      LLVM_DEBUG(dbgs() << "Wrong SrcTy size of CastInst: " <<
-                 cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth());
-
-    if (match(Val, m_Load(m_Value(Val)))) {
-      auto *Ld = dyn_cast<LoadInst>(I->getOperand(0));
-      LLVM_DEBUG(dbgs() << "Found narrow Load:\t"; Ld->dump());
-      VL.push_back(Ld);
-      isNarrow = true;
-    } else if (!isa<Instruction>(I->getOperand(0)))
-      VL.push_back(I->getOperand(0));
-  }
-
-  if (isNarrow) {
-    LLVM_DEBUG(dbgs() << "Found narrow Op:\t"; I->dump());
-    VL.push_back(I);
-  } else
-    LLVM_DEBUG(dbgs() << "Found unsupported Op:\t"; I->dump());
+    if (cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth() != MaxBitWidth) {
+      LLVM_DEBUG(dbgs() << "No, wrong SrcTy size: " <<
+                 cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth() << "\n");
+      return false;
+    }
 
-  return isNarrow;
+    if (match(Val, m_Load(m_Value()))) {
+      LLVM_DEBUG(dbgs() << "Yes, found narrow Load:\t"; Val->dump());
+      VL.push_back(Val);
+      VL.push_back(I);
+      return true;
+    }
+  }
+  LLVM_DEBUG(dbgs() << "No, unsupported Op:\t"; I->dump());
+  return false;
 }
 
 // Element-by-element comparison of Value lists returning true if they are
Index: test/CodeGen/ARM/smlad1.ll
===================================================================
--- test/CodeGen/ARM/smlad1.ll
+++ test/CodeGen/ARM/smlad1.ll
@@ -1,5 +1,6 @@
 ; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
 
+; CHECK-LABEL: @test1
 ; CHECK: %mac1{{\.}}026 = phi i32 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
 ; CHECK: [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
 ; CHECK: [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
@@ -7,7 +8,7 @@
 ; CHECK: [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
 ; CHECK: [[V8]] = call i32 @llvm.arm.smlad(i32 [[V5]], i32 [[V7]], i32 %mac1{{\.}}026)
 
-define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+define dso_local i32 @test1(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
 entry:
   %cmp24 = icmp sgt i32 %arg, 0
   br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
@@ -48,3 +49,47 @@
   br i1 %exitcond, label %for.body, label %for.cond.cleanup
 }
 
+; Here we have i8 loads, which we do want to support, but don't handle yet.
+;
+; CHECK-LABEL: @test2
+; CHECK-NOT: call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test2(i32 %arg, i32* nocapture readnone %arg1, i8* nocapture readonly %arg2, i8* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i8, i8* %arg3, align 2
+  %.pre27 = load i8, i8* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i8, i8* %arg3, i32 %i.025
+  %0 = load i8, i8* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i8, i8* %arg3, i32 %add
+  %1 = load i8, i8* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i8, i8* %arg2, i32 %i.025
+  %2 = load i8, i8* %arrayidx3, align 2
+  %conv = sext i8 %2 to i32
+  %conv4 = sext i8 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i8, i8* %arg2, i32 %add
+  %3 = load i8, i8* %arrayidx6, align 2
+  %conv7 = sext i8 %3 to i32
+  %conv8 = sext i8 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %add10, %mul9
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+