Index: include/llvm/Analysis/TargetTransformInfo.h
===================================================================
--- include/llvm/Analysis/TargetTransformInfo.h
+++ include/llvm/Analysis/TargetTransformInfo.h
@@ -444,6 +444,13 @@
   bool isLegalMaskedScatter(Type *DataType) const;
   bool isLegalMaskedGather(Type *DataType) const;
 
+  /// Return true if the target has a unified operation to calculate division
+  /// and remainder. If so, the additional implicit multiplication and
+  /// subtraction required to calculate a remainder from division are free. This
+  /// can enable more aggressive transformations for division and remainder than
+  /// would typically be allowed using throughput or size cost models.
+  bool hasDivRemOp(Type *DataType, bool IsSigned) const;
+
   /// Return true if target doesn't mind addresses in vectors.
   bool prefersVectorizedAddressing() const;
 
@@ -911,6 +918,7 @@
   virtual bool isLegalMaskedLoad(Type *DataType) = 0;
   virtual bool isLegalMaskedScatter(Type *DataType) = 0;
   virtual bool isLegalMaskedGather(Type *DataType) = 0;
+  virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
   virtual bool prefersVectorizedAddressing() = 0;
   virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                    int64_t BaseOffset, bool HasBaseReg,
@@ -1130,6 +1138,9 @@
   bool isLegalMaskedGather(Type *DataType) override {
     return Impl.isLegalMaskedGather(DataType);
   }
+  bool hasDivRemOp(Type *DataType, bool IsSigned) override {
+    return Impl.hasDivRemOp(DataType, IsSigned);
+  }
   bool prefersVectorizedAddressing() override {
     return Impl.prefersVectorizedAddressing();
   }
Index: include/llvm/Analysis/TargetTransformInfoImpl.h
===================================================================
--- include/llvm/Analysis/TargetTransformInfoImpl.h
+++ include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -251,6 +251,8 @@
   bool isLegalMaskedGather(Type *DataType) { return false; }
 
+  bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }
+
   bool prefersVectorizedAddressing() { return true; }
 
   int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h
+++ include/llvm/InitializePasses.h
@@ -113,6 +113,7 @@
 void initializeDependenceAnalysisWrapperPassPass(PassRegistry&);
 void initializeDetectDeadLanesPass(PassRegistry&);
 void initializeDivergenceAnalysisPass(PassRegistry&);
+void initializeDivRemPairsLegacyPassPass(PassRegistry&);
 void initializeDomOnlyPrinterPass(PassRegistry&);
 void initializeDomOnlyViewerPass(PassRegistry&);
 void initializeDomPrinterPass(PassRegistry&);
Index: include/llvm/Transforms/Scalar.h
===================================================================
--- include/llvm/Transforms/Scalar.h
+++ include/llvm/Transforms/Scalar.h
@@ -377,6 +377,12 @@
 //===----------------------------------------------------------------------===//
 //
+// DivRemPairs - Hoist/decompose integer division and remainder instructions.
+//
+FunctionPass *createDivRemPairsPass();
+
+//===----------------------------------------------------------------------===//
+//
 // MemCpyOpt - This pass performs optimizations related to eliminating memcpy
 // calls and/or combining multiple stores into memset's.
 //
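For an out-of-tree target, the new hook can be wired up the same way the X86 override later in this patch does: report true exactly when the target's lowering marks ISD::SDIVREM/UDIVREM as Legal for the type. A minimal sketch, assuming a hypothetical MyTargetTTIImpl with the usual TLI and DL members (the names here are illustrative, not part of the patch):

    bool MyTargetTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
      // The combined div+rem is only "free" if the type legalizer would keep
      // it as a single operation for this value type.
      EVT VT = TLI->getValueType(DL, DataType);
      return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
    }
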
Index: include/llvm/Transforms/Scalar/DivRemPairs.h
===================================================================
--- include/llvm/Transforms/Scalar/DivRemPairs.h
+++ include/llvm/Transforms/Scalar/DivRemPairs.h
@@ -0,0 +1,31 @@
+//===- DivRemPairs.h - Hoist/decompose integer division and remainder ----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists and/or decomposes integer division and remainder
+// instructions to enable CFG improvements and better codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+#define LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Hoist/decompose integer division and remainder instructions to enable CFG
+/// improvements and better codegen.
+struct DivRemPairsPass : public PassInfoMixin<DivRemPairsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+} // namespace llvm
+#endif // LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+
Index: lib/Analysis/TargetTransformInfo.cpp
===================================================================
--- lib/Analysis/TargetTransformInfo.cpp
+++ lib/Analysis/TargetTransformInfo.cpp
@@ -170,6 +170,10 @@
   return TTIImpl->isLegalMaskedScatter(DataType);
 }
 
+bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
+  return TTIImpl->hasDivRemOp(DataType, IsSigned);
+}
+
 bool TargetTransformInfo::prefersVectorizedAddressing() const {
   return TTIImpl->prefersVectorizedAddressing();
 }
Index: lib/Passes/PassBuilder.cpp
===================================================================
--- lib/Passes/PassBuilder.cpp
+++ lib/Passes/PassBuilder.cpp
@@ -92,6 +92,7 @@
 #include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
 #include "llvm/Transforms/Scalar/DCE.h"
 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
+#include "llvm/Transforms/Scalar/DivRemPairs.h"
 #include "llvm/Transforms/Scalar/EarlyCSE.h"
 #include "llvm/Transforms/Scalar/Float2Int.h"
 #include "llvm/Transforms/Scalar/GVN.h"
@@ -765,6 +766,11 @@
   // And finally clean up LCSSA form before generating code.
   OptimizePM.addPass(InstSimplifierPass());
 
+  // This hoists/decomposes div/rem ops. It should run after other sink/hoist
+  // passes to avoid re-sinking, but before SimplifyCFG because it can allow
+  // flattening of blocks.
+  OptimizePM.addPass(DivRemPairsPass());
+
   // LoopSink (and other loop passes since the last simplifyCFG) might have
   // resulted in single-entry-single-exit or empty blocks. Clean up the CFG.
   OptimizePM.addPass(SimplifyCFGPass());
Index: lib/Passes/PassRegistry.def
===================================================================
--- lib/Passes/PassRegistry.def
+++ lib/Passes/PassRegistry.def
@@ -142,6 +142,7 @@
 FUNCTION_PASS("consthoist", ConstantHoistingPass())
 FUNCTION_PASS("correlated-propagation", CorrelatedValuePropagationPass())
 FUNCTION_PASS("dce", DCEPass())
+FUNCTION_PASS("div-rem-pairs", DivRemPairsPass())
 FUNCTION_PASS("dse", DSEPass())
 FUNCTION_PASS("dot-cfg", CFGPrinterPass())
 FUNCTION_PASS("dot-cfg-only", CFGOnlyPrinterPass())
Index: lib/Target/X86/X86TargetTransformInfo.h
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.h
+++ lib/Target/X86/X86TargetTransformInfo.h
@@ -121,6 +121,7 @@
   bool isLegalMaskedStore(Type *DataType);
   bool isLegalMaskedGather(Type *DataType);
   bool isLegalMaskedScatter(Type *DataType);
+  bool hasDivRemOp(Type *DataType, bool IsSigned);
   bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
   bool expandMemCmp(Instruction *I, unsigned &MaxLoadSize);
Index: lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.cpp
+++ lib/Target/X86/X86TargetTransformInfo.cpp
@@ -2365,6 +2365,11 @@
   return isLegalMaskedGather(DataType);
 }
 
+bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
+  EVT VT = TLI->getValueType(DL, DataType);
+  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
+}
+
 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                      const Function *Callee) const {
   const TargetMachine &TM = getTLI()->getTargetMachine();
Index: lib/Transforms/IPO/PassManagerBuilder.cpp
===================================================================
--- lib/Transforms/IPO/PassManagerBuilder.cpp
+++ lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -673,6 +673,11 @@
   // Get rid of LCSSA nodes.
   MPM.add(createInstructionSimplifierPass());
 
+  // This hoists/decomposes div/rem ops. It should run after other sink/hoist
+  // passes to avoid re-sinking, but before SimplifyCFG because it can allow
+  // flattening of blocks.
+  MPM.add(createDivRemPairsPass());
+
   // LoopSink (and other loop passes since the last simplifyCFG) might have
   // resulted in single-entry-single-exit or empty blocks. Clean up the CFG.
   MPM.add(createCFGSimplificationPass());
Index: lib/Transforms/Scalar/CMakeLists.txt
===================================================================
--- lib/Transforms/Scalar/CMakeLists.txt
+++ lib/Transforms/Scalar/CMakeLists.txt
@@ -7,6 +7,7 @@
   CorrelatedValuePropagation.cpp
   DCE.cpp
   DeadStoreElimination.cpp
+  DivRemPairs.cpp
   EarlyCSE.cpp
   FlattenCFGPass.cpp
   Float2Int.cpp
Index: lib/Transforms/Scalar/DivRemPairs.cpp
===================================================================
--- lib/Transforms/Scalar/DivRemPairs.cpp
+++ lib/Transforms/Scalar/DivRemPairs.cpp
@@ -0,0 +1,203 @@
+//===- DivRemPairs.cpp - Hoist/decompose division and remainder -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists and/or decomposes integer division and remainder
+// instructions to enable CFG improvements and better codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar/DivRemPairs.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Pass.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BypassSlowDivision.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "div-rem-pairs"
+STATISTIC(NumPairs, "Number of div/rem pairs");
+STATISTIC(NumHoisted, "Number of instructions hoisted");
+STATISTIC(NumDecomposed, "Number of instructions decomposed");
+
+/// Find matching pairs of integer div/rem ops (they have the same numerator,
+/// denominator, and signedness). If they exist in different basic blocks,
+/// bring them together by hoisting, or replace the common division operation
+/// that is implicit in the remainder:
+/// X % Y <--> X - ((X / Y) * Y).
+///
+/// We can largely ignore the normal safety and cost constraints on speculation
+/// of these ops when we find a matching pair. This is because we are already
+/// guaranteed that any exceptions and most cost are already incurred by the
+/// first member of the pair.
+///
+/// Note: This transform could be an oddball enhancement to EarlyCSE, GVN, or
+/// SimplifyCFG, but it's split off on its own because it's different enough
+/// that it doesn't quite match the stated objectives of those passes.
+static bool optimizeDivRem(Function &F, const TargetTransformInfo &TTI,
+                           const DominatorTree &DT) {
+  bool Changed = false;
+
+  // Insert all divide and remainder instructions into maps keyed by their
+  // operands and opcode (signed or unsigned).
+  DenseMap<DivRemMapKey, Instruction *> DivMap, RemMap;
+  for (auto &BB : F) {
+    for (auto &I : BB) {
+      if (I.getOpcode() == Instruction::SDiv)
+        DivMap[DivRemMapKey(true, I.getOperand(0), I.getOperand(1))] = &I;
+      else if (I.getOpcode() == Instruction::UDiv)
+        DivMap[DivRemMapKey(false, I.getOperand(0), I.getOperand(1))] = &I;
+      else if (I.getOpcode() == Instruction::SRem)
+        RemMap[DivRemMapKey(true, I.getOperand(0), I.getOperand(1))] = &I;
+      else if (I.getOpcode() == Instruction::URem)
+        RemMap[DivRemMapKey(false, I.getOperand(0), I.getOperand(1))] = &I;
+    }
+  }
+
+  // We can iterate over either map because we are only looking for matched
+  // pairs. Choose remainders for efficiency because they are usually even more
+  // rare than division.
+  for (auto &RemPair : RemMap) {
+    // Find the matching division instruction from the division map.
+    Instruction *DivInst = DivMap[RemPair.getFirst()];
+    if (!DivInst)
+      continue;
+
+    // We have a matching pair of div/rem instructions. If they are in
+    // different blocks and one dominates the other, hoist and/or replace one.
+    NumPairs++;
+    Instruction *RemInst = RemPair.getSecond();
+    BasicBlock *RemBB = RemInst->getParent();
+    BasicBlock *DivBB = DivInst->getParent();
+    if (RemBB == DivBB)
+      continue;
+
+    bool DivDominates = DT.dominates(DivBB, RemBB);
+    if (!DivDominates && !DT.dominates(RemBB, DivBB))
+      continue;
+
+    bool IsSigned = DivInst->getOpcode() == Instruction::SDiv;
+    bool HasDivRemOp = TTI.hasDivRemOp(DivInst->getType(), IsSigned);
+    if (HasDivRemOp) {
+      // The target has a single div/rem operation. Hoist the lower instruction
+      // to make the matched pair visible to the backend. Decomposing the
+      // remainder when the target has support for it would be wasteful and
+      // potentially misleading to other passes.
+      if (DivDominates)
+        RemInst->moveAfter(DivInst);
+      else
+        DivInst->moveAfter(RemInst);
+      NumHoisted++;
+    } else {
+      // The target does not have a single div/rem operation. Decompose the
+      // remainder calculation as:
+      // X % Y --> X - ((X / Y) * Y).
+      Value *X = RemInst->getOperand(0);
+      Value *Y = RemInst->getOperand(1);
+      Instruction *Mul = BinaryOperator::CreateMul(DivInst, Y);
+      Instruction *Sub = BinaryOperator::CreateSub(X, Mul);
+
+      // If the remainder dominates, then hoist the division up to that block:
+      //
+      // bb1:
+      //   %rem = srem %x, %y
+      // bb2:
+      //   %div = sdiv %x, %y
+      // -->
+      // bb1:
+      //   %div = sdiv %x, %y
+      //   %mul = mul %div, %y
+      //   %rem = sub %x, %mul
+      //
+      // If the division dominates, it's already in the right place. The mul+sub
+      // will be in a different block because we don't assume that they are
+      // cheap to speculatively execute:
+      //
+      // bb1:
+      //   %div = sdiv %x, %y
+      // bb2:
+      //   %rem = srem %x, %y
+      // -->
+      // bb1:
+      //   %div = sdiv %x, %y
+      // bb2:
+      //   %mul = mul %div, %y
+      //   %rem = sub %x, %mul
+
+      if (!DivDominates)
+        DivInst->moveBefore(RemInst);
+      Mul->insertAfter(RemInst);
+      Sub->insertAfter(Mul);
+
+      // Now kill the explicit remainder. We have replaced it with:
+      // (sub X, (mul (div X, Y), Y))
+      RemInst->replaceAllUsesWith(Sub);
+      RemInst->eraseFromParent();
+      NumDecomposed++;
+    }
+    Changed = true;
+  }
+
+  return Changed;
+}
+
+// Pass manager boilerplate below here.
+
+namespace {
+struct DivRemPairsLegacyPass : public FunctionPass {
+  static char ID;
+  DivRemPairsLegacyPass() : FunctionPass(ID) {
+    initializeDivRemPairsLegacyPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<DominatorTreeWrapperPass>();
+    AU.addRequired<TargetTransformInfoWrapperPass>();
+    AU.setPreservesCFG();
+    AU.addPreserved<DominatorTreeWrapperPass>();
+    AU.addPreserved<GlobalsAAWrapperPass>();
+    FunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnFunction(Function &F) override {
+    if (skipFunction(F))
+      return false;
+    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+    return optimizeDivRem(F, TTI, DT);
+  }
+};
+}
+
+char DivRemPairsLegacyPass::ID = 0;
+INITIALIZE_PASS_BEGIN(DivRemPairsLegacyPass, "div-rem-pairs",
+                      "Hoist/decompose integer division and remainder", false,
+                      false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(DivRemPairsLegacyPass, "div-rem-pairs",
+                    "Hoist/decompose integer division and remainder", false,
+                    false)
+
+FunctionPass *llvm::createDivRemPairsPass() {
+  return new DivRemPairsLegacyPass();
+}
+
+PreservedAnalyses DivRemPairsPass::run(Function &F,
+                                       FunctionAnalysisManager &FAM) {
+  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
+  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
+  if (!optimizeDivRem(F, TTI, DT))
+    return PreservedAnalyses::all();
+  // TODO: This pass just hoists/replaces math ops - all analyses are preserved?
+  PreservedAnalyses PA;
+  PA.preserveSet<CFGAnalyses>();
+  PA.preserve<GlobalsAA>();
+  return PA;
+}
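To make the decomposed form concrete with numbers (sdiv/udiv use C-style truncating division): 17 % 5 = 17 - (17 / 5) * 5 = 17 - 3 * 5 = 2, and in the signed case -7 % 2 = -7 - (-7 / 2) * 2 = -7 - (-3) * 2 = -1, matching srem's rule that the remainder takes the sign of the dividend. This is why the mul+sub replacement needs no extra signedness handling beyond pairing srem with sdiv and urem with udiv.
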
Index: lib/Transforms/Scalar/Scalar.cpp
===================================================================
--- lib/Transforms/Scalar/Scalar.cpp
+++ lib/Transforms/Scalar/Scalar.cpp
@@ -40,6 +40,7 @@
   initializeCorrelatedValuePropagationPass(Registry);
   initializeDCELegacyPassPass(Registry);
   initializeDeadInstEliminationPass(Registry);
+  initializeDivRemPairsLegacyPassPass(Registry);
   initializeScalarizerPass(Registry);
   initializeDSELegacyPassPass(Registry);
   initializeGuardWideningLegacyPassPass(Registry);
Index: test/Other/new-pm-defaults.ll
===================================================================
--- test/Other/new-pm-defaults.ll
+++ test/Other/new-pm-defaults.ll
@@ -205,6 +205,7 @@
 ; CHECK-O-NEXT: Running pass: AlignmentFromAssumptionsPass
 ; CHECK-O-NEXT: Running pass: LoopSinkPass
 ; CHECK-O-NEXT: Running pass: InstSimplifierPass
+; CHECK-O-NEXT: Running pass: DivRemPairsPass
 ; CHECK-O-NEXT: Running pass: SimplifyCFGPass
 ; CHECK-O-NEXT: Finished llvm::Function pass manager run.
 ; CHECK-O-NEXT: Running pass: GlobalDCEPass
Index: test/Other/new-pm-thinlto-defaults.ll
===================================================================
--- test/Other/new-pm-thinlto-defaults.ll
+++ test/Other/new-pm-thinlto-defaults.ll
@@ -193,6 +193,7 @@
 ; CHECK-POSTLINK-O-NEXT: Running pass: AlignmentFromAssumptionsPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: LoopSinkPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: InstSimplifierPass
+; CHECK-POSTLINK-O-NEXT: Running pass: DivRemPairsPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: SimplifyCFGPass
 ; CHECK-POSTLINK-O-NEXT: Finished llvm::Function pass manager run.
 ; CHECK-POSTLINK-O-NEXT: Running pass: GlobalDCEPass
Index: test/Transforms/DivRemPairs/div-rem-pairs.ll
===================================================================
--- test/Transforms/DivRemPairs/div-rem-pairs.ll
+++ test/Transforms/DivRemPairs/div-rem-pairs.ll
@@ -0,0 +1,322 @@
+; RUN: opt < %s -div-rem-pairs -S -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=X86
+; RUN: opt < %s -div-rem-pairs -S -mtriple=powerpc64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=PPC
+
+; Hoist and optionally decompose the sdiv because it's safe and free.
+; PR31028 - https://bugs.llvm.org/show_bug.cgi?id=31028
+
+define i32 @hoist_sdiv(i32 %a, i32 %b) {
+; X86-LABEL: @hoist_sdiv(
+; X86-NEXT:  entry:
+; X86-NEXT:    [[REM:%.*]] = srem i32 %a, %b
+; X86-NEXT:    [[DIV:%.*]] = sdiv i32 %a, %b
+; X86-NEXT:    [[CMP:%.*]] = icmp eq i32 [[REM]], 42
+; X86-NEXT:    br i1 [[CMP]], label %if, label %end
+; X86:       if:
+; X86-NEXT:    br label %end
+; X86:       end:
+; X86-NEXT:    [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
+; X86-NEXT:    ret i32 [[RET]]
+;
+; PPC-LABEL: @hoist_sdiv(
+; PPC-NEXT:  entry:
+; PPC-NEXT:    [[DIV:%.*]] = sdiv i32 %a, %b
+; PPC-NEXT:    [[TMP0:%.*]] = mul i32 [[DIV]], %b
+; PPC-NEXT:    [[TMP1:%.*]] = sub i32 %a, [[TMP0]]
+; PPC-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 42
+; PPC-NEXT:    br i1 [[CMP]], label %if, label %end
+; PPC:       if:
+; PPC-NEXT:    br label %end
+; PPC:       end:
+; PPC-NEXT:    [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
+; PPC-NEXT:    ret i32 [[RET]]
+;
+entry:
+  %rem = srem i32 %a, %b
+  %cmp = icmp eq i32 %rem, 42
+  br i1 %cmp, label %if, label %end
+
+if:
+  %div = sdiv i32 %a, %b
+  br label %end
+
+end:
+  %ret = phi i32 [ %div, %if ], [ 3, %entry ]
+  ret i32 %ret
+}
+
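The @hoist_sdiv test above mirrors the motivating pattern from PR31028. Illustrative source that a front end would lower to this kind of IR, shown only for orientation (the function name and constants are taken from the test; the C++ itself is not part of the patch):

    int hoist_sdiv(int a, int b) {
      if (a % b == 42) // srem in the entry block
        return a / b;  // sdiv only on the guarded path
      return 3;
    }
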
+; Hoist and optionally decompose the udiv because it's safe and free.
+
+define i64 @hoist_udiv(i64 %a, i64 %b) {
+; X86-LABEL: @hoist_udiv(
+; X86-NEXT:  entry:
+; X86-NEXT:    [[REM:%.*]] = urem i64 %a, %b
+; X86-NEXT:    [[DIV:%.*]] = udiv i64 %a, %b
+; X86-NEXT:    [[CMP:%.*]] = icmp eq i64 [[REM]], 42
+; X86-NEXT:    br i1 [[CMP]], label %if, label %end
+; X86:       if:
+; X86-NEXT:    br label %end
+; X86:       end:
+; X86-NEXT:    [[RET:%.*]] = phi i64 [ [[DIV]], %if ], [ 3, %entry ]
+; X86-NEXT:    ret i64 [[RET]]
+;
+; PPC-LABEL: @hoist_udiv(
+; PPC-NEXT:  entry:
+; PPC-NEXT:    [[DIV:%.*]] = udiv i64 %a, %b
+; PPC-NEXT:    [[TMP0:%.*]] = mul i64 [[DIV]], %b
+; PPC-NEXT:    [[TMP1:%.*]] = sub i64 %a, [[TMP0]]
+; PPC-NEXT:    [[CMP:%.*]] = icmp eq i64 [[TMP1]], 42
+; PPC-NEXT:    br i1 [[CMP]], label %if, label %end
+; PPC:       if:
+; PPC-NEXT:    br label %end
+; PPC:       end:
+; PPC-NEXT:    [[RET:%.*]] = phi i64 [ [[DIV]], %if ], [ 3, %entry ]
+; PPC-NEXT:    ret i64 [[RET]]
+;
+entry:
+  %rem = urem i64 %a, %b
+  %cmp = icmp eq i64 %rem, 42
+  br i1 %cmp, label %if, label %end
+
+if:
+  %div = udiv i64 %a, %b
+  br label %end
+
+end:
+  %ret = phi i64 [ %div, %if ], [ 3, %entry ]
+  ret i64 %ret
+}
+
+; Hoist the srem if it's safe and free, otherwise decompose it.
+
+define i16 @hoist_srem(i16 %a, i16 %b) {
+; X86-LABEL: @hoist_srem(
+; X86-NEXT:  entry:
+; X86-NEXT:    [[DIV:%.*]] = sdiv i16 %a, %b
+; X86-NEXT:    [[REM:%.*]] = srem i16 %a, %b
+; X86-NEXT:    [[CMP:%.*]] = icmp eq i16 [[DIV]], 42
+; X86-NEXT:    br i1 [[CMP]], label %if, label %end
+; X86:       if:
+; X86-NEXT:    br label %end
+; X86:       end:
+; X86-NEXT:    [[RET:%.*]] = phi i16 [ [[REM]], %if ], [ 3, %entry ]
+; X86-NEXT:    ret i16 [[RET]]
+;
+; PPC-LABEL: @hoist_srem(
+; PPC-NEXT:  entry:
+; PPC-NEXT:    [[DIV:%.*]] = sdiv i16 %a, %b
+; PPC-NEXT:    [[CMP:%.*]] = icmp eq i16 [[DIV]], 42
+; PPC-NEXT:    br i1 [[CMP]], label %if, label %end
+; PPC:       if:
+; PPC-NEXT:    [[TMP0:%.*]] = mul i16 [[DIV]], %b
+; PPC-NEXT:    [[TMP1:%.*]] = sub i16 %a, [[TMP0]]
+; PPC-NEXT:    br label %end
+; PPC:       end:
+; PPC-NEXT:    [[RET:%.*]] = phi i16 [ [[TMP1]], %if ], [ 3, %entry ]
+; PPC-NEXT:    ret i16 [[RET]]
+;
+entry:
+  %div = sdiv i16 %a, %b
+  %cmp = icmp eq i16 %div, 42
+  br i1 %cmp, label %if, label %end
+
+if:
+  %rem = srem i16 %a, %b
+  br label %end
+
+end:
+  %ret = phi i16 [ %rem, %if ], [ 3, %entry ]
+  ret i16 %ret
+}
+
+; Hoist the urem if it's safe and free, otherwise decompose it.
+
+define i8 @hoist_urem(i8 %a, i8 %b) {
+; X86-LABEL: @hoist_urem(
+; X86-NEXT:  entry:
+; X86-NEXT:    [[DIV:%.*]] = udiv i8 %a, %b
+; X86-NEXT:    [[REM:%.*]] = urem i8 %a, %b
+; X86-NEXT:    [[CMP:%.*]] = icmp eq i8 [[DIV]], 42
+; X86-NEXT:    br i1 [[CMP]], label %if, label %end
+; X86:       if:
+; X86-NEXT:    br label %end
+; X86:       end:
+; X86-NEXT:    [[RET:%.*]] = phi i8 [ [[REM]], %if ], [ 3, %entry ]
+; X86-NEXT:    ret i8 [[RET]]
+;
+; PPC-LABEL: @hoist_urem(
+; PPC-NEXT:  entry:
+; PPC-NEXT:    [[DIV:%.*]] = udiv i8 %a, %b
+; PPC-NEXT:    [[CMP:%.*]] = icmp eq i8 [[DIV]], 42
+; PPC-NEXT:    br i1 [[CMP]], label %if, label %end
+; PPC:       if:
+; PPC-NEXT:    [[TMP0:%.*]] = mul i8 [[DIV]], %b
+; PPC-NEXT:    [[TMP1:%.*]] = sub i8 %a, [[TMP0]]
+; PPC-NEXT:    br label %end
+; PPC:       end:
+; PPC-NEXT:    [[RET:%.*]] = phi i8 [ [[TMP1]], %if ], [ 3, %entry ]
+; PPC-NEXT:    ret i8 [[RET]]
+;
+entry:
+  %div = udiv i8 %a, %b
+  %cmp = icmp eq i8 %div, 42
+  br i1 %cmp, label %if, label %end
+
+if:
+  %rem = urem i8 %a, %b
+  br label %end
+
+end:
+  %ret = phi i8 [ %rem, %if ], [ 3, %entry ]
+  ret i8 %ret
+}
+
+; If the ops don't match, don't do anything: signedness.
+
+define i32 @dont_hoist_udiv(i32 %a, i32 %b) {
+; ALL-LABEL: @dont_hoist_udiv(
+; ALL-NEXT:  entry:
+; ALL-NEXT:    [[REM:%.*]] = srem i32 %a, %b
+; ALL-NEXT:    [[CMP:%.*]] = icmp eq i32 [[REM]], 42
+; ALL-NEXT:    br i1 [[CMP]], label %if, label %end
+; ALL:       if:
+; ALL-NEXT:    [[DIV:%.*]] = udiv i32 %a, %b
+; ALL-NEXT:    br label %end
+; ALL:       end:
+; ALL-NEXT:    [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
+; ALL-NEXT:    ret i32 [[RET]]
+;
+entry:
+  %rem = srem i32 %a, %b
+  %cmp = icmp eq i32 %rem, 42
+  br i1 %cmp, label %if, label %end
+
+if:
+  %div = udiv i32 %a, %b
+  br label %end
+
+end:
+  %ret = phi i32 [ %div, %if ], [ 3, %entry ]
+  ret i32 %ret
+}
+
+; If the ops don't match, don't do anything: operation.
+
+define i32 @dont_hoist_srem(i32 %a, i32 %b) {
+; ALL-LABEL: @dont_hoist_srem(
+; ALL-NEXT:  entry:
+; ALL-NEXT:    [[REM:%.*]] = urem i32 %a, %b
+; ALL-NEXT:    [[CMP:%.*]] = icmp eq i32 [[REM]], 42
+; ALL-NEXT:    br i1 [[CMP]], label %if, label %end
+; ALL:       if:
+; ALL-NEXT:    [[REM2:%.*]] = srem i32 %a, %b
+; ALL-NEXT:    br label %end
+; ALL:       end:
+; ALL-NEXT:    [[RET:%.*]] = phi i32 [ [[REM2]], %if ], [ 3, %entry ]
+; ALL-NEXT:    ret i32 [[RET]]
+;
+entry:
+  %rem = urem i32 %a, %b
+  %cmp = icmp eq i32 %rem, 42
+  br i1 %cmp, label %if, label %end
+
+if:
+  %rem2 = srem i32 %a, %b
+  br label %end
+
+end:
+  %ret = phi i32 [ %rem2, %if ], [ 3, %entry ]
+  ret i32 %ret
+}
+
+; If the ops don't match, don't do anything: operands.
+
+define i32 @dont_hoist_sdiv(i32 %a, i32 %b, i32 %c) {
+; ALL-LABEL: @dont_hoist_sdiv(
+; ALL-NEXT:  entry:
+; ALL-NEXT:    [[REM:%.*]] = srem i32 %a, %b
+; ALL-NEXT:    [[CMP:%.*]] = icmp eq i32 [[REM]], 42
+; ALL-NEXT:    br i1 [[CMP]], label %if, label %end
+; ALL:       if:
+; ALL-NEXT:    [[DIV:%.*]] = sdiv i32 %a, %c
+; ALL-NEXT:    br label %end
+; ALL:       end:
+; ALL-NEXT:    [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
+; ALL-NEXT:    ret i32 [[RET]]
+;
+entry:
+  %rem = srem i32 %a, %b
+  %cmp = icmp eq i32 %rem, 42
+  br i1 %cmp, label %if, label %end
+
+if:
+  %div = sdiv i32 %a, %c
+  br label %end
+
+end:
+  %ret = phi i32 [ %div, %if ], [ 3, %entry ]
+  ret i32 %ret
+}
+
+; If the target doesn't have a unified div/rem op for the type, decompose rem in-place to mul+sub.
+
+define i128 @dont_hoist_urem(i128 %a, i128 %b) {
+; ALL-LABEL: @dont_hoist_urem(
+; ALL-NEXT:  entry:
+; ALL-NEXT:    [[DIV:%.*]] = udiv i128 %a, %b
+; ALL-NEXT:    [[CMP:%.*]] = icmp eq i128 [[DIV]], 42
+; ALL-NEXT:    br i1 [[CMP]], label %if, label %end
+; ALL:       if:
+; ALL-NEXT:    [[TMP0:%.*]] = mul i128 [[DIV]], %b
+; ALL-NEXT:    [[TMP1:%.*]] = sub i128 %a, [[TMP0]]
+; ALL-NEXT:    br label %end
+; ALL:       end:
+; ALL-NEXT:    [[RET:%.*]] = phi i128 [ [[TMP1]], %if ], [ 3, %entry ]
+; ALL-NEXT:    ret i128 [[RET]]
+;
+entry:
+  %div = udiv i128 %a, %b
+  %cmp = icmp eq i128 %div, 42
+  br i1 %cmp, label %if, label %end
+
+if:
+  %rem = urem i128 %a, %b
+  br label %end
+
+end:
+  %ret = phi i128 [ %rem, %if ], [ 3, %entry ]
+  ret i128 %ret
+}
+
+; We don't hoist if one op does not dominate the other,
+; but we could hoist both ops to the common predecessor block?
+
+define i32 @no_domination(i1 %cmp, i32 %a, i32 %b) {
+; ALL-LABEL: @no_domination(
+; ALL-NEXT:  entry:
+; ALL-NEXT:    br i1 %cmp, label %if, label %else
+; ALL:       if:
+; ALL-NEXT:    [[DIV:%.*]] = sdiv i32 %a, %b
+; ALL-NEXT:    br label %end
+; ALL:       else:
+; ALL-NEXT:    [[REM:%.*]] = srem i32 %a, %b
+; ALL-NEXT:    br label %end
+; ALL:       end:
+; ALL-NEXT:    [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ [[REM]], %else ]
+; ALL-NEXT:    ret i32 [[RET]]
+;
+entry:
+  br i1 %cmp, label %if, label %else
+
+if:
+  %div = sdiv i32 %a, %b
+  br label %end
+
+else:
+  %rem = srem i32 %a, %b
+  br label %end
+
+end:
+  %ret = phi i32 [ %div, %if ], [ %rem, %else ]
+  ret i32 %ret
+}
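To exercise the pass on a standalone .ll file outside of lit, the invocation used by the RUN lines above applies directly; the new-pass-manager name registered in PassRegistry.def should also be usable with opt's -passes syntax (that second spelling is an assumption here and is not exercised by this patch's tests):

    opt -div-rem-pairs -S -mtriple=x86_64-unknown-unknown input.ll
    opt -passes=div-rem-pairs -S -mtriple=x86_64-unknown-unknown input.ll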