Index: llvm/include/llvm/Transforms/Scalar/GVN.h
===================================================================
--- llvm/include/llvm/Transforms/Scalar/GVN.h
+++ llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -76,12 +76,12 @@
     uint32_t nextValueNumber;
 
-    Expression create_expression(Instruction *I);
-    Expression create_cmp_expression(unsigned Opcode,
+    Expression createExpr(Instruction *I);
+    Expression createCmpExpr(unsigned Opcode,
                                      CmpInst::Predicate Predicate,
                                      Value *LHS, Value *RHS);
-    Expression create_extractvalue_expression(ExtractValueInst *EI);
-    uint32_t lookup_or_add_call(CallInst *C);
+    Expression createExtractvalueExpr(ExtractValueInst *EI);
+    uint32_t lookupOrAddCall(CallInst *C);
 
   public:
     ValueTable();
@@ -89,9 +89,9 @@
     ValueTable(ValueTable &&Arg);
     ~ValueTable();
 
-    uint32_t lookup_or_add(Value *V);
+    uint32_t lookupOrAdd(Value *V);
     uint32_t lookup(Value *V) const;
-    uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred,
+    uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
                                Value *LHS, Value *RHS);
     bool exists(Value *V) const;
     void add(Value *V, uint32_t num);
Index: llvm/lib/Transforms/Scalar/GVN.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/GVN.cpp
+++ llvm/lib/Transforms/Scalar/GVN.cpp
@@ -229,13 +229,13 @@
 //                     ValueTable Internal Functions
 //===----------------------------------------------------------------------===//
 
-GVN::Expression GVN::ValueTable::create_expression(Instruction *I) {
+GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
   Expression e;
   e.type = I->getType();
   e.opcode = I->getOpcode();
   for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
        OI != OE; ++OI)
-    e.varargs.push_back(lookup_or_add(*OI));
+    e.varargs.push_back(lookupOrAdd(*OI));
   if (I->isCommutative()) {
     // Ensure that commutative instructions that only differ by a permutation
     // of their operands get the same value number by sorting the operand value
@@ -263,14 +263,14 @@
   return e;
 }
 
-GVN::Expression GVN::ValueTable::create_cmp_expression(
+GVN::Expression GVN::ValueTable::createCmpExpr(
     unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
   assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
          "Not a comparison!");
   Expression e;
   e.type = CmpInst::makeCmpResultType(LHS->getType());
-  e.varargs.push_back(lookup_or_add(LHS));
-  e.varargs.push_back(lookup_or_add(RHS));
+  e.varargs.push_back(lookupOrAdd(LHS));
+  e.varargs.push_back(lookupOrAdd(RHS));
 
   // Sort the operand value numbers so x<y and y>x get the same value number.
   if (e.varargs[0] > e.varargs[1]) {
@@ -282,7 +282,7 @@
 }
 
 GVN::Expression
-GVN::ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
+GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
   assert(EI && "Not an ExtractValueInst?");
   Expression e;
   e.type = EI->getType();
@@ -314,8 +314,8 @@
       // Intrinsic recognized. Grab its args to finish building the expression.
       assert(I->getNumArgOperands() == 2 &&
              "Expect two args for recognised intrinsics.");
-      e.varargs.push_back(lookup_or_add(I->getArgOperand(0)));
-      e.varargs.push_back(lookup_or_add(I->getArgOperand(1)));
+      e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
+      e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
       return e;
     }
   }
@@ -325,7 +325,7 @@
   e.opcode = EI->getOpcode();
   for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
        OI != OE; ++OI)
-    e.varargs.push_back(lookup_or_add(*OI));
+    e.varargs.push_back(lookupOrAdd(*OI));
 
   for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
        II != IE; ++II)
@@ -355,15 +355,15 @@
   valueNumbering.insert(std::make_pair(V, num));
 }
 
-uint32_t GVN::ValueTable::lookup_or_add_call(CallInst *C) {
+uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
   if (AA->doesNotAccessMemory(C)) {
-    Expression exp = create_expression(C);
+    Expression exp = createExpr(C);
     uint32_t &e = expressionNumbering[exp];
     if (!e) e = nextValueNumber++;
     valueNumbering[C] = e;
     return e;
   } else if (AA->onlyReadsMemory(C)) {
-    Expression exp = create_expression(C);
+    Expression exp = createExpr(C);
     uint32_t &e = expressionNumbering[exp];
     if (!e) {
       e = nextValueNumber++;
@@ -392,15 +392,15 @@
       }
 
       for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
-        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
-        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
+        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
+        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
         if (c_vn != cd_vn) {
           valueNumbering[C] = nextValueNumber;
           return nextValueNumber++;
         }
       }
 
-      uint32_t v = lookup_or_add(local_cdep);
+      uint32_t v = lookupOrAdd(local_cdep);
       valueNumbering[C] = v;
       return v;
     }
@@ -446,15 +446,15 @@
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
-      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
-      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
+      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
+      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }
 
-    uint32_t v = lookup_or_add(cdep);
+    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
@@ -469,7 +469,7 @@
 
 /// lookup_or_add - Returns the value number for the specified value, assigning
 /// it a new number if it did not have one before.
-uint32_t GVN::ValueTable::lookup_or_add(Value *V) {
+uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
   DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
   if (VI != valueNumbering.end())
     return VI->second;
@@ -483,7 +483,7 @@
   Expression exp;
   switch (I->getOpcode()) {
     case Instruction::Call:
-      return lookup_or_add_call(cast<CallInst>(I));
+      return lookupOrAddCall(cast<CallInst>(I));
     case Instruction::Add:
     case Instruction::FAdd:
     case Instruction::Sub:
@@ -522,10 +522,10 @@
     case Instruction::ShuffleVector:
     case Instruction::InsertValue:
     case Instruction::GetElementPtr:
-      exp = create_expression(I);
+      exp = createExpr(I);
      break;
    case Instruction::ExtractValue:
-      exp = create_extractvalue_expression(cast<ExtractValueInst>(I));
+      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
@@ -550,10 +550,10 @@
 /// assigning it a new number if it did not have one before. Useful when
 /// we deduced the result of a comparison, but don't immediately have an
 /// instruction realizing that comparison to hand.
-uint32_t GVN::ValueTable::lookup_or_add_cmp(unsigned Opcode,
+uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
-  Expression exp = create_cmp_expression(Opcode, Predicate, LHS, RHS);
+  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
   uint32_t& e = expressionNumbering[exp];
   if (!e) e = nextValueNumber++;
   return e;
 }
@@ -1187,7 +1187,7 @@
       Res = Load;
     } else {
       Res = GetLoadValueForLoad(Load, Offset, LoadTy, InsertPt, gvn);
-      
+
       DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
                    << *getCoercedLoadValue() << '\n'
                    << *Res << '\n' << "\n\n\n");
@@ -1220,7 +1220,7 @@
          "expected a local dependence");
 
   const DataLayout &DL = LI->getModule()->getDataLayout();
-  
+
   if (DepInfo.isClobber()) {
     // If the dependence is to a store that writes to a superset of the bits
     // read by the load, we can extract the bits we need for the load from the
@@ -1246,7 +1246,7 @@
     if (DepLI != LI && Address) {
       int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address,
                                                  DepLI, DL);
-      
+
       if (Offset != -1) {
         Res = AvailableValue::getLoad(DepLI, Offset);
         return true;
@@ -1279,7 +1279,7 @@
   assert(DepInfo.isDef() && "follows from above");
 
   Instruction *DepInst = DepInfo.getInst();
-  
+
   // Loading the allocation -> undef.
   if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
       // Loading immediately after lifetime begin -> undef.
@@ -1287,13 +1287,13 @@
     Res = AvailableValue::get(UndefValue::get(LI->getType()));
     return true;
   }
-  
+
   // Loading from calloc (which zero initializes memory) -> zero
   if (isCallocLikeFn(DepInst, TLI)) {
     Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
     return true;
   }
-  
+
   if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
     // Reject loads and stores that are to the same address but are of
     // different types if we have to. If the stored value is larger or equal to
@@ -1302,15 +1302,15 @@
         !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                          LI->getType(), DL))
       return false;
-    
+
     Res = AvailableValue::get(S->getValueOperand());
     return true;
   }
-  
+
   if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
     // If the types mismatch and we can't handle it, reject reuse of the load.
     // If the stored value is larger or equal to the loaded value, we can reuse
-    // it. 
+    // it.
     if (LD->getType() != LI->getType() &&
         !CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
       return false;
@@ -1330,7 +1330,7 @@
 }
 
-void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps, 
+void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                   AvailValInBlkVect &ValuesPerBlock,
                                   UnavailBlkVect &UnavailableBlocks) {
@@ -1377,7 +1377,7 @@
 }
 
-bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, 
+bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                          UnavailBlkVect &UnavailableBlocks) {
   // Okay, we have *some* definitions of the value. This means that the value
   // is available in some of our (transitive) predecessors. Lets think about
@@ -1535,7 +1535,7 @@
     // parent's availability map. However, in doing so, we risk getting into
     // ordering issues. If a block hasn't been processed yet, we would be
     // marking a value as AVAIL-IN, which isn't what we intend.
-    VN.lookup_or_add(I);
+    VN.lookupOrAdd(I);
   }
 
   for (const auto &PredLoad : PredLoads) {
@@ -1787,7 +1787,7 @@
   AvailableValue AV;
   if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
     Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);
-    
+
     // Replace the load!
     patchAndReplaceAllUsesWith(L, AvailableValue);
     markInstructionForDeletion(L);
@@ -1841,9 +1841,8 @@
   // GVN runs all such loops have preheaders, which means that Dst will have
   // been changed to have only one predecessor, namely Src.
   const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
-  const BasicBlock *Src = E.getStart();
-  assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
-  (void)Src;
+  assert((!Pred || Pred == E.getStart())
+         && "No edge between these basic blocks!");
   return Pred != nullptr;
 }
 
@@ -1878,7 +1877,7 @@
   bool Changed = false;
   // For speed, compute a conservative fast approximation to
   // DT->dominates(Root, Root.getEnd());
-  bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
+  const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
 
   while (!Worklist.empty()) {
     std::pair<Value *, Value *> Item = Worklist.pop_back_val();
@@ -1901,12 +1900,12 @@
   // right-hand side, ensure the longest lived term is on the right-hand side,
   // so the shortest lived term will be replaced by the longest lived.
   // This tends to expose more simplifications.
-  uint32_t LVN = VN.lookup_or_add(LHS);
+  uint32_t LVN = VN.lookupOrAdd(LHS);
   if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
       (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
     // Move the 'oldest' value to the right-hand side, using the value number
     // as a proxy for age.
-    uint32_t RVN = VN.lookup_or_add(RHS);
+    uint32_t RVN = VN.lookupOrAdd(RHS);
     if (LVN < RVN) {
       std::swap(LHS, RHS);
       LVN = RVN;
@@ -1982,7 +1981,7 @@
       // Floating point -0.0 and 0.0 compare equal, so we can only
       // propagate values if we know that we have a constant and that
       // its value is non-zero.
-      
+
       // FIXME: We should do this optimization if 'no signed zeros' is
       // applicable via an instruction-level fast-math-flag or some other
      // indicator that relaxed FP semantics are being used.
@@ -1990,7 +1989,7 @@
       if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero())
         Worklist.push_back(std::make_pair(Op0, Op1));
     }
-    
+
     // If "A >= B" is known true, replace "A < B" with false everywhere.
     CmpInst::Predicate NotPred = Cmp->getInversePredicate();
     Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
@@ -1998,7 +1997,7 @@
     // out the value number that it would have and use that to find an
     // appropriate instruction (if any).
     uint32_t NextNum = VN.getNextUnusedValueNumber();
-    uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1);
+    uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
     // If the number we were assigned was brand new then there is no point in
     // looking for an instruction realizing it: there cannot be one!
     if (Num < NextNum) {
@@ -2056,7 +2055,7 @@
     if (processLoad(LI))
       return true;
 
-    unsigned Num = VN.lookup_or_add(LI);
+    unsigned Num = VN.lookupOrAdd(LI);
     addToLeaderTable(Num, LI, LI->getParent());
     return false;
   }
@@ -2120,7 +2119,7 @@
     return false;
 
   uint32_t NextNum = VN.getNextUnusedValueNumber();
-  unsigned Num = VN.lookup_or_add(I);
+  unsigned Num = VN.lookupOrAdd(I);
 
   // Allocations are always uniquely numbered, so we can save time and memory
   // by fast failing them.
@@ -2211,7 +2210,7 @@
   cleanupGlobalSets();
 
   // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
-  // iteration. 
+  // iteration.
   DeadBlocks.clear();
 
   return Changed;
@@ -2311,8 +2310,6 @@
 }
 
 bool GVN::performScalarPRE(Instruction *CurInst) {
-  SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
-
   if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
       isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
       CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
@@ -2343,8 +2340,8 @@
   unsigned NumWithout = 0;
   BasicBlock *PREPred = nullptr;
   BasicBlock *CurrentBlock = CurInst->getParent();
-  predMap.clear();
 
+  SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
   for (BasicBlock *P : predecessors(CurrentBlock)) {
     // We're not interested in PRE where the block is its
     // own predecessor, or in blocks with predecessors
@@ -2437,7 +2434,7 @@
   DEBUG(verifyRemoved(CurInst));
   CurInst->eraseFromParent();
   ++NumGVNInstr;
-  
+
   return true;
 }
 
@@ -2560,7 +2557,7 @@
     SmallVector<BasicBlock *, 8> Dom;
     DT->getDescendants(D, Dom);
     DeadBlocks.insert(Dom.begin(), Dom.end());
-    
+
     // Figure out the dominance-frontier(D).
     for (BasicBlock *B : Dom) {
       for (BasicBlock *S : successors(B)) {
@@ -2618,13 +2615,13 @@
 // If the given branch is recognized as a foldable branch (i.e. conditional
 // branch with constant condition), it will perform following analyses and
 // transformation.
-// 1) If the dead out-coming edge is a critical-edge, split it. Let 
+// 1) If the dead out-coming edge is a critical-edge, split it. Let
 //    R be the target of the dead out-coming edge.
 //  1) Identify the set of dead blocks implied by the branch's dead outcoming
 //     edge. The result of this step will be {X| X is dominated by R}
 //  2) Identify those blocks which haves at least one dead predecessor. The
 //     result of this step will be dominance-frontier(R).
-//  3) Update the PHIs in DF(R) by replacing the operands corresponding to 
+//  3) Update the PHIs in DF(R) by replacing the operands corresponding to
 //     dead blocks with "UndefVal" in an hope these PHIs will optimized away.
 //
 // Return true iff *NEW* dead code are found.
@@ -2640,7 +2637,7 @@
   if (!Cond)
     return false;
 
-  BasicBlock *DeadRoot = Cond->getZExtValue() ? 
+  BasicBlock *DeadRoot = Cond->getZExtValue() ?
                          BI->getSuccessor(1) : BI->getSuccessor(0);
   if (DeadBlocks.count(DeadRoot))
     return false;
@@ -2659,7 +2656,7 @@
 void GVN::assignValNumForDeadCode() {
   for (BasicBlock *BB : DeadBlocks) {
     for (Instruction &Inst : *BB) {
-      unsigned ValNum = VN.lookup_or_add(&Inst);
+      unsigned ValNum = VN.lookupOrAdd(&Inst);
       addToLeaderTable(ValNum, &Inst, BB);
     }
   }
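Note (not part of the patch): the renamed ValueTable methods all share one lookup-or-add pattern -- operands are mapped to value numbers, an Expression keyed on the opcode and the operand numbers is looked up, and a fresh number is handed out only on first sight; for commutative operations the operand numbers are sorted first so "a + b" and "b + a" agree. The following is a minimal standalone C++ sketch of that pattern with hypothetical names (ToyExpr, ToyValueTable); it is an illustration of the idea, not LLVM code.

// Toy illustration of the lookup-or-add value-numbering pattern used by
// GVN::ValueTable (hypothetical names; not LLVM code).
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

struct ToyExpr {
  unsigned Opcode;
  std::vector<uint32_t> VarArgs; // value numbers of the operands
  bool operator<(const ToyExpr &O) const {
    return std::tie(Opcode, VarArgs) < std::tie(O.Opcode, O.VarArgs);
  }
};

class ToyValueTable {
  std::map<std::string, uint32_t> ValueNumbering;  // leaf "value" -> number
  std::map<ToyExpr, uint32_t> ExpressionNumbering; // expression -> number
  uint32_t NextValueNumber = 1;

public:
  // Analogue of lookupOrAdd: reuse an existing number or hand out a new one.
  uint32_t lookupOrAdd(const std::string &V) {
    auto It = ValueNumbering.find(V);
    if (It != ValueNumbering.end())
      return It->second;
    return ValueNumbering[V] = NextValueNumber++;
  }

  // Analogue of createExpr + expressionNumbering: commutative expressions get
  // their operand numbers sorted so "a + b" and "b + a" map to the same number.
  uint32_t lookupOrAddExpr(unsigned Opcode, const std::string &LHS,
                           const std::string &RHS, bool Commutative) {
    ToyExpr E{Opcode, {lookupOrAdd(LHS), lookupOrAdd(RHS)}};
    if (Commutative && E.VarArgs[0] > E.VarArgs[1])
      std::swap(E.VarArgs[0], E.VarArgs[1]);
    uint32_t &N = ExpressionNumbering[E]; // value-initialized to 0 if new
    if (!N)
      N = NextValueNumber++;
    return N;
  }
};

int main() {
  ToyValueTable VT;
  std::cout << VT.lookupOrAddExpr(13, "a", "b", true) << '\n';  // 3
  std::cout << VT.lookupOrAddExpr(13, "b", "a", true) << '\n';  // 3 (same number)
  std::cout << VT.lookupOrAddExpr(15, "a", "b", false) << '\n'; // 4 (new number)
}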