diff --git a/llvm/include/llvm/Support/Alignment.h b/llvm/include/llvm/Support/Alignment.h --- a/llvm/include/llvm/Support/Alignment.h +++ b/llvm/include/llvm/Support/Alignment.h @@ -395,6 +395,16 @@ return Lhs && (*Lhs).value() > Rhs.value(); } +inline Align operator*(Align Lhs, uint64_t Rhs) { + assert(Rhs > 0 && "Rhs must be positive"); + return Align(Lhs.value() * Rhs); +} + +inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) { + assert(Rhs > 0 && "Rhs must be positive"); + return Lhs ? Lhs.getValue() * Rhs : MaybeAlign(); +} + inline Align operator/(Align Lhs, uint64_t Divisor) { assert(llvm::isPowerOf2_64(Divisor) && "Divisor must be positive and a power of 2"); @@ -416,6 +426,19 @@ return Rhs && *Rhs > Lhs ? *Rhs : Lhs; } +#ifndef NDEBUG +// For usage in LLVM_DEBUG macros. +inline std::string DebugStr(const Align &A) { + return "Align(" + std::to_string(A.value()) + ")"; +} +// For usage in LLVM_DEBUG macros. +inline std::string DebugStr(const MaybeAlign &MA) { + if (MA) + return "MaybeAlign(" + std::to_string(MA->value()) + ")"; + return "MaybeAlign(None)"; +} +#endif + #undef ALIGN_CHECK_ISPOSITIVE #undef ALIGN_CHECK_ISSET diff --git a/llvm/lib/CodeGen/MachineFrameInfo.cpp b/llvm/lib/CodeGen/MachineFrameInfo.cpp --- a/llvm/lib/CodeGen/MachineFrameInfo.cpp +++ b/llvm/lib/CodeGen/MachineFrameInfo.cpp @@ -41,8 +41,9 @@ Align StackAlignment) { if (!ShouldClamp || Alignment <= StackAlignment) return Alignment; - LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Alignment.value() - << " exceeds the stack alignment " << StackAlignment.value() + LLVM_DEBUG(dbgs() << "Warning: requested alignment " << DebugStr(Alignment) + << " exceeds the stack alignment " + << DebugStr(StackAlignment) << " when stack realignment is off" << '\n'); return StackAlignment; } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ 
b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -9498,8 +9498,8 @@ if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) { LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca " "greater than stack argument alignment (" - << RequiredAlignment.value() << " vs " - << MFI.getObjectAlign(FixedIndex).value() << ")\n"); + << DebugStr(RequiredAlignment) << " vs " + << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n"); return; } diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp --- a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp +++ b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp @@ -266,7 +266,7 @@ << "spOffset : " << spOffset << "\n" << "stackSize : " << stackSize << "\n" << "alignment : " - << MF.getFrameInfo().getObjectAlign(FrameIndex).value() + << DebugStr(MF.getFrameInfo().getObjectAlign(FrameIndex)) << "\n"); eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset); diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -297,9 +297,7 @@ friend struct DFSanFunction; friend class DFSanVisitor; - enum { - ShadowWidth = 16 - }; + enum { ShadowWidthBits = 16, ShadowWidthBytes = ShadowWidthBits / 8 }; /// Which ABI should be used for instrumented functions? 
enum InstrumentedABI { @@ -577,11 +575,11 @@ Mod = &M; Ctx = &M.getContext(); - ShadowTy = IntegerType::get(*Ctx, ShadowWidth); + ShadowTy = IntegerType::get(*Ctx, ShadowWidthBits); ShadowPtrTy = PointerType::getUnqual(ShadowTy); IntptrTy = DL.getIntPtrType(*Ctx); ZeroShadow = ConstantInt::getSigned(ShadowTy, 0); - ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8); + ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes); if (IsX86_64) ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL); else if (IsMIPS64) @@ -1238,7 +1236,7 @@ } } - const MaybeAlign ShadowAlign(Align * DFS.ShadowWidth / 8); + const MaybeAlign ShadowAlign(Align * DFS.ShadowWidthBytes); SmallVector<const Value *, 2> Objs; GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout()); bool AllConstants = true; @@ -1272,7 +1270,7 @@ IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos); } } - if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) { + if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0) { // Fast path for the common case where each byte has identical shadow: load // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any // shadow is non-equal. @@ -1284,15 +1282,15 @@ FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt); // Compare each of the shadows stored in the loaded 64 bits to each other, - // by computing (WideShadow rotl ShadowWidth) == WideShadow. + // by computing (WideShadow rotl ShadowWidthBits) == WideShadow. 
IRBuilder<> IRB(Pos); Value *WideAddr = IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx)); Value *WideShadow = IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign); Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy); - Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth); - Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth); + Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits); + Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits); Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow); Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow); @@ -1315,8 +1313,8 @@ ReplaceInstWithInst(Head->getTerminator(), LastBr); DT.addNewBlock(FallbackBB, Head); - for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size; - Ofs += 64 / DFS.ShadowWidth) { + for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size; + Ofs += 64 / DFS.ShadowWidthBits) { BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F); DT.addNewBlock(NextBB, LastBr->getParent()); IRBuilder<> NextIRB(NextBB); @@ -1386,11 +1384,12 @@ } } - const Align ShadowAlign(Alignment.value() * (DFS.ShadowWidth / 8)); + const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes); IRBuilder<> IRB(Pos); Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos); if (Shadow == DFS.ZeroShadow) { - IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth); + IntegerType *ShadowTy = + IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits); Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0); Value *ExtShadowAddr = IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy)); @@ -1398,7 +1397,7 @@ return; } - const unsigned ShadowVecSize = 128 / DFS.ShadowWidth; + const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits; uint64_t Offset = 0; if (Size >= ShadowVecSize) { VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize); @@ -1548,9 +1547,9 @@ IRBuilder<> IRB(&I); Value *RawDestShadow = 
DFSF.DFS.getShadowAddress(I.getDest(), &I); Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I); - Value *LenShadow = IRB.CreateMul( - I.getLength(), - ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8)); + Value *LenShadow = + IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(), + DFSF.DFS.ShadowWidthBytes)); Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx); Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr); SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr); @@ -1558,11 +1557,11 @@ IRB.CreateCall(I.getFunctionType(), I.getCalledValue(), {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()})); if (ClPreserveAlignment) { - MTI->setDestAlignment(I.getDestAlignment() * (DFSF.DFS.ShadowWidth / 8)); - MTI->setSourceAlignment(I.getSourceAlignment() * (DFSF.DFS.ShadowWidth / 8)); + MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes); + MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes); } else { - MTI->setDestAlignment(DFSF.DFS.ShadowWidth / 8); - MTI->setSourceAlignment(DFSF.DFS.ShadowWidth / 8); + MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes)); + MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes)); } if (ClEventCallbacks) { IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn, diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp --- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp +++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp @@ -90,9 +90,9 @@ // to a constant. Using SCEV to compute alignment handles the case where // DiffSCEV is a recurrence with constant start such that the aligned offset // is constant. e.g. {16,+,32} % 32 -> 16. 
-static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV, - const SCEV *AlignSCEV, - ScalarEvolution *SE) { +static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV, + const SCEV *AlignSCEV, + ScalarEvolution *SE) { // DiffUnits = Diff % int64_t(Alignment) const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV); @@ -107,25 +107,24 @@ // displaced pointer has the same alignment as the aligned pointer, so // return the alignment value. if (!DiffUnits) - return (unsigned) - cast<SCEVConstant>(AlignSCEV)->getValue()->getSExtValue(); + return cast<SCEVConstant>(AlignSCEV)->getValue()->getAlignValue(); // If the displacement is not an exact multiple, but the remainder is a // constant, then return this remainder (but only if it is a power of 2). uint64_t DiffUnitsAbs = std::abs(DiffUnits); if (isPowerOf2_64(DiffUnitsAbs)) - return (unsigned) DiffUnitsAbs; + return Align(DiffUnitsAbs); } - return 0; + return None; } // There is an address given by an offset OffSCEV from AASCEV which has an // alignment AlignSCEV. Use that information, if possible, to compute a new // alignment for Ptr. 
-static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV, - const SCEV *OffSCEV, Value *Ptr, - ScalarEvolution *SE) { +static MaybeAlign getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV, + const SCEV *OffSCEV, Value *Ptr, + ScalarEvolution *SE) { const SCEV *PtrSCEV = SE->getSCEV(Ptr); // On a platform with 32-bit allocas, but 64-bit flat/global pointer sizes // (*cough* AMDGPU), the effective SCEV type of AASCEV and PtrSCEV @@ -146,13 +145,12 @@ << *AlignSCEV << " and offset " << *OffSCEV << " using diff " << *DiffSCEV << "\n"); - unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE); - LLVM_DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n"); + if (MaybeAlign NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE)) { + LLVM_DEBUG(dbgs() << "\tnew alignment: " << DebugStr(NewAlignment) << "\n"); + return *NewAlignment; + } - if (NewAlignment) { - return NewAlignment; - } else if (const SCEVAddRecExpr *DiffARSCEV = - dyn_cast<SCEVAddRecExpr>(DiffSCEV)) { + if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) { // The relative offset to the alignment assumption did not yield a constant, // but we should try harder: if we assume that a is 32-byte aligned, then in // for (i = 0; i < 1024; i += 4) r += a[i]; not all of the loads from a are // aligned, but instead aligned to 32 bytes. // // This makes sense even if i is unbounded: in that case, the loop iterations // are all multiples of the alignment. But here we don't need that; instead, // we compute the alignment for the aligned pointer if it were used on the // first iteration, and also the alignment using the per-iteration delta. // If these are the same, then use that answer. Otherwise, use the smaller // one, but only if it divides the larger one. 
- NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE); - unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE); - - LLVM_DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n"); - LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n"); - - if (!NewAlignment || !NewIncAlignment) { - return 0; - } else if (NewAlignment > NewIncAlignment) { - if (NewAlignment % NewIncAlignment == 0) { - LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewIncAlignment - << "\n"); - return NewIncAlignment; - } - } else if (NewIncAlignment > NewAlignment) { - if (NewIncAlignment % NewAlignment == 0) { - LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment - << "\n"); - return NewAlignment; - } - } else if (NewIncAlignment == NewAlignment) { - LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment - << "\n"); + MaybeAlign NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE); + MaybeAlign NewIncAlignment = + getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE); + + LLVM_DEBUG(dbgs() << "\tnew start alignment: " << DebugStr(NewAlignment) + << "\n"); + LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << DebugStr(NewIncAlignment) + << "\n"); + + // Both None or set to the same value. 
+ if (NewAlignment == NewIncAlignment) { + LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " + << DebugStr(NewAlignment) << "\n"); + return NewAlignment; + } + if (NewAlignment > NewIncAlignment) { + LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " + << DebugStr(NewIncAlignment) << "\n"); + return NewIncAlignment; + } + if (NewIncAlignment > NewAlignment) { + LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " + << DebugStr(NewAlignment) << "\n"); return NewAlignment; } } - return 0; + return None; } bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I, @@ -323,26 +321,27 @@ Instruction *J = WorkList.pop_back_val(); if (LoadInst *LI = dyn_cast<LoadInst>(J)) { - unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, - LI->getPointerOperand(), SE); + MaybeAlign NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, - LI->getPointerOperand(), SE); + LI->getPointerOperand(), SE); if (NewAlignment > LI->getAlignment()) { LI->setAlignment(MaybeAlign(NewAlignment)); ++NumLoadAlignChanged; } } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) { - unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, - SI->getPointerOperand(), SE); + MaybeAlign NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, - SI->getPointerOperand(), SE); + SI->getPointerOperand(), SE); if (NewAlignment > SI->getAlignment()) { SI->setAlignment(MaybeAlign(NewAlignment)); ++NumStoreAlignChanged; } } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) { - unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, - MI->getDest(), SE); + MaybeAlign NewDestAlignment = + getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE); - LLVM_DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n";); + LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment) + << "\n";); if (NewDestAlignment > MI->getDestAlignment()) { MI->setDestAlignment(NewDestAlignment); ++NumMemIntAlignChanged; @@ -351,10 +350,11 @@ // For memory transfers, there is also a source alignment that // can be set. 
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { - unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, - MTI->getSource(), SE); + MaybeAlign NewSrcAlignment = + getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE); - LLVM_DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n";); + LLVM_DEBUG(dbgs() << "\tmem trans: " << DebugStr(NewSrcAlignment) + << "\n";); if (NewSrcAlignment > MTI->getSourceAlignment()) { MTI->setSourceAlignment(NewSrcAlignment);