diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1880,7 +1880,8 @@
           // and the number of bits loaded in L is less than or equal to
           // the number of bits stored in S.
           return DT.dominates(S, L) &&
-                 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
+                 DL.getTypeStoreSize(LTy).getFixedSize() <=
+                     DL.getTypeStoreSize(STy).getFixedSize();
         }))
       return false;
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -314,7 +314,7 @@
   if (!GEP->isInBounds()) {
     Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
     unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
-    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
+    if (Idx->getType()->getPrimitiveSizeInBits().getFixedSize() > PtrSize)
       Idx = Builder.CreateTrunc(Idx, IntPtrTy);
   }
 
@@ -487,7 +487,8 @@
   // Cast to intptrty in case a truncation occurs.  If an extension is needed,
   // we don't need to bother extending: the extension won't affect where the
   // computation crosses zero.
-  if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
+  if (VariableIdx->getType()->getPrimitiveSizeInBits().getFixedSize() >
+      IntPtrWidth) {
     VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
   }
   return VariableIdx;
@@ -942,8 +943,8 @@
       Type *LHSIndexTy = LOffset->getType();
       Type *RHSIndexTy = ROffset->getType();
       if (LHSIndexTy != RHSIndexTy) {
-        if (LHSIndexTy->getPrimitiveSizeInBits() <
-            RHSIndexTy->getPrimitiveSizeInBits()) {
+        if (LHSIndexTy->getPrimitiveSizeInBits().getFixedSize() <
+            RHSIndexTy->getPrimitiveSizeInBits().getFixedSize()) {
           ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
         } else
           LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -896,8 +896,8 @@
   // If we're going to need to zero extend the BE count, check if we can add
   // one to it prior to zero extending without overflow. Provided this is safe,
   // it allows better simplification of the +1.
-  if (DL->getTypeSizeInBits(BECount->getType()) <
-      DL->getTypeSizeInBits(IntPtr) &&
+  if (DL->getTypeSizeInBits(BECount->getType()).getFixedSize() <
+      DL->getTypeSizeInBits(IntPtr).getFixedSize() &&
       SE->isLoopEntryGuardedByCond(
           CurLoop, ICmpInst::ICMP_NE, BECount,
           SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -439,8 +439,8 @@
                                        Type *RangeCheckType) {
   if (!EnableIVTruncation)
     return false;
-  assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()) >
-         DL.getTypeSizeInBits(RangeCheckType) &&
+  assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()).getFixedSize() >
+         DL.getTypeSizeInBits(RangeCheckType).getFixedSize() &&
          "Expected latch check IV type to be larger than range check operand "
          "type!");
   // The start and end values of the IV should be known. This is to guarantee
@@ -460,7 +460,8 @@
   // The active bits should be less than the bits in the RangeCheckType. This
   // guarantees that truncating the latch check to RangeCheckType is a safe
   // operation.
-  auto RangeCheckTypeBitSize = DL.getTypeSizeInBits(RangeCheckType);
+  auto RangeCheckTypeBitSize =
+      DL.getTypeSizeInBits(RangeCheckType).getFixedSize();
   return Start->getAPInt().getActiveBits() < RangeCheckTypeBitSize &&
          Limit->getAPInt().getActiveBits() < RangeCheckTypeBitSize;
 }
@@ -477,7 +478,8 @@
   if (RangeCheckType == LatchType)
     return LatchCheck;
   // For now, bail out if latch type is narrower than range type.
-  if (DL.getTypeSizeInBits(LatchType) < DL.getTypeSizeInBits(RangeCheckType))
+  if (DL.getTypeSizeInBits(LatchType).getFixedSize() <
+      DL.getTypeSizeInBits(RangeCheckType).getFixedSize())
     return None;
   if (!isSafeToTruncateWideIVType(DL, SE, LatchCheck, RangeCheckType))
     return None;
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -375,8 +375,8 @@
   // Replace the I-th index with LHS.
   IndexExprs[I] = SE->getSCEV(LHS);
   if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
-      DL->getTypeSizeInBits(LHS->getType()) <
-      DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
+      DL->getTypeSizeInBits(LHS->getType()).getFixedSize() <
+      DL->getTypeSizeInBits(GEP->getOperand(I)->getType()).getFixedSize()) {
     // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
     // zext if the source operand is proved non-negative. We should do that
     // consistently so that CandidateExpr more likely appears before. See
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -2020,8 +2020,8 @@
       // Put pointers at the back and make sure pointer < pointer = false.
       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
-      return RHS->getType()->getPrimitiveSizeInBits() <
-             LHS->getType()->getPrimitiveSizeInBits();
+      return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
+             LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
     });
 
   unsigned NumElim = 0;
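For context, not part of the patch itself: every hunk above replaces a size comparison that relied on TypeSize's implicit conversion to an integer (which is not meaningful for scalable vector types) with an explicit getFixedSize() call. Below is a minimal standalone sketch of that idiom, assuming the llvm::TypeSize API of this era (TypeSize::Fixed, isScalable(), getFixedSize()); the fitsIn helper is hypothetical and exists only to illustrate the pattern the patched call sites follow.

// Sketch only -- not part of the patch. Builds against LLVM headers.
#include "llvm/Support/TypeSize.h"
#include <cassert>

using llvm::TypeSize;

// Hypothetical helper mirroring the patched comparisons: can a value of
// size Src be stored losslessly in Dst? For a scalable vector (e.g.
// <vscale x 4 x i32>) the true size is a runtime multiple of the known
// minimum, so a plain integer comparison is meaningless; getFixedSize()
// asserts the size is not scalable and makes that assumption explicit.
static bool fitsIn(TypeSize Src, TypeSize Dst) {
  assert(!Src.isScalable() && !Dst.isScalable() &&
         "fixed-size types only");
  return Src.getFixedSize() <= Dst.getFixedSize();
}

int main() {
  TypeSize I32 = TypeSize::Fixed(32); // size of a plain i32, in bits
  TypeSize I64 = TypeSize::Fixed(64); // size of a plain i64, in bits
  assert(fitsIn(I32, I64) && !fitsIn(I64, I32));
  return 0;
}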