@@ -50,7 +50,7 @@ static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
-    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
+    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited? Bail out, we've likely hit unreachable code.
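For readers tracking the migration: `llvm::Align` (from `llvm/Support/Alignment.h`) wraps a non-zero power-of-two byte count, and `MaybeAlign` is an optional `Align`, so the old `unsigned Align == 0` sentinel disappears. A minimal sketch of the intended usage, assuming headers of roughly this LLVM revision; the two helper functions below are made up for illustration and are not part of the patch:

```cpp
#include "llvm/Support/Alignment.h"
#include <cstdint>
using namespace llvm;

// Illustrative only: Align always holds a non-zero power of two, so callers
// never special-case "alignment == 0" once they hold an Align value.
static uint64_t roundUpToAlignment(uint64_t Offset, Align A) {
  return alignTo(Offset, A); // alignTo(uint64_t, Align) is declared in Alignment.h
}

// Illustrative only: an unspecified alignment is modelled as an empty MaybeAlign.
static bool hasExplicitAlignment(MaybeAlign MA) {
  return MA.hasValue(); // MaybeAlign(0) converts to "no value"
}
```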
@@ -62,8 +62,8 @@ static bool isDereferenceableAndAlignedPointer(

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
-    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
-                                              DL, CtxI, DT, Visited);
+    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Alignment,
+                                              Size, DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
@@ -76,7 +76,7 @@ static bool isDereferenceableAndAlignedPointer(
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
-      return isAligned(V, Offset, llvm::Align(Align), DL);
+      return isAligned(V, Offset, Alignment, DL);
    }

  // For GEPs, determine if the indexing lands within the allocated object.
@@ -85,7 +85,8 @@ static bool isDereferenceableAndAlignedPointer(

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
-        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
+        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
+             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
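The reflowed condition above performs the same check as before: the accumulated constant GEP offset must be a multiple of the alignment, with `Alignment.value()` supplying the raw byte count for the `APInt` remainder. A self-contained sketch of that arithmetic, assuming the same LLVM headers (the helper name is hypothetical):

```cpp
#include "llvm/ADT/APInt.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Hypothetical helper: true iff Offset is a multiple of the alignment.
// urem yields Offset % Alignment.value(); isMinValue() means "equals zero"
// for the unsigned remainder.
static bool offsetIsAlignedTo(const APInt &Offset, Align Alignment) {
  return Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
      .isMinValue();
}
```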
@@ -97,72 +98,69 @@ static bool isDereferenceableAndAlignedPointer(
    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
-        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
-        DL, CtxI, DT, Visited);
+        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
+        CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
-        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);
+        RelocateInst->getDerivedPtr(), Alignment, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
-    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
-                                              DL, CtxI, DT, Visited);
+    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
+                                              Size, DL, CtxI, DT, Visited);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
-      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
-                                                Visited);
+      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
+                                                DT, Visited);

  // If we don't know, assume the worst.
  return false;
}

-bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
-  assert(Align != 0 && "expected explicitly set alignment");
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
-  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
+  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT,
                                              Visited);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
-                                              unsigned Align,
+                                              MaybeAlign MA,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
+  if (!Ty->isSized())
+    return false;
+
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without alignment specification
-  if (Align == 0)
-    Align = DL.getABITypeAlignment(Ty);
-
-  if (!Ty->isSized())
-    return false;
-
+  const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);
  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
-  return isDereferenceableAndAlignedPointer(V, Align, AccessSize,
-                                            DL, CtxI, DT);
+  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
+                                            DT);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
-  return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
+  return isDereferenceableAndAlignedPointer(V, Ty, Align::None(), DL, CtxI, DT);
}

/// Test if A and B will obviously have the same value.
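In the Type-based overload, an unspecified `MaybeAlign` now resolves to the type's ABI alignment through `DataLayout::getValueOrABITypeAlignment`, replacing the old `if (Align == 0)` fallback, and `Align::None()` is the one-byte alignment used when only dereferenceability is being asked for. A hedged sketch of the resolution step, using only calls that appear in this patch (the wrapper function is illustrative, not part of the change):

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Illustrative wrapper: mirrors the old
//   if (Align == 0) Align = DL.getABITypeAlignment(Ty);
// fallback in a single call; an empty MaybeAlign means "unspecified".
static Align resolveAlignment(const DataLayout &DL, MaybeAlign MA, Type *Ty) {
  return DL.getValueOrABITypeAlignment(MA, Ty);
}
```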
@@ -204,17 +202,16 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()));
-  unsigned Align = LI->getAlignment();
-  if (Align == 0)
-    Align = DL.getABITypeAlignment(LI->getType());
+  const Align Alignment = DL.getValueOrABITypeAlignment(
+      MaybeAlign(LI->getAlignment()), LI->getType());

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
-    return isDereferenceableAndAlignedPointer(Ptr, Align, EltSize, DL,
-                                              HeaderFirstNonPHI, &DT);
+    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
+                                              HeaderFirstNonPHI, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
@@ -245,10 +242,10 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case found which warrants
-  if (EltSize.urem(Align) != 0)
+  if (EltSize.urem(Alignment.value()) != 0)
    return false;
-  return isDereferenceableAndAlignedPointer(Base, Align, AccessSize,
-                                            DL, HeaderFirstNonPHI, &DT);
+  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
+                                            HeaderFirstNonPHI, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
@@ -262,18 +259,17 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
-bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
+bool llvm::isSafeToLoadUnconditionally(Value *V, MaybeAlign MA, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target
-  if (Align == 0)
-    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
-  assert(isPowerOf2_32(Align));
+  const Align Alignment =
+      DL.getValueOrABITypeAlignment(MA, V->getType()->getPointerElementType());

  // If DT is not specified we can't make context-sensitive query
  const Instruction* CtxI = DT ? ScanFrom : nullptr;
-  if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
+  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT))
    return true;

  if (!ScanFrom)
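Note the dropped `assert(isPowerOf2_32(Align))`: the power-of-two invariant is now carried by the `Align` type itself, which asserts at construction. A small sketch of how a raw load/store alignment would be wrapped under that invariant (the helper is hypothetical, not part of the patch):

```cpp
#include "llvm/Support/Alignment.h"
#include <cstdint>
using namespace llvm;

// Hypothetical wrapper: 0 means "no alignment recorded" and becomes an
// empty MaybeAlign; any other value must be a power of two, which Align's
// constructor asserts, replacing the old isPowerOf2_32 check here.
static MaybeAlign wrapRawAlignment(uint64_t Raw) {
  return MaybeAlign(Raw);
}
```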
@@ -305,28 +301,29 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
      return false;

    Value *AccessedPtr;
-    unsigned AccessedAlign;
+    MaybeAlign MaybeAccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
-      AccessedAlign = LI->getAlignment();
+      MaybeAccessedAlign = MaybeAlign(LI->getAlignment());
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
-      AccessedAlign = SI->getAlignment();
+      MaybeAccessedAlign = MaybeAlign(SI->getAlignment());
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
-    if (AccessedAlign == 0)
-      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
-    if (AccessedAlign < Align)
+
+    const Align AccessedAlign =
+        DL.getValueOrABITypeAlignment(MaybeAccessedAlign, AccessedTy);
+    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
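The scan loop now compares two `Align` values directly; `operator<` is defined for `Align`, so "this access is too weakly aligned to prove anything about the requested alignment" reads as a plain comparison. A tiny illustrative helper mirroring that filter (not part of the patch):

```cpp
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Illustrative helper: skip accesses whose known alignment is weaker
// than the alignment we are trying to prove for the speculative load.
static bool tooWeaklyAligned(Align AccessedAlign, Align Requested) {
  return AccessedAlign < Requested;
}
```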
@@ -341,12 +338,12 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
  return false;
}

-bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
+bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, MaybeAlign Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
-  return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
+  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default number of maximum instructions
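As a usage note for callers of the updated public API: a pass that previously passed `0` for "use the ABI alignment of Ty" would now pass an empty `MaybeAlign`. A hedged call-site sketch; the wrapper function and its name are made up, and the surrounding values are assumed to exist in the caller:

```cpp
#include "llvm/Analysis/Loads.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Hypothetical call site (canSpeculateLoad is not part of the patch):
// an empty MaybeAlign requests Ty's ABI alignment, matching the old
// convention of passing 0; MaybeAlign(Align(16)) would demand 16 bytes.
static bool canSpeculateLoad(Value *V, Type *Ty, const DataLayout &DL,
                             Instruction *ScanFrom, const DominatorTree *DT) {
  return isSafeToLoadUnconditionally(V, Ty, MaybeAlign(), DL, ScanFrom, DT);
}
```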