diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -738,6 +738,12 @@
         static_cast<const Value *>(this)->stripInBoundsOffsets(Func));
   }
 
+  /// Return true if the memory object referred to by this value can be freed
+  /// in the scope for which the SSA value defining the allocation is
+  /// statically defined.  E.g. a deallocation after the static scope of the
+  /// value does not count, but a deallocation before that does.
+  bool canBeFreed() const;
+
   /// Returns the number of bytes known to be dereferenceable for the
   /// pointer value.
   ///
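
A minimal sketch (not part of this patch) of how a client might consult the
new Value::canBeFreed() API before speculating a load; the helper name
mayHoistLoadFrom is invented for illustration:

  #include "llvm/IR/Value.h"

  // Only speculate when the pointed-to object provably cannot be freed
  // within the static scope of its defining SSA value (e.g. constants,
  // byval-like arguments, or allocations in nofree functions).
  static bool mayHoistLoadFrom(const llvm::Value *Ptr) {
    return Ptr->getType()->isPointerTy() && !Ptr->canBeFreed();
  }
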
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -162,19 +162,11 @@
     Opts.RoundToAlign = false;
     Opts.NullIsUnknownSize = true;
     uint64_t ObjSize;
-    // TODO: Plumb through TLI so that malloc routines and such working.
+    // TODO: Plumb through TLI so that malloc routines and such work.
     if (getObjectSize(V, ObjSize, DL, nullptr, Opts)) {
       APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
       if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
-          isKnownNonZero(V, DL, 0, nullptr, CtxI, DT) &&
-          // TODO: We're currently inconsistent about whether deref(N) is a
-          // global fact or a point in time fact.  Once D61652 eventually
-          // lands, this check will be restricted to the point in time
-          // variant. For that variant, we need to prove that object hasn't
-          // been conditionally freed before ontext instruction - if it has, we
-          // might be hoisting over the inverse conditional and creating a
-          // dynamic use after free. 
-          !PointerMayBeCapturedBefore(V, true, true, CtxI, DT, true)) {
+          isKnownNonZero(V, DL, 0, nullptr, CtxI, DT) && !V->canBeFreed()) {
         // As we recursed through GEPs to get here, we've incrementally
         // checked that each step advanced by a multiple of the alignment. If
         // our base is properly aligned, then the original offset accessed
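
A worked example (with made-up numbers, not code from this patch) of the
alignment argument in the comment above: if the base is aligned and every
GEP step advanced by a multiple of the alignment, the derived address stays
aligned, so checking the base suffices.

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t Align = 16;
    uint64_t Base = 0x1000;      // base pointer, 16-byte aligned
    uint64_t Offset = Align * 3; // each GEP step was a multiple of Align,
                                 // so the accumulated offset is too
    assert((Base + Offset) % Align == 0 &&
           "derived pointer inherits the base's alignment");
    return 0;
  }
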
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -728,27 +728,24 @@
   return stripPointerCastsAndOffsets<PSK_InBounds>(this, Func);
 }
 
-// Return true if the memory object referred to by V can by freed in the scope
-// for which the SSA value defining the allocation is statically defined.  E.g.
-// deallocation after the static scope of a value does not count.
-static bool canBeFreed(const Value *V) {
-  assert(V->getType()->isPointerTy());
+bool Value::canBeFreed() const {
+  assert(getType()->isPointerTy());
 
   // Cases that can simply never be deallocated
   // *) Constants aren't allocated per se, thus not deallocated either.
-  if (isa<Constant>(V))
+  if (isa<Constant>(this))
     return false;
 
   // Handle byval/byref/sret/inalloca/preallocated arguments.  The storage
   // lifetime is guaranteed to be longer than the callee's lifetime.
-  if (auto *A = dyn_cast<Argument>(V))
+  if (auto *A = dyn_cast<Argument>(this))
     if (A->hasPointeeInMemoryValueAttr())
       return false;
 
   const Function *F = nullptr;
-  if (auto *I = dyn_cast<Instruction>(V))
+  if (auto *I = dyn_cast<Instruction>(this))
     F = I->getFunction();
-  if (auto *A = dyn_cast<Argument>(V))
+  if (auto *A = dyn_cast<Argument>(this))
     F = A->getParent();
 
   if (!F)
@@ -774,7 +771,7 @@
   if (GCName != StatepointExampleName)
     return true;
 
-  auto *PT = cast<PointerType>(V->getType());
+  auto *PT = cast<PointerType>(getType());
   if (PT->getAddressSpace() != 1)
     // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
     // GC managed heap.  This must match the same check in
@@ -791,7 +788,6 @@
   return false;
 }
 
-
 uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
                                                bool &CanBeNull,
                                                bool &CanBeFreed) const {
@@ -799,7 +795,7 @@
 
   uint64_t DerefBytes = 0;
   CanBeNull = false;
-  CanBeFreed = UseDerefAtPointSemantics && canBeFreed(this);
+  CanBeFreed = UseDerefAtPointSemantics && canBeFreed();
   if (const Argument *A = dyn_cast<Argument>(this)) {
     DerefBytes = A->getDereferenceableBytes();
     if (DerefBytes == 0) {
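
Hypothetical usage (not part of this patch) of the
getPointerDereferenceableBytes interface above, showing how the CanBeFreed
out-parameter gates speculation under deref-at-point semantics; DL, Ptr, and
NeededSize are assumed to be in scope:

  bool CanBeNull, CanBeFreed;
  uint64_t Bytes =
      Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  // A speculated access is only safe if the object is known non-null for
  // enough bytes and cannot be freed before the point of use.
  if (Bytes >= NeededSize && !CanBeNull && !CanBeFreed) {
    // Safe to speculate an access of NeededSize bytes.
  }
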
diff --git a/llvm/test/Transforms/LICM/hoist-alloc.ll b/llvm/test/Transforms/LICM/hoist-alloc.ll
--- a/llvm/test/Transforms/LICM/hoist-alloc.ll
+++ b/llvm/test/Transforms/LICM/hoist-alloc.ll
@@ -324,21 +324,19 @@
 
 declare noalias i8* @my_alloc(i64) allocsize(0)
 
-; FIXME: While the result shown here is correct for the test case as written,
-; it would require context sensitive reasoning about frees which we don't
-; currently have to reach this result.  So, this is effectively demonstrating
-; a miscompile which needs investigated further.
+; We would need context-sensitive reasoning about frees (which we don't
+; currently have) to hoist the load in this example.
 define i8 @test_hoist_allocsize() {
 ; CHECK-LABEL: @test_hoist_allocsize(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A_RAW:%.*]] = call nonnull i8* @my_alloc(i64 32)
 ; CHECK-NEXT:    call void @init(i8* [[A_RAW]])
 ; CHECK-NEXT:    [[ADDR:%.*]] = getelementptr i8, i8* [[A_RAW]], i32 31
-; CHECK-NEXT:    [[RES:%.*]] = load i8, i8* [[ADDR]], align 1
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    call void @unknown()
+; CHECK-NEXT:    [[RES:%.*]] = load i8, i8* [[ADDR]], align 1
 ; CHECK-NEXT:    call void @use(i8 [[RES]])
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 200
@@ -368,7 +366,7 @@
   ret i8 %res
 }
 
-define i8 @test_hoist_allocsize_leak() {
+define i8 @test_hoist_allocsize_leak() nofree nosync {
 ; CHECK-LABEL: @test_hoist_allocsize_leak(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A_RAW:%.*]] = call nonnull i8* @my_alloc(i64 32)
diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
--- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
@@ -2214,7 +2214,7 @@
 declare align 8 dereferenceable_or_null(8) i8* @my_alloc(i32) allocsize(0)
 declare align 8 dereferenceable_or_null(8) i8* @my_array_alloc(i32, i32) allocsize(0, 1)
 
-define i32 @test_allocsize(i64 %len, i1* %test_base) {
+define i32 @test_allocsize(i64 %len, i1* %test_base) nofree nosync {
 ; CHECK-LABEL: @test_allocsize(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ALLOCATION:%.*]] = call nonnull i8* @my_alloc(i32 16384)
@@ -2382,7 +2382,7 @@
 }
 
 
-define i32 @test_allocsize_array(i64 %len, i1* %test_base) {
+define i32 @test_allocsize_array(i64 %len, i1* %test_base) nofree nosync {
 ; CHECK-LABEL: @test_allocsize_array(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ALLOCATION:%.*]] = call nonnull i8* @my_array_alloc(i32 4096, i32 4)