Index: llvm/include/llvm/Analysis/ConstantFolding.h
===================================================================
--- llvm/include/llvm/Analysis/ConstantFolding.h
+++ llvm/include/llvm/Analysis/ConstantFolding.h
@@ -19,6 +19,8 @@
 #ifndef LLVM_ANALYSIS_CONSTANTFOLDING_H
 #define LLVM_ANALYSIS_CONSTANTFOLDING_H
 
+#include <cstdint>
+
 namespace llvm {
 class APInt;
 template <typename T> class ArrayRef;
@@ -28,6 +30,7 @@
 class DataLayout;
 class Function;
 class GlobalValue;
+class GlobalVariable;
 class Instruction;
 class TargetLibraryInfo;
 class Type;
@@ -172,6 +175,8 @@
 /// Check whether the given call has no side-effects.
 /// Specifically checks for math routimes which sometimes set errno.
 bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI);
+
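+/// If GV is a constant with a definitive initializer, read its representation
+/// starting at Offset and return it as a constant array of unsigned char.
+/// Otherwise return nullptr.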
+Constant *ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset);
 }
 
 #endif
Index: llvm/lib/Analysis/ConstantFolding.cpp
===================================================================
--- llvm/lib/Analysis/ConstantFolding.cpp
+++ llvm/lib/Analysis/ConstantFolding.cpp
@@ -633,6 +633,33 @@
   return ConstantInt::get(IntType->getContext(), ResultVal);
 }
 
+} // end anonymous namespace
+
+// If GV is a constant with a definitive initializer, read its representation
+// starting at Offset and return it as a constant array of unsigned char.
+// Otherwise return nullptr.
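+//
+// As an illustrative sketch (the exact bytes depend on the target's
+// DataLayout and endianness): for a little-endian target and a global
+//   @g = constant [2 x i16] [i16 1, i16 2]
+// a call ReadByteArrayFromGlobal(GV, 0), with GV the GlobalVariable for @g,
+// yields the equivalent of [4 x i8] c"\01\00\02\00", while an Offset of 2
+// yields just the trailing bytes c"\02\00".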
+Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
+                                        uint64_t Offset) {
+  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
+    return nullptr;
+
+  const DataLayout &DL = GV->getParent()->getDataLayout();
+  Constant *Init = const_cast<Constant *>(GV->getInitializer());
+  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
+  if (InitSize < Offset)
+    return nullptr;
+
+  size_t NBytes = InitSize - Offset;
+  SmallVector<unsigned char, 256> RawBytes(NBytes);
+  unsigned char *CurPtr = RawBytes.data();
+
+  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
+    return nullptr;
+
+  return ConstantDataArray::get(GV->getContext(), RawBytes);
+}
+
 /// If this Offset points exactly to the start of an aggregate element, return
 /// that element, otherwise return nullptr.
 Constant *getConstantAtOffset(Constant *Base, APInt Offset,
@@ -661,8 +688,6 @@
   return C;
 }
 
-} // end anonymous namespace
-
 Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                           const APInt &Offset,
                                           const DataLayout &DL) {
Index: llvm/lib/Analysis/ValueTracking.cpp
===================================================================
--- llvm/lib/Analysis/ValueTracking.cpp
+++ llvm/lib/Analysis/ValueTracking.cpp
@@ -26,6 +26,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/GuardUtils.h"
 #include "llvm/Analysis/InstructionSimplify.h"
@@ -4095,6 +4096,10 @@
   return true;
 }
 
+// If V refers to an initialized global constant, set Slice either to
+// its initializer if the size in bits of its elements equals ElementSize,
+// or, for ElementSize == 8, to its representation as an array of unsigned
+// char. Return true on success.
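+//
+// As a sketch (byte order follows the target's DataLayout): for a
+// little-endian target and a global
+//   @g = constant [2 x i16] [i16 0x6261, i16 0]
+// a query with ElementSize == 8 describes the bytes "ab\00\00" in Slice,
+// which is the form callers such as getConstantStringInfo expect.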
 bool llvm::getConstantDataArrayInfo(const Value *V,
                                     ConstantDataArraySlice &Slice,
                                     unsigned ElementSize, uint64_t Offset) {
@@ -4106,21 +4111,29 @@
   // If the value is a GEP instruction or constant expression, treat it as an
   // offset.
   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
-    // The GEP operator should be based on a pointer to string constant, and is
-    // indexing into the string constant.
-    if (!isGEPBasedOnPointerToString(GEP, ElementSize))
+    // Fail if the first GEP index is not a constant zero, i.e., if the GEP
+    // is not indexing into the initializer.
+    const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
+    if (!FirstIdx || !FirstIdx->isZero())
       return false;
 
-    // If the second index isn't a ConstantInt, then this is a variable index
-    // into the array.  If this occurs, we can't say anything meaningful about
-    // the string.
-    uint64_t StartIdx = 0;
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
-      StartIdx = CI->getZExtValue();
-    else
+    Value *Op0 = GEP->getOperand(0);
+    const GlobalVariable *GV = dyn_cast<GlobalVariable>(Op0);
+    if (!GV)
+      return false;
+
+    // Fail if the offset into the initializer is not constant.
+    const DataLayout &DL = GV->getParent()->getDataLayout();
+    APInt Off(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
+    if (!GEP->accumulateConstantOffset(DL, Off))
+      return false;
+
+    // Fail if the constant offset is excessive.
+    uint64_t StartIdx = Off.getLimitedValue();
+    if (StartIdx == UINT64_MAX)
       return false;
-    return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
-                                    StartIdx + Offset);
+
+    return getConstantDataArrayInfo(Op0, Slice, ElementSize, StartIdx + Offset);
   }
 
   // The GEP instruction, constant or instruction, must reference a global
@@ -4130,15 +4143,16 @@
   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
     return false;
 
-  const ConstantDataArray *Array;
+  Type *GVTy = GV->getValueType();
+  const DataLayout &DL = GV->getParent()->getDataLayout();
+
+  ConstantDataArray *Array = nullptr;
   ArrayType *ArrayTy;
+
   if (GV->getInitializer()->isNullValue()) {
-    Type *GVTy = GV->getValueType();
-    if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
-      // A zeroinitializer for the array; there is no ConstantDataArray.
-      Array = nullptr;
-    } else {
-      const DataLayout &DL = GV->getParent()->getDataLayout();
+    if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
+      // A zeroinitializer for the array; there is no ConstantDataArray.
+    } else {
       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
       uint64_t Length = SizeInBytes / (ElementSize / 8);
       if (Length <= Offset)
@@ -4150,10 +4164,30 @@
       return true;
     }
   } else {
-    // This must be a ConstantDataArray.
-    Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
-    if (!Array)
-      return false;
+    auto *Init = const_cast<Constant *>(GV->getInitializer());
+    if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
+      Type *InitElTy = ArrayInit->getElementType();
+      uint64_t InitElSize = DL.getTypeStoreSize(InitElTy).getFixedSize();
+      if (InitElSize == ElementSize / 8)
+        // If Init is an initializer for an array with elements of the
+        // expected size, use it as is.
+        Array = ArrayInit;
+    }
+
+    if (!Array) {
+      if (ElementSize != 8)
+        return false;
+
+      // Otherwise extract the whole initializer as an array of bytes.
+      Init = ReadByteArrayFromGlobal(GV, 0);
+      if (!Init)
+        return false;
+
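+      // Note: ReadByteArrayFromGlobal may return a constant other than a
+      // ConstantDataArray (e.g., a ConstantAggregateZero when every byte
+      // it reads back is zero), so guard against that here.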
+      Array = dyn_cast<ConstantDataArray>(Init);
+      if (!Array)
+        return false;
+    }
+
     ArrayTy = Array->getType();
   }
   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
Index: llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
===================================================================
--- llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -687,6 +687,7 @@
   // very useful because calling strlen for a pointer of other types is
   // very uncommon.
   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Src)) {
+    // TODO: Handle subobjects.
     if (!isGEPBasedOnPointerToString(GEP, CharSize))
       return nullptr;
 
@@ -1143,7 +1144,6 @@
   }
 
   // Constant folding: memcmp(x, y, Len) -> constant (all arguments are const).
-  // TODO: This is limited to i8 arrays.
   StringRef LHSStr, RHSStr;
   if (getConstantStringInfo(LHS, LHSStr) &&
       getConstantStringInfo(RHS, RHSStr)) {
Index: llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
===================================================================
--- llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
+++ llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
@@ -1,13 +1,13 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s
 ; <rdar://problem/11294426>
 
-@b = private unnamed_addr constant [3 x i32] [i32 1768775988, i32 1685481784, i32 1836253201], align 4
+@b = common unnamed_addr global [3 x i32] zeroinitializer, align 4
 
 ; The important thing for this test is that we need an unaligned load of `l_b'
 ; ("ldr w2, [x1, #8]" in this case).
 
-; CHECK:      adrp x[[PAGE:[0-9]+]], {{l_b@PAGE|.Lb}}
-; CHECK: add  x[[ADDR:[0-9]+]], x[[PAGE]], {{l_b@PAGEOFF|:lo12:.Lb}}
+; CHECK:      adrp x[[PAGE:[0-9]+]], :got:b
+; CHECK-NEXT: ldr  x[[PAGE]], [x[[ADDR:[0-9]+]], :got_lo12:b]
 ; CHECK-NEXT: ldr  [[VAL2:x[0-9]+]], [x[[ADDR]]]
 ; CHECK-NEXT: ldr  [[VAL:w[0-9]+]], [x[[ADDR]], #8]
 ; CHECK-NEXT: str  [[VAL]], [x0, #8]
Index: llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll
===================================================================
--- llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll
+++ llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll
@@ -3,11 +3,11 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv6m-arm-linux-gnueabi"
 
-@fn1.a = private unnamed_addr constant [4 x i16] [i16 6, i16 0, i16 0, i16 0], align 2
+@fn1.a = private unnamed_addr global [4 x i16] [i16 6, i16 0, i16 0, i16 0], align 2
 
 ; We must not try and emit this bad instruction: "ldrh r1, .LCPI0_0"
 ; CHECK-LABEL: fn1:
-; CHECK: adr [[base:r[0-9]+]], .LCPI0_0
+; CHECK: ldr [[base:r[0-9]+]], .LCPI0_0
 ; CHECK-NOT: ldrh {{r[0-9]+}}, .LCPI0_0
 ; CHECK: ldrh r{{[0-9]+}}, [[[base]]]
 define hidden i32 @fn1() #0 {
Index: llvm/test/CodeGen/BPF/remove_truncate_5.ll
===================================================================
--- llvm/test/CodeGen/BPF/remove_truncate_5.ll
+++ llvm/test/CodeGen/BPF/remove_truncate_5.ll
@@ -25,12 +25,11 @@
   call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %2) #3
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 %2, i8* align 4 bitcast (%struct.test_t* @test.t to i8*), i64 16, i1 false)
 ; CHECK: r1 = 0
-; CHECK: r1 <<= 32
-; CHECK: r2 = r1
-; CHECK: r2 |= 0
-; CHECK: *(u64 *)(r10 - 8) = r2
-; CHECK: r1 |= 5
+; CHECK: *(u64 *)(r10 - 8) = r1
+; CHECK: r1 = 5
 ; CHECK: *(u64 *)(r10 - 16) = r1
+; CHECK: r1 = r10
+; CHECK: r1 += -16
   call void @foo(i8* nonnull %2) #3
 ; CHECK: call foo
   call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %2) #3
Index: llvm/test/CodeGen/BPF/rodata_2.ll
===================================================================
--- llvm/test/CodeGen/BPF/rodata_2.ll
+++ llvm/test/CodeGen/BPF/rodata_2.ll
@@ -32,15 +32,18 @@
 
 entry:
     tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 getelementptr inbounds (%struct.test_t2, %struct.test_t2* @g, i64 0, i32 0), i8* align 4 getelementptr inbounds (%struct.test_t2, %struct.test_t2* @test.t2, i64 0, i32 0), i64 32, i1 false)
-; CHECK:  r1 = g
-; CHECK:  r2 = 0
-; CHECK:  *(u32 *)(r1 + 28) = r2
-; CHECK:  r3 = 3
-; CHECK:  *(u32 *)(r1 + 24) = r3
-; CHECK:  r3 = 2
-; CHECK:  *(u32 *)(r1 + 20) = r3
-; CHECK:  r3 = 1
-; CHECK:  *(u32 *)(r1 + 16) = r3
+; CHECK: r1 = g ll
+; CHECK: r2 = 3
+; CHECK: *(u32 *)(r1 + 24) = r2
+; CHECK: r2 = 2
+; CHECK: *(u32 *)(r1 + 20) = r2
+; CHECK: r2 = 1
+; CHECK: *(u32 *)(r1 + 16) = r2
+; CHECK: r2 = 0
+; CHECK: *(u32 *)(r1 + 28) = r2
+; CHECK: *(u32 *)(r1 + 8) = r2
+; CHECK: *(u32 *)(r1 + 4) = r2
+; CHECK: *(u32 *)(r1 + 0) = r2
       ret i32 0
 }
 ; CHECK: .section  .rodata.cst32,"aM",@progbits,32
Index: llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
===================================================================
--- llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
+++ llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
@@ -15,123 +15,124 @@
 
   ; MIR32-LABEL: name: caller
   ; MIR32: bb.0.entry:
-  ; MIR32:   renamable $r3 = LWZtoc @__const.caller.t, $r2 :: (load (s32) from got)
-  ; MIR32:   renamable $r4 = LI 31
-  ; MIR32:   renamable $v2 = LVX renamable $r3, killed renamable $r4
-  ; MIR32:   renamable $r4 = LI 16
-  ; MIR32:   renamable $v3 = LVX renamable $r3, killed renamable $r4
-  ; MIR32:   renamable $v4 = LVSL $zero, renamable $r3
-  ; MIR32:   renamable $v2 = VPERM renamable $v3, killed renamable $v2, renamable $v4
-  ; MIR32:   renamable $r4 = LI 172
-  ; MIR32:   STXVW4X killed renamable $v2, $r1, killed renamable $r4 :: (store (s128) into unknown-address + 16, align 4)
-  ; MIR32:   renamable $v2 = LVX $zero, killed renamable $r3
-  ; MIR32:   renamable $v2 = VPERM killed renamable $v2, killed renamable $v3, killed renamable $v4
-  ; MIR32:   renamable $r3 = LI 156
-  ; MIR32:   STXVW4X killed renamable $v2, $r1, killed renamable $r3 :: (store (s128), align 4)
-  ; MIR32:   ADJCALLSTACKDOWN 188, 0, implicit-def dead $r1, implicit $r1
-  ; MIR32:   renamable $vsl0 = XXLXORz
-  ; MIR32:   $f1 = XXLXORdpz
-  ; MIR32:   $f2 = XXLXORdpz
-  ; MIR32:   $v2 = XXLXORz
-  ; MIR32:   $v3 = XXLXORz
-  ; MIR32:   $v4 = XXLXORz
-  ; MIR32:   $v5 = XXLXORz
-  ; MIR32:   $v6 = XXLXORz
-  ; MIR32:   $v7 = XXLXORz
-  ; MIR32:   $v8 = XXLXORz
-  ; MIR32:   $v9 = XXLXORz
-  ; MIR32:   $v10 = XXLXORz
-  ; MIR32:   $v11 = XXLXORz
-  ; MIR32:   $v12 = XXLXORz
-  ; MIR32:   $v13 = XXLXORz
-  ; MIR32:   $f3 = XXLXORdpz
-  ; MIR32:   $f4 = XXLXORdpz
-  ; MIR32:   $f5 = XXLXORdpz
-  ; MIR32:   $f6 = XXLXORdpz
-  ; MIR32:   $f7 = XXLXORdpz
-  ; MIR32:   renamable $r3 = LI 136
-  ; MIR32:   $f8 = XXLXORdpz
-  ; MIR32:   renamable $r4 = LI 120
-  ; MIR32:   renamable $r5 = LWZtoc %const.0, $r2 :: (load (s32) from got)
-  ; MIR32:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-  ; MIR32:   $f9 = XXLXORdpz
-  ; MIR32:   renamable $r3 = LI 104
-  ; MIR32:   STXVW4X renamable $vsl0, $r1, killed renamable $r4 :: (store (s128), align 8)
-  ; MIR32:   $f10 = XXLXORdpz
-  ; MIR32:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-  ; MIR32:   renamable $r3 = LI 88
-  ; MIR32:   $f11 = XXLXORdpz
-  ; MIR32:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-  ; MIR32:   renamable $r3 = LI 72
-  ; MIR32:   renamable $v0 = LXVD2X $zero, killed renamable $r5 :: (load (s128) from constant-pool)
-  ; MIR32:   $f12 = XXLXORdpz
-  ; MIR32:   STXVW4X killed renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-  ; MIR32:   $f13 = XXLXORdpz
-  ; MIR32:   renamable $r5 = LI 48
-  ; MIR32:   renamable $r6 = LI 512
-  ; MIR32:   $r3 = LI 128
-  ; MIR32:   $r4 = LI 256
-  ; MIR32:   STXVD2X killed renamable $v0, $r1, killed renamable $r5 :: (store (s128))
-  ; MIR32:   STW killed renamable $r6, 152, $r1 :: (store (s32))
-  ; MIR32:   BL_NOP <mcsymbol .callee[PR]>, csr_aix32_altivec, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $f1, implicit $f2, implicit $v2, implicit $v3, implicit $v4, implicit $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $r2, implicit-def $r1, implicit-def $f1
-  ; MIR32:   ADJCALLSTACKUP 188, 0, implicit-def dead $r1, implicit $r1
-  ; MIR32:   BLR implicit $lr, implicit $rm, implicit $f1
+  ; MIR32-NEXT:   renamable $r3 = LIS 16392
+  ; MIR32-NEXT:   renamable $r4 = LIS 16384
+  ; MIR32-NEXT:   STW killed renamable $r3, 180, $r1 :: (store (s32) into unknown-address + 24)
+  ; MIR32-NEXT:   renamable $r3 = LI 0
+  ; MIR32-NEXT:   STW killed renamable $r4, 172, $r1 :: (store (s32) into unknown-address + 16)
+  ; MIR32-NEXT:   renamable $r4 = LIS 16368
+  ; MIR32-NEXT:   STW renamable $r3, 184, $r1 :: (store (s32) into unknown-address + 28)
+  ; MIR32-NEXT:   STW renamable $r3, 176, $r1 :: (store (s32) into unknown-address + 20)
+  ; MIR32-NEXT:   STW renamable $r3, 168, $r1 :: (store (s32) into unknown-address + 12)
+  ; MIR32-NEXT:   STW killed renamable $r4, 164, $r1 :: (store (s32) into unknown-address + 8)
+  ; MIR32-NEXT:   STW renamable $r3, 160, $r1 :: (store (s32) into unknown-address + 4)
+  ; MIR32-NEXT:   STW killed renamable $r3, 156, $r1 :: (store (s32))
+  ; MIR32-NEXT:   ADJCALLSTACKDOWN 188, 0, implicit-def dead $r1, implicit $r1
+  ; MIR32-NEXT:   renamable $vsl0 = XXLXORz
+  ; MIR32-NEXT:   $f1 = XXLXORdpz
+  ; MIR32-NEXT:   $f2 = XXLXORdpz
+  ; MIR32-NEXT:   $v2 = XXLXORz
+  ; MIR32-NEXT:   $v3 = XXLXORz
+  ; MIR32-NEXT:   $v4 = XXLXORz
+  ; MIR32-NEXT:   $v5 = XXLXORz
+  ; MIR32-NEXT:   $v6 = XXLXORz
+  ; MIR32-NEXT:   $v7 = XXLXORz
+  ; MIR32-NEXT:   $v8 = XXLXORz
+  ; MIR32-NEXT:   $v9 = XXLXORz
+  ; MIR32-NEXT:   $v10 = XXLXORz
+  ; MIR32-NEXT:   $v11 = XXLXORz
+  ; MIR32-NEXT:   $v12 = XXLXORz
+  ; MIR32-NEXT:   $v13 = XXLXORz
+  ; MIR32-NEXT:   $f3 = XXLXORdpz
+  ; MIR32-NEXT:   $f4 = XXLXORdpz
+  ; MIR32-NEXT:   $f5 = XXLXORdpz
+  ; MIR32-NEXT:   $f6 = XXLXORdpz
+  ; MIR32-NEXT:   $f7 = XXLXORdpz
+  ; MIR32-NEXT:   renamable $r3 = LI 136
+  ; MIR32-NEXT:   $f8 = XXLXORdpz
+  ; MIR32-NEXT:   renamable $r4 = LI 120
+  ; MIR32-NEXT:   renamable $r5 = LWZtoc %const.0, $r2 :: (load (s32) from got)
+  ; MIR32-NEXT:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32-NEXT:   $f9 = XXLXORdpz
+  ; MIR32-NEXT:   renamable $r3 = LI 104
+  ; MIR32-NEXT:   STXVW4X renamable $vsl0, $r1, killed renamable $r4 :: (store (s128), align 8)
+  ; MIR32-NEXT:   $f10 = XXLXORdpz
+  ; MIR32-NEXT:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32-NEXT:   renamable $r3 = LI 88
+  ; MIR32-NEXT:   $f11 = XXLXORdpz
+  ; MIR32-NEXT:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32-NEXT:   renamable $r3 = LI 72
+  ; MIR32-NEXT:   renamable $v0 = LXVD2X $zero, killed renamable $r5 :: (load (s128) from constant-pool)
+  ; MIR32-NEXT:   $f12 = XXLXORdpz
+  ; MIR32-NEXT:   STXVW4X killed renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32-NEXT:   $f13 = XXLXORdpz
+  ; MIR32-NEXT:   renamable $r5 = LI 48
+  ; MIR32-NEXT:   renamable $r6 = LI 512
+  ; MIR32-NEXT:   $r3 = LI 128
+  ; MIR32-NEXT:   $r4 = LI 256
+  ; MIR32-NEXT:   STXVD2X killed renamable $v0, $r1, killed renamable $r5 :: (store (s128))
+  ; MIR32-NEXT:   STW killed renamable $r6, 152, $r1 :: (store (s32))
+  ; MIR32-NEXT:   BL_NOP <mcsymbol .callee[PR]>, csr_aix32_altivec, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $f1, implicit $f2, implicit $v2, implicit $v3, implicit $v4, implicit $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $r2, implicit-def $r1, implicit-def $f1
+  ; MIR32-NEXT:   ADJCALLSTACKUP 188, 0, implicit-def dead $r1, implicit $r1
+  ; MIR32-NEXT:   BLR implicit $lr, implicit $rm, implicit $f1
   ; MIR64-LABEL: name: caller
   ; MIR64: bb.0.entry:
-  ; MIR64:   renamable $x3 = LDtoc @__const.caller.t, $x2 :: (load (s64) from got)
-  ; MIR64:   renamable $x4 = LI8 16
-  ; MIR64:   renamable $vsl0 = LXVD2X renamable $x3, killed renamable $x4 :: (load (s128) from unknown-address + 16, align 8)
-  ; MIR64:   renamable $x4 = LI8 208
-  ; MIR64:   STXVD2X killed renamable $vsl0, $x1, killed renamable $x4 :: (store (s128) into unknown-address + 16, align 4)
-  ; MIR64:   renamable $vsl0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128), align 8)
-  ; MIR64:   renamable $x3 = LI8 192
-  ; MIR64:   STXVD2X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 4)
-  ; MIR64:   ADJCALLSTACKDOWN 224, 0, implicit-def dead $r1, implicit $r1
-  ; MIR64:   renamable $vsl0 = XXLXORz
-  ; MIR64:   $f1 = XXLXORdpz
-  ; MIR64:   $f2 = XXLXORdpz
-  ; MIR64:   $v2 = XXLXORz
-  ; MIR64:   $v3 = XXLXORz
-  ; MIR64:   $v4 = XXLXORz
-  ; MIR64:   $v5 = XXLXORz
-  ; MIR64:   $v6 = XXLXORz
-  ; MIR64:   $v7 = XXLXORz
-  ; MIR64:   $v8 = XXLXORz
-  ; MIR64:   $v9 = XXLXORz
-  ; MIR64:   $v10 = XXLXORz
-  ; MIR64:   $v11 = XXLXORz
-  ; MIR64:   $v12 = XXLXORz
-  ; MIR64:   $v13 = XXLXORz
-  ; MIR64:   $f3 = XXLXORdpz
-  ; MIR64:   $f4 = XXLXORdpz
-  ; MIR64:   $f5 = XXLXORdpz
-  ; MIR64:   $f6 = XXLXORdpz
-  ; MIR64:   renamable $x3 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
-  ; MIR64:   $f7 = XXLXORdpz
-  ; MIR64:   $f8 = XXLXORdpz
-  ; MIR64:   renamable $x4 = LI8 160
-  ; MIR64:   $f9 = XXLXORdpz
-  ; MIR64:   renamable $x5 = LI8 144
-  ; MIR64:   STXVW4X renamable $vsl0, $x1, killed renamable $x4 :: (store (s128), align 8)
-  ; MIR64:   renamable $vsl13 = LXVD2X $zero8, killed renamable $x3 :: (load (s128) from constant-pool)
-  ; MIR64:   $f10 = XXLXORdpz
-  ; MIR64:   renamable $x3 = LI8 128
-  ; MIR64:   STXVW4X renamable $vsl0, $x1, killed renamable $x5 :: (store (s128), align 8)
-  ; MIR64:   $f11 = XXLXORdpz
-  ; MIR64:   renamable $x4 = LI8 80
-  ; MIR64:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
-  ; MIR64:   $f12 = XXLXORdpz
-  ; MIR64:   STXVD2X killed renamable $vsl13, $x1, killed renamable $x4 :: (store (s128))
-  ; MIR64:   $f13 = XXLXORdpz
-  ; MIR64:   renamable $x5 = LI8 512
-  ; MIR64:   renamable $x6 = LI8 0
-  ; MIR64:   $x3 = LI8 128
-  ; MIR64:   $x4 = LI8 256
-  ; MIR64:   STD killed renamable $x5, 184, $x1 :: (store (s64))
-  ; MIR64:   STD killed renamable $x6, 176, $x1 :: (store (s64))
-  ; MIR64:   BL8_NOP <mcsymbol .callee[PR]>, csr_ppc64_altivec, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit $f1, implicit $f2, implicit killed $v2, implicit killed $v3, implicit killed $v4, implicit killed $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $x2, implicit-def $r1, implicit-def $f1
-  ; MIR64:   ADJCALLSTACKUP 224, 0, implicit-def dead $r1, implicit $r1
-  ; MIR64:   BLR8 implicit $lr8, implicit $rm, implicit $f1
+  ; MIR64-NEXT:   renamable $x3 = LI8 2049
+  ; MIR64-NEXT:   renamable $x4 = LI8 1
+  ; MIR64-NEXT:   renamable $x3 = RLDIC killed renamable $x3, 51, 1
+  ; MIR64-NEXT:   renamable $x4 = RLDIC killed renamable $x4, 62, 1
+  ; MIR64-NEXT:   STD killed renamable $x3, 216, $x1 :: (store (s64) into unknown-address + 24, align 4)
+  ; MIR64-NEXT:   renamable $x3 = LI8 1023
+  ; MIR64-NEXT:   STD killed renamable $x4, 208, $x1 :: (store (s64) into unknown-address + 16, align 4)
+  ; MIR64-NEXT:   renamable $x5 = LI8 0
+  ; MIR64-NEXT:   renamable $x3 = RLDIC killed renamable $x3, 52, 2
+  ; MIR64-NEXT:   STD renamable $x5, 192, $x1 :: (store (s64), align 4)
+  ; MIR64-NEXT:   STD killed renamable $x3, 200, $x1 :: (store (s64) into unknown-address + 8, align 4)
+  ; MIR64-NEXT:   ADJCALLSTACKDOWN 224, 0, implicit-def dead $r1, implicit $r1
+  ; MIR64-NEXT:   renamable $vsl0 = XXLXORz
+  ; MIR64-NEXT:   $f1 = XXLXORdpz
+  ; MIR64-NEXT:   $f2 = XXLXORdpz
+  ; MIR64-NEXT:   $v2 = XXLXORz
+  ; MIR64-NEXT:   $v3 = XXLXORz
+  ; MIR64-NEXT:   $v4 = XXLXORz
+  ; MIR64-NEXT:   $v5 = XXLXORz
+  ; MIR64-NEXT:   $v6 = XXLXORz
+  ; MIR64-NEXT:   $v7 = XXLXORz
+  ; MIR64-NEXT:   $v8 = XXLXORz
+  ; MIR64-NEXT:   $v9 = XXLXORz
+  ; MIR64-NEXT:   $v10 = XXLXORz
+  ; MIR64-NEXT:   $v11 = XXLXORz
+  ; MIR64-NEXT:   $v12 = XXLXORz
+  ; MIR64-NEXT:   $v13 = XXLXORz
+  ; MIR64-NEXT:   $f3 = XXLXORdpz
+  ; MIR64-NEXT:   $f4 = XXLXORdpz
+  ; MIR64-NEXT:   $f5 = XXLXORdpz
+  ; MIR64-NEXT:   $f6 = XXLXORdpz
+  ; MIR64-NEXT:   $f7 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x3 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
+  ; MIR64-NEXT:   $f8 = XXLXORdpz
+  ; MIR64-NEXT:   $f9 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x4 = LI8 160
+  ; MIR64-NEXT:   $f10 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x6 = LI8 144
+  ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x4 :: (store (s128), align 8)
+  ; MIR64-NEXT:   renamable $v0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128) from constant-pool)
+  ; MIR64-NEXT:   $f11 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x3 = LI8 128
+  ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x6 :: (store (s128), align 8)
+  ; MIR64-NEXT:   $f12 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x4 = LI8 80
+  ; MIR64-NEXT:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
+  ; MIR64-NEXT:   $f13 = XXLXORdpz
+  ; MIR64-NEXT:   STXVD2X killed renamable $v0, $x1, killed renamable $x4 :: (store (s128))
+  ; MIR64-NEXT:   renamable $x6 = LI8 512
+  ; MIR64-NEXT:   $x3 = LI8 128
+  ; MIR64-NEXT:   $x4 = LI8 256
+  ; MIR64-NEXT:   STD killed renamable $x6, 184, $x1 :: (store (s64))
+  ; MIR64-NEXT:   STD killed renamable $x5, 176, $x1 :: (store (s64))
+  ; MIR64-NEXT:   BL8_NOP <mcsymbol .callee[PR]>, csr_ppc64_altivec, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit $f1, implicit $f2, implicit $v2, implicit $v3, implicit $v4, implicit $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $x2, implicit-def $r1, implicit-def $f1
+  ; MIR64-NEXT:   ADJCALLSTACKUP 224, 0, implicit-def dead $r1, implicit $r1
+  ; MIR64-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $f1
   entry:
     %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 2.400000e+01, double 2.500000e+01>, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, %struct.Test* nonnull byval(%struct.Test) align 4 @__const.caller.t)
       ret double %call
Index: llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
===================================================================
--- llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
+++ llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
@@ -16,61 +16,60 @@
 ; 32BIT-NEXT:    mflr 0
 ; 32BIT-NEXT:    stw 0, 8(1)
 ; 32BIT-NEXT:    stwu 1, -192(1)
-; 32BIT-NEXT:    lwz 3, L..C0(2) # @__const.caller.t
-; 32BIT-NEXT:    li 4, 31
+; 32BIT-NEXT:    lis 3, 16392
+; 32BIT-NEXT:    lis 4, 16384
 ; 32BIT-NEXT:    xxlxor 0, 0, 0
-; 32BIT-NEXT:    lwz 5, L..C1(2) # %const.0
+; 32BIT-NEXT:    lwz 5, L..C0(2) # %const.0
 ; 32BIT-NEXT:    li 6, 512
+; 32BIT-NEXT:    stw 3, 180(1)
+; 32BIT-NEXT:    li 3, 0
 ; 32BIT-NEXT:    xxlxor 1, 1, 1
+; 32BIT-NEXT:    stw 4, 172(1)
+; 32BIT-NEXT:    lis 4, 16368
 ; 32BIT-NEXT:    xxlxor 2, 2, 2
-; 32BIT-NEXT:    lvx 2, 3, 4
-; 32BIT-NEXT:    li 4, 16
-; 32BIT-NEXT:    lvsl 4, 0, 3
-; 32BIT-NEXT:    xxlxor 37, 37, 37
-; 32BIT-NEXT:    lvx 3, 3, 4
-; 32BIT-NEXT:    li 4, 172
+; 32BIT-NEXT:    stw 3, 184(1)
+; 32BIT-NEXT:    stw 3, 176(1)
+; 32BIT-NEXT:    xxlxor 34, 34, 34
+; 32BIT-NEXT:    stw 3, 168(1)
+; 32BIT-NEXT:    stw 3, 160(1)
+; 32BIT-NEXT:    xxlxor 35, 35, 35
+; 32BIT-NEXT:    stw 3, 156(1)
+; 32BIT-NEXT:    li 3, 136
 ; 32BIT-NEXT:    lxvd2x 32, 0, 5
+; 32BIT-NEXT:    xxlxor 36, 36, 36
+; 32BIT-NEXT:    stw 4, 164(1)
+; 32BIT-NEXT:    li 4, 120
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    xxlxor 37, 37, 37
+; 32BIT-NEXT:    li 3, 104
+; 32BIT-NEXT:    stxvw4x 0, 1, 4
 ; 32BIT-NEXT:    xxlxor 38, 38, 38
-; 32BIT-NEXT:    xxlxor 39, 39, 39
 ; 32BIT-NEXT:    li 5, 48
-; 32BIT-NEXT:    vperm 2, 3, 2, 4
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    li 3, 88
+; 32BIT-NEXT:    xxlxor 39, 39, 39
+; 32BIT-NEXT:    li 4, 256
 ; 32BIT-NEXT:    xxlxor 40, 40, 40
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    li 3, 72
 ; 32BIT-NEXT:    xxlxor 41, 41, 41
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    li 3, 128
 ; 32BIT-NEXT:    xxlxor 42, 42, 42
+; 32BIT-NEXT:    stxvd2x 32, 1, 5
+; 32BIT-NEXT:    stw 6, 152(1)
 ; 32BIT-NEXT:    xxlxor 43, 43, 43
 ; 32BIT-NEXT:    xxlxor 44, 44, 44
-; 32BIT-NEXT:    stxvw4x 34, 1, 4
-; 32BIT-NEXT:    li 4, 120
 ; 32BIT-NEXT:    xxlxor 45, 45, 45
-; 32BIT-NEXT:    lvx 2, 0, 3
-; 32BIT-NEXT:    li 3, 156
 ; 32BIT-NEXT:    xxlxor 3, 3, 3
 ; 32BIT-NEXT:    xxlxor 4, 4, 4
-; 32BIT-NEXT:    vperm 2, 2, 3, 4
-; 32BIT-NEXT:    xxlxor 35, 35, 35
-; 32BIT-NEXT:    xxlxor 36, 36, 36
 ; 32BIT-NEXT:    xxlxor 5, 5, 5
 ; 32BIT-NEXT:    xxlxor 6, 6, 6
 ; 32BIT-NEXT:    xxlxor 7, 7, 7
-; 32BIT-NEXT:    stxvw4x 34, 1, 3
-; 32BIT-NEXT:    li 3, 136
-; 32BIT-NEXT:    xxlxor 34, 34, 34
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 104
-; 32BIT-NEXT:    stxvw4x 0, 1, 4
-; 32BIT-NEXT:    li 4, 256
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 88
 ; 32BIT-NEXT:    xxlxor 8, 8, 8
 ; 32BIT-NEXT:    xxlxor 9, 9, 9
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 72
 ; 32BIT-NEXT:    xxlxor 10, 10, 10
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 128
 ; 32BIT-NEXT:    xxlxor 11, 11, 11
-; 32BIT-NEXT:    stxvd2x 32, 1, 5
-; 32BIT-NEXT:    stw 6, 152(1)
 ; 32BIT-NEXT:    xxlxor 12, 12, 12
 ; 32BIT-NEXT:    xxlxor 13, 13, 13
 ; 32BIT-NEXT:    bl .callee[PR]
@@ -85,47 +84,49 @@
 ; 64BIT-NEXT:    mflr 0
 ; 64BIT-NEXT:    std 0, 16(1)
 ; 64BIT-NEXT:    stdu 1, -224(1)
-; 64BIT-NEXT:    ld 3, L..C0(2) # @__const.caller.t
-; 64BIT-NEXT:    li 4, 16
-; 64BIT-NEXT:    li 5, 144
+; 64BIT-NEXT:    li 3, 2049
+; 64BIT-NEXT:    li 4, 1
+; 64BIT-NEXT:    xxlxor 0, 0, 0
+; 64BIT-NEXT:    li 5, 0
+; 64BIT-NEXT:    rldic 3, 3, 51, 1
+; 64BIT-NEXT:    rldic 4, 4, 62, 1
 ; 64BIT-NEXT:    xxlxor 1, 1, 1
-; 64BIT-NEXT:    li 6, 0
+; 64BIT-NEXT:    li 6, 144
+; 64BIT-NEXT:    std 3, 216(1)
+; 64BIT-NEXT:    li 3, 1023
 ; 64BIT-NEXT:    xxlxor 2, 2, 2
+; 64BIT-NEXT:    rldic 3, 3, 52, 2
+; 64BIT-NEXT:    std 4, 208(1)
+; 64BIT-NEXT:    li 4, 160
 ; 64BIT-NEXT:    xxlxor 34, 34, 34
-; 64BIT-NEXT:    lxvd2x 0, 3, 4
-; 64BIT-NEXT:    li 4, 208
+; 64BIT-NEXT:    std 3, 200(1)
+; 64BIT-NEXT:    ld 3, L..C0(2) # %const.0
+; 64BIT-NEXT:    std 5, 192(1)
 ; 64BIT-NEXT:    xxlxor 35, 35, 35
 ; 64BIT-NEXT:    xxlxor 36, 36, 36
+; 64BIT-NEXT:    stxvw4x 0, 1, 4
+; 64BIT-NEXT:    li 4, 80
 ; 64BIT-NEXT:    xxlxor 37, 37, 37
-; 64BIT-NEXT:    stxvd2x 0, 1, 4
-; 64BIT-NEXT:    li 4, 160
+; 64BIT-NEXT:    stxvw4x 0, 1, 6
+; 64BIT-NEXT:    li 6, 512
+; 64BIT-NEXT:    lxvd2x 32, 0, 3
 ; 64BIT-NEXT:    xxlxor 38, 38, 38
-; 64BIT-NEXT:    lxvd2x 0, 0, 3
-; 64BIT-NEXT:    li 3, 192
+; 64BIT-NEXT:    li 3, 128
 ; 64BIT-NEXT:    xxlxor 39, 39, 39
+; 64BIT-NEXT:    stxvw4x 0, 1, 3
 ; 64BIT-NEXT:    xxlxor 40, 40, 40
 ; 64BIT-NEXT:    xxlxor 41, 41, 41
-; 64BIT-NEXT:    stxvd2x 0, 1, 3
-; 64BIT-NEXT:    ld 3, L..C1(2) # %const.0
-; 64BIT-NEXT:    xxlxor 0, 0, 0
+; 64BIT-NEXT:    stxvd2x 32, 1, 4
+; 64BIT-NEXT:    li 4, 256
+; 64BIT-NEXT:    std 6, 184(1)
 ; 64BIT-NEXT:    xxlxor 42, 42, 42
-; 64BIT-NEXT:    stxvw4x 0, 1, 4
-; 64BIT-NEXT:    li 4, 80
+; 64BIT-NEXT:    std 5, 176(1)
 ; 64BIT-NEXT:    xxlxor 43, 43, 43
-; 64BIT-NEXT:    lxvd2x 13, 0, 3
-; 64BIT-NEXT:    li 3, 128
 ; 64BIT-NEXT:    xxlxor 44, 44, 44
-; 64BIT-NEXT:    stxvw4x 0, 1, 5
 ; 64BIT-NEXT:    xxlxor 45, 45, 45
-; 64BIT-NEXT:    stxvw4x 0, 1, 3
-; 64BIT-NEXT:    li 5, 512
 ; 64BIT-NEXT:    xxlxor 3, 3, 3
 ; 64BIT-NEXT:    xxlxor 4, 4, 4
-; 64BIT-NEXT:    stxvd2x 13, 1, 4
-; 64BIT-NEXT:    li 4, 256
-; 64BIT-NEXT:    std 5, 184(1)
 ; 64BIT-NEXT:    xxlxor 5, 5, 5
-; 64BIT-NEXT:    std 6, 176(1)
 ; 64BIT-NEXT:    xxlxor 6, 6, 6
 ; 64BIT-NEXT:    xxlxor 7, 7, 7
 ; 64BIT-NEXT:    xxlxor 8, 8, 8
Index: llvm/test/DebugInfo/COFF/types-array.ll
===================================================================
--- llvm/test/DebugInfo/COFF/types-array.ll
+++ llvm/test/DebugInfo/COFF/types-array.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -filetype=obj | llvm-readobj - --codeview | FileCheck %s
 ; RUN: llc < %s | llvm-mc -filetype=obj --triple=i686-windows | llvm-readobj - --codeview | FileCheck %s
 
@@ -71,9 +72,9 @@
 ; CHECK:     DefRangeFramePointerRelSym {
 ; CHECK:       Offset: -20
 ; CHECK:       LocalVariableAddrRange {
-; CHECK:         OffsetStart: .text+0x6
+; CHECK:         OffsetStart: .text+0x9
 ; CHECK:         ISectStart: 0x0
-; CHECK:         Range: 0x33
+; CHECK:         Range: 0x30
 ; CHECK:       }
 ; CHECK:     }
 ; CHECK:     ProcEnd {
Index: llvm/test/Transforms/InstCombine/memchr-5.ll
===================================================================
--- llvm/test/Transforms/InstCombine/memchr-5.ll
+++ llvm/test/Transforms/InstCombine/memchr-5.ll
@@ -15,85 +15,39 @@
 
 define void @fold_memchr_a(i64* %pcmp) {
 ; BE-LABEL: @fold_memchr_a(
-; BE-NEXT:    [[PA:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 97, i64 16)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; BE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([4 x i32]* @a to i64)
-; BE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PB:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 98, i64 16)
-; BE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; BE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 0, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PC:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 99, i64 16)
-; BE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; BE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 1, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 100, i64 16)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; BE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 2, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PN:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 110, i64 16)
-; BE-NEXT:    [[IPN:%.*]] = ptrtoint i8* [[PN]] to i64
-; BE-NEXT:    [[OFFN:%.*]] = sub i64 [[IPN]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 3, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[OFFN]], i64* [[PSTOR4]], align 4
-; BE-NEXT:    [[PO:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 111, i64 16)
-; BE-NEXT:    [[IPO:%.*]] = ptrtoint i8* [[PO]] to i64
-; BE-NEXT:    [[OFFO:%.*]] = sub i64 [[IPO]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 13, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; BE-NEXT:    store i64 [[OFFO]], i64* [[PSTOR6]], align 4
-; BE-NEXT:    [[PP:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 112, i64 16)
-; BE-NEXT:    [[IPP:%.*]] = ptrtoint i8* [[PP]] to i64
-; BE-NEXT:    [[OFFP:%.*]] = sub i64 [[IPP]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 14, i64* [[PSTOR6]], align 4
 ; BE-NEXT:    [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; BE-NEXT:    store i64 [[OFFP]], i64* [[PSTOR7]], align 4
-; BE-NEXT:    [[PQ:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 113, i64 16)
-; BE-NEXT:    [[IPQ:%.*]] = ptrtoint i8* [[PQ]] to i64
+; BE-NEXT:    store i64 15, i64* [[PSTOR7]], align 4
 ; BE-NEXT:    [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; BE-NEXT:    store i64 [[IPQ]], i64* [[PSTOR8]], align 4
+; BE-NEXT:    store i64 0, i64* [[PSTOR8]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memchr_a(
-; LE-NEXT:    [[PA:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 97, i64 16)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; LE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([4 x i32]* @a to i64)
-; LE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PB:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 98, i64 16)
-; LE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; LE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 3, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PC:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 99, i64 16)
-; LE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; LE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 2, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 100, i64 16)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; LE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 1, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PN:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 110, i64 16)
-; LE-NEXT:    [[IPN:%.*]] = ptrtoint i8* [[PN]] to i64
-; LE-NEXT:    [[OFFN:%.*]] = sub i64 [[IPN]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 0, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[OFFN]], i64* [[PSTOR4]], align 4
-; LE-NEXT:    [[PO:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 111, i64 16)
-; LE-NEXT:    [[IPO:%.*]] = ptrtoint i8* [[PO]] to i64
-; LE-NEXT:    [[OFFO:%.*]] = sub i64 [[IPO]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 14, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; LE-NEXT:    store i64 [[OFFO]], i64* [[PSTOR6]], align 4
-; LE-NEXT:    [[PP:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 112, i64 16)
-; LE-NEXT:    [[IPP:%.*]] = ptrtoint i8* [[PP]] to i64
-; LE-NEXT:    [[OFFP:%.*]] = sub i64 [[IPP]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 13, i64* [[PSTOR6]], align 4
 ; LE-NEXT:    [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; LE-NEXT:    store i64 [[OFFP]], i64* [[PSTOR7]], align 4
-; LE-NEXT:    [[PQ:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 113, i64 16)
-; LE-NEXT:    [[IPQ:%.*]] = ptrtoint i8* [[PQ]] to i64
+; LE-NEXT:    store i64 12, i64* [[PSTOR7]], align 4
 ; LE-NEXT:    [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; LE-NEXT:    store i64 [[IPQ]], i64* [[PSTOR8]], align 4
+; LE-NEXT:    store i64 0, i64* [[PSTOR8]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [4 x i32], [4 x i32]* @a, i64 0, i64 0
@@ -171,63 +125,31 @@
 
 define void @fold_memchr_a_p1(i64* %pcmp) {
 ; BE-LABEL: @fold_memchr_a_p1(
-; BE-NEXT:    [[PE:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 101, i64 12)
-; BE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; BE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
-; BE-NEXT:    store i64 [[OFFE]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PF:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 102, i64 12)
-; BE-NEXT:    [[IPF:%.*]] = ptrtoint i8* [[PF]] to i64
-; BE-NEXT:    [[OFFF:%.*]] = sub i64 [[IPF]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 0, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFF]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PG:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 103, i64 12)
-; BE-NEXT:    [[IPG:%.*]] = ptrtoint i8* [[PG]] to i64
-; BE-NEXT:    [[OFFG:%.*]] = sub i64 [[IPG]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 1, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFG]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PH:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 104, i64 12)
-; BE-NEXT:    [[IPH:%.*]] = ptrtoint i8* [[PH]] to i64
-; BE-NEXT:    [[OFFH:%.*]] = sub i64 [[IPH]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 2, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFH]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PA:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 97, i64 12)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
+; BE-NEXT:    store i64 3, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[IPA]], i64* [[PSTOR4]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 100, i64 12)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
+; BE-NEXT:    store i64 0, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; BE-NEXT:    store i64 [[IPD]], i64* [[PSTOR5]], align 4
+; BE-NEXT:    store i64 0, i64* [[PSTOR5]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memchr_a_p1(
-; LE-NEXT:    [[PE:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 101, i64 12)
-; LE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; LE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
-; LE-NEXT:    store i64 [[OFFE]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PF:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 102, i64 12)
-; LE-NEXT:    [[IPF:%.*]] = ptrtoint i8* [[PF]] to i64
-; LE-NEXT:    [[OFFF:%.*]] = sub i64 [[IPF]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 3, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFF]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PG:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 103, i64 12)
-; LE-NEXT:    [[IPG:%.*]] = ptrtoint i8* [[PG]] to i64
-; LE-NEXT:    [[OFFG:%.*]] = sub i64 [[IPG]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 2, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFG]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PH:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 104, i64 12)
-; LE-NEXT:    [[IPH:%.*]] = ptrtoint i8* [[PH]] to i64
-; LE-NEXT:    [[OFFH:%.*]] = sub i64 [[IPH]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 1, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFH]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PA:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 97, i64 12)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
+; LE-NEXT:    store i64 0, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[IPA]], i64* [[PSTOR4]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 100, i64 12)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
+; LE-NEXT:    store i64 0, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; LE-NEXT:    store i64 [[IPD]], i64* [[PSTOR5]], align 4
+; LE-NEXT:    store i64 0, i64* [[PSTOR5]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [4 x i32], [4 x i32]* @a, i64 0, i64 1
Index: llvm/test/Transforms/InstCombine/memcmp-3.ll
===================================================================
--- llvm/test/Transforms/InstCombine/memcmp-3.ll
+++ llvm/test/Transforms/InstCombine/memcmp-3.ll
@@ -20,54 +20,40 @@
 ; BE-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; BE-NEXT:    store i32 0, i32* [[PSTOR1]], align 4
-; BE-NEXT:    [[CMP2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(2) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 2)
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; BE-NEXT:    store i32 [[CMP2]], i32* [[PSTOR2]], align 4
-; BE-NEXT:    [[CMP3:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(3) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 3)
+; BE-NEXT:    store i32 0, i32* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; BE-NEXT:    store i32 [[CMP3]], i32* [[PSTOR3]], align 4
-; BE-NEXT:    [[CMP4:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(4) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 4)
+; BE-NEXT:    store i32 0, i32* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; BE-NEXT:    store i32 [[CMP4]], i32* [[PSTOR4]], align 4
-; BE-NEXT:    [[CMP5:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 5)
+; BE-NEXT:    store i32 0, i32* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; BE-NEXT:    store i32 [[CMP5]], i32* [[PSTOR5]], align 4
-; BE-NEXT:    [[CMP6:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(6) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 6)
+; BE-NEXT:    store i32 0, i32* [[PSTOR5]], align 4
 ; BE-NEXT:    [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; BE-NEXT:    store i32 [[CMP6]], i32* [[PSTOR6]], align 4
-; BE-NEXT:    [[CMP7:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(7) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(7) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 7)
+; BE-NEXT:    store i32 0, i32* [[PSTOR6]], align 4
 ; BE-NEXT:    [[PSTOR7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; BE-NEXT:    store i32 [[CMP7]], i32* [[PSTOR7]], align 4
-; BE-NEXT:    [[CMP8:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(8) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(8) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 8)
+; BE-NEXT:    store i32 0, i32* [[PSTOR7]], align 4
 ; BE-NEXT:    [[PSTOR8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; BE-NEXT:    store i32 [[CMP8]], i32* [[PSTOR8]], align 4
+; BE-NEXT:    store i32 1, i32* [[PSTOR8]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memcmp_ia6a_i8a(
 ; LE-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; LE-NEXT:    store i32 1, i32* [[PSTOR1]], align 4
-; LE-NEXT:    [[CMP2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(2) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 2)
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; LE-NEXT:    store i32 [[CMP2]], i32* [[PSTOR2]], align 4
-; LE-NEXT:    [[CMP3:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(3) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 3)
+; LE-NEXT:    store i32 1, i32* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; LE-NEXT:    store i32 [[CMP3]], i32* [[PSTOR3]], align 4
-; LE-NEXT:    [[CMP4:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(4) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 4)
+; LE-NEXT:    store i32 1, i32* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; LE-NEXT:    store i32 [[CMP4]], i32* [[PSTOR4]], align 4
-; LE-NEXT:    [[CMP5:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 5)
+; LE-NEXT:    store i32 1, i32* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; LE-NEXT:    store i32 [[CMP5]], i32* [[PSTOR5]], align 4
-; LE-NEXT:    [[CMP6:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(6) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 6)
+; LE-NEXT:    store i32 1, i32* [[PSTOR5]], align 4
 ; LE-NEXT:    [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; LE-NEXT:    store i32 [[CMP6]], i32* [[PSTOR6]], align 4
-; LE-NEXT:    [[CMP7:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(7) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(7) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 7)
+; LE-NEXT:    store i32 1, i32* [[PSTOR6]], align 4
 ; LE-NEXT:    [[PSTOR7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; LE-NEXT:    store i32 [[CMP7]], i32* [[PSTOR7]], align 4
-; LE-NEXT:    [[CMP8:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(8) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(8) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 8)
+; LE-NEXT:    store i32 1, i32* [[PSTOR7]], align 4
 ; LE-NEXT:    [[PSTOR8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; LE-NEXT:    store i32 [[CMP8]], i32* [[PSTOR8]], align 4
+; LE-NEXT:    store i32 1, i32* [[PSTOR8]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [4 x i16], [4 x i16]* @ia6a, i64 0, i64 0
@@ -121,42 +107,32 @@
 ; BE-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; BE-NEXT:    store i32 1, i32* [[PSTOR1]], align 4
-; BE-NEXT:    [[CMP2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(2) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 2)
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; BE-NEXT:    store i32 [[CMP2]], i32* [[PSTOR2]], align 4
-; BE-NEXT:    [[CMP3:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(3) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 3)
+; BE-NEXT:    store i32 1, i32* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; BE-NEXT:    store i32 [[CMP3]], i32* [[PSTOR3]], align 4
-; BE-NEXT:    [[CMP4:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(4) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 4)
+; BE-NEXT:    store i32 1, i32* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; BE-NEXT:    store i32 [[CMP4]], i32* [[PSTOR4]], align 4
-; BE-NEXT:    [[CMP5:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 5)
+; BE-NEXT:    store i32 1, i32* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; BE-NEXT:    store i32 [[CMP5]], i32* [[PSTOR5]], align 4
-; BE-NEXT:    [[CMP6:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(6) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 6)
+; BE-NEXT:    store i32 1, i32* [[PSTOR5]], align 4
 ; BE-NEXT:    [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; BE-NEXT:    store i32 [[CMP6]], i32* [[PSTOR6]], align 4
+; BE-NEXT:    store i32 1, i32* [[PSTOR6]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memcmp_ia6a_p1_i8a_p1(
 ; LE-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; LE-NEXT:    store i32 2, i32* [[PSTOR1]], align 4
-; LE-NEXT:    [[CMP2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(2) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 2)
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; LE-NEXT:    store i32 [[CMP2]], i32* [[PSTOR2]], align 4
-; LE-NEXT:    [[CMP3:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(3) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 3)
+; LE-NEXT:    store i32 1, i32* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; LE-NEXT:    store i32 [[CMP3]], i32* [[PSTOR3]], align 4
-; LE-NEXT:    [[CMP4:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(4) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 4)
+; LE-NEXT:    store i32 1, i32* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; LE-NEXT:    store i32 [[CMP4]], i32* [[PSTOR4]], align 4
-; LE-NEXT:    [[CMP5:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 5)
+; LE-NEXT:    store i32 1, i32* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; LE-NEXT:    store i32 [[CMP5]], i32* [[PSTOR5]], align 4
-; LE-NEXT:    [[CMP6:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(6) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 6)
+; LE-NEXT:    store i32 1, i32* [[PSTOR5]], align 4
 ; LE-NEXT:    [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; LE-NEXT:    store i32 [[CMP6]], i32* [[PSTOR6]], align 4
+; LE-NEXT:    store i32 1, i32* [[PSTOR6]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [4 x i16], [4 x i16]* @ia6a, i64 0, i64 1
Index: llvm/test/Transforms/InstCombine/memrchr-5.ll
===================================================================
--- llvm/test/Transforms/InstCombine/memrchr-5.ll
+++ llvm/test/Transforms/InstCombine/memrchr-5.ll
@@ -15,85 +15,39 @@
 
 define void @fold_memrchr_a_16(i64* %pcmp) {
 ; BE-LABEL: @fold_memrchr_a_16(
-; BE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 97, i64 16)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; BE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([5 x i32]* @a to i64)
-; BE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PB:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 98, i64 16)
-; BE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; BE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 0, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PC:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 99, i64 16)
-; BE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; BE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 1, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 100, i64 16)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; BE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 2, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PN:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 110, i64 16)
-; BE-NEXT:    [[IPN:%.*]] = ptrtoint i8* [[PN]] to i64
-; BE-NEXT:    [[OFFN:%.*]] = sub i64 [[IPN]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 3, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[OFFN]], i64* [[PSTOR4]], align 4
-; BE-NEXT:    [[PO:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 111, i64 16)
-; BE-NEXT:    [[IPO:%.*]] = ptrtoint i8* [[PO]] to i64
-; BE-NEXT:    [[OFFO:%.*]] = sub i64 [[IPO]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 13, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; BE-NEXT:    store i64 [[OFFO]], i64* [[PSTOR6]], align 4
-; BE-NEXT:    [[PP:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 112, i64 16)
-; BE-NEXT:    [[IPP:%.*]] = ptrtoint i8* [[PP]] to i64
-; BE-NEXT:    [[OFFP:%.*]] = sub i64 [[IPP]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 14, i64* [[PSTOR6]], align 4
 ; BE-NEXT:    [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; BE-NEXT:    store i64 [[OFFP]], i64* [[PSTOR7]], align 4
-; BE-NEXT:    [[PQ:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 113, i64 16)
-; BE-NEXT:    [[IPQ:%.*]] = ptrtoint i8* [[PQ]] to i64
+; BE-NEXT:    store i64 15, i64* [[PSTOR7]], align 4
 ; BE-NEXT:    [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; BE-NEXT:    store i64 [[IPQ]], i64* [[PSTOR8]], align 4
+; BE-NEXT:    store i64 0, i64* [[PSTOR8]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memrchr_a_16(
-; LE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 97, i64 16)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; LE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([5 x i32]* @a to i64)
-; LE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PB:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 98, i64 16)
-; LE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; LE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 3, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PC:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 99, i64 16)
-; LE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; LE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 2, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 100, i64 16)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; LE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 1, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PN:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 110, i64 16)
-; LE-NEXT:    [[IPN:%.*]] = ptrtoint i8* [[PN]] to i64
-; LE-NEXT:    [[OFFN:%.*]] = sub i64 [[IPN]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 0, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[OFFN]], i64* [[PSTOR4]], align 4
-; LE-NEXT:    [[PO:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 111, i64 16)
-; LE-NEXT:    [[IPO:%.*]] = ptrtoint i8* [[PO]] to i64
-; LE-NEXT:    [[OFFO:%.*]] = sub i64 [[IPO]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 14, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; LE-NEXT:    store i64 [[OFFO]], i64* [[PSTOR6]], align 4
-; LE-NEXT:    [[PP:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 112, i64 16)
-; LE-NEXT:    [[IPP:%.*]] = ptrtoint i8* [[PP]] to i64
-; LE-NEXT:    [[OFFP:%.*]] = sub i64 [[IPP]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 13, i64* [[PSTOR6]], align 4
 ; LE-NEXT:    [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; LE-NEXT:    store i64 [[OFFP]], i64* [[PSTOR7]], align 4
-; LE-NEXT:    [[PQ:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 113, i64 16)
-; LE-NEXT:    [[IPQ:%.*]] = ptrtoint i8* [[PQ]] to i64
+; LE-NEXT:    store i64 12, i64* [[PSTOR7]], align 4
 ; LE-NEXT:    [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; LE-NEXT:    store i64 [[IPQ]], i64* [[PSTOR8]], align 4
+; LE-NEXT:    store i64 0, i64* [[PSTOR8]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 0
@@ -171,63 +125,31 @@
 
 define void @fold_memrchr_a_p1_16(i64* %pcmp) {
 ; BE-LABEL: @fold_memrchr_a_p1_16(
-; BE-NEXT:    [[PE:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 101, i64 12)
-; BE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; BE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
-; BE-NEXT:    store i64 [[OFFE]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PF:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 102, i64 12)
-; BE-NEXT:    [[IPF:%.*]] = ptrtoint i8* [[PF]] to i64
-; BE-NEXT:    [[OFFF:%.*]] = sub i64 [[IPF]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 0, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFF]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PG:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 103, i64 12)
-; BE-NEXT:    [[IPG:%.*]] = ptrtoint i8* [[PG]] to i64
-; BE-NEXT:    [[OFFG:%.*]] = sub i64 [[IPG]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 1, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFG]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PH:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 104, i64 12)
-; BE-NEXT:    [[IPH:%.*]] = ptrtoint i8* [[PH]] to i64
-; BE-NEXT:    [[OFFH:%.*]] = sub i64 [[IPH]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 2, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFH]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 97, i64 12)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
+; BE-NEXT:    store i64 3, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[IPA]], i64* [[PSTOR4]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 100, i64 12)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
+; BE-NEXT:    store i64 0, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; BE-NEXT:    store i64 [[IPD]], i64* [[PSTOR5]], align 4
+; BE-NEXT:    store i64 0, i64* [[PSTOR5]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memrchr_a_p1_16(
-; LE-NEXT:    [[PE:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 101, i64 12)
-; LE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; LE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
-; LE-NEXT:    store i64 [[OFFE]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PF:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 102, i64 12)
-; LE-NEXT:    [[IPF:%.*]] = ptrtoint i8* [[PF]] to i64
-; LE-NEXT:    [[OFFF:%.*]] = sub i64 [[IPF]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 3, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFF]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PG:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 103, i64 12)
-; LE-NEXT:    [[IPG:%.*]] = ptrtoint i8* [[PG]] to i64
-; LE-NEXT:    [[OFFG:%.*]] = sub i64 [[IPG]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 2, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFG]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PH:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 104, i64 12)
-; LE-NEXT:    [[IPH:%.*]] = ptrtoint i8* [[PH]] to i64
-; LE-NEXT:    [[OFFH:%.*]] = sub i64 [[IPH]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 1, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFH]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 97, i64 12)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
+; LE-NEXT:    store i64 0, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[IPA]], i64* [[PSTOR4]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 100, i64 12)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
+; LE-NEXT:    store i64 0, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; LE-NEXT:    store i64 [[IPD]], i64* [[PSTOR5]], align 4
+; LE-NEXT:    store i64 0, i64* [[PSTOR5]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 1
@@ -288,57 +210,27 @@
 
 define void @fold_memrchr_a_20(i64* %pcmp) {
 ; BE-LABEL: @fold_memrchr_a_20(
-; BE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 97, i64 20)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; BE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([5 x i32]* @a to i64)
-; BE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PB:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 98, i64 20)
-; BE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; BE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 16, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PC:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 99, i64 20)
-; BE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; BE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 17, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 100, i64 20)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; BE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 18, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PE:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 101, i64 20)
-; BE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; BE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 19, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[OFFE]], i64* [[PSTOR4]], align 4
+; BE-NEXT:    store i64 4, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memrchr_a_20(
-; LE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 97, i64 20)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; LE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([5 x i32]* @a to i64)
-; LE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PB:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 98, i64 20)
-; LE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; LE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 19, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PC:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 99, i64 20)
-; LE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; LE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 18, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 100, i64 20)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; LE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 17, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PE:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 101, i64 20)
-; LE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; LE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 16, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[OFFE]], i64* [[PSTOR4]], align 4
+; LE-NEXT:    store i64 7, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 0
Index: llvm/test/Transforms/InstCombine/sprintf-2.ll
===================================================================
--- llvm/test/Transforms/InstCombine/sprintf-2.ll
+++ llvm/test/Transforms/InstCombine/sprintf-2.ll
@@ -17,32 +17,23 @@
 
 define void @fold_snprintf_member_pC(i32* %pi) {
 ; CHECK-LABEL: @fold_snprintf_member_pC(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i32 [[IA0A]], i32* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0AP1:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 1))
+; CHECK-NEXT:    store i32 1, i32* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0AP1:%.*]] = getelementptr i32, i32* [[PI]], i64 1
-; CHECK-NEXT:    store i32 [[IA0AP1]], i32* [[PIA0AP1]], align 4
-; CHECK-NEXT:    [[IA0B:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0))
+; CHECK-NEXT:    store i32 0, i32* [[PIA0AP1]], align 4
 ; CHECK-NEXT:    [[PIA0B:%.*]] = getelementptr i32, i32* [[PI]], i64 2
-; CHECK-NEXT:    store i32 [[IA0B]], i32* [[PIA0B]], align 4
-; CHECK-NEXT:    [[IA0BP1:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1))
+; CHECK-NEXT:    store i32 2, i32* [[PIA0B]], align 4
 ; CHECK-NEXT:    [[PIA0BP1:%.*]] = getelementptr i32, i32* [[PI]], i64 3
-; CHECK-NEXT:    store i32 [[IA0BP1]], i32* [[PIA0BP1]], align 4
-; CHECK-NEXT:    [[IA0BP2:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 2))
+; CHECK-NEXT:    store i32 1, i32* [[PIA0BP1]], align 4
 ; CHECK-NEXT:    [[PIA0BP2:%.*]] = getelementptr i32, i32* [[PI]], i64 4
-; CHECK-NEXT:    store i32 [[IA0BP2]], i32* [[PIA0BP2]], align 4
-; CHECK-NEXT:    [[IA0C:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 0))
+; CHECK-NEXT:    store i32 0, i32* [[PIA0BP2]], align 4
 ; CHECK-NEXT:    [[PIA0C:%.*]] = getelementptr i32, i32* [[PI]], i64 5
-; CHECK-NEXT:    store i32 [[IA0C]], i32* [[PIA0C]], align 4
-; CHECK-NEXT:    [[IA1A:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0))
+; CHECK-NEXT:    store i32 3, i32* [[PIA0C]], align 4
 ; CHECK-NEXT:    [[PIA1A:%.*]] = getelementptr i32, i32* [[PI]], i64 6
-; CHECK-NEXT:    store i32 [[IA1A]], i32* [[PIA1A]], align 4
-; CHECK-NEXT:    [[IA1B:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0))
+; CHECK-NEXT:    store i32 4, i32* [[PIA1A]], align 4
 ; CHECK-NEXT:    [[PIA1B:%.*]] = getelementptr i32, i32* [[PI]], i64 7
-; CHECK-NEXT:    store i32 [[IA1B]], i32* [[PIA1B]], align 4
-; CHECK-NEXT:    [[IA1C:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 0))
+; CHECK-NEXT:    store i32 5, i32* [[PIA1B]], align 4
 ; CHECK-NEXT:    [[PIA1C:%.*]] = getelementptr i32, i32* [[PI]], i64 8
-; CHECK-NEXT:    store i32 [[IA1C]], i32* [[PIA1C]], align 4
+; CHECK-NEXT:    store i32 6, i32* [[PIA1C]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %fmt = getelementptr [3 x i8], [3 x i8]* @pcnt_s, i32 0, i32 0
Index: llvm/test/Transforms/InstCombine/str-int-3.ll
===================================================================
--- llvm/test/Transforms/InstCombine/str-int-3.ll
+++ llvm/test/Transforms/InstCombine/str-int-3.ll
@@ -20,17 +20,13 @@
 
 define void @fold_atoi_member(i32* %pi) {
 ; CHECK-LABEL: @fold_atoi_member(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i32 [[IA0A]], i32* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0B:%.*]] = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0))
+; CHECK-NEXT:    store i32 1, i32* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0B:%.*]] = getelementptr i32, i32* [[PI]], i64 1
-; CHECK-NEXT:    store i32 [[IA0B]], i32* [[PIA0B]], align 4
-; CHECK-NEXT:    [[IA1A:%.*]] = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0))
+; CHECK-NEXT:    store i32 12, i32* [[PIA0B]], align 4
 ; CHECK-NEXT:    [[PIA1A:%.*]] = getelementptr i32, i32* [[PI]], i64 2
-; CHECK-NEXT:    store i32 [[IA1A]], i32* [[PIA1A]], align 4
-; CHECK-NEXT:    [[IA1B:%.*]] = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0))
+; CHECK-NEXT:    store i32 123, i32* [[PIA1A]], align 4
 ; CHECK-NEXT:    [[PIA1B:%.*]] = getelementptr i32, i32* [[PI]], i64 3
-; CHECK-NEXT:    store i32 [[IA1B]], i32* [[PIA1B]], align 4
+; CHECK-NEXT:    store i32 1234, i32* [[PIA1B]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold atoi(a[0].a) to 1.
@@ -94,23 +90,17 @@
 
 define void @fold_atol_member(i64* %pi) {
 ; CHECK-LABEL: @fold_atol_member(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i64 [[IA0A]], i64* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0B:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0))
+; CHECK-NEXT:    store i64 1, i64* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0B:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT:    store i64 [[IA0B]], i64* [[PIA0B]], align 4
-; CHECK-NEXT:    [[IA0C:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 0))
+; CHECK-NEXT:    store i64 12, i64* [[PIA0B]], align 4
 ; CHECK-NEXT:    [[PIA0C:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT:    store i64 [[IA0C]], i64* [[PIA0C]], align 4
-; CHECK-NEXT:    [[IA1A:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0))
+; CHECK-NEXT:    store i64 56789, i64* [[PIA0C]], align 4
 ; CHECK-NEXT:    [[PIA1A:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT:    store i64 [[IA1A]], i64* [[PIA1A]], align 4
-; CHECK-NEXT:    [[IA1B:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0))
+; CHECK-NEXT:    store i64 123, i64* [[PIA1A]], align 4
 ; CHECK-NEXT:    [[PIA1B:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT:    store i64 [[IA1B]], i64* [[PIA1B]], align 4
-; CHECK-NEXT:    [[IA1C:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 0))
+; CHECK-NEXT:    store i64 1234, i64* [[PIA1B]], align 4
 ; CHECK-NEXT:    [[PIA1C:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT:    store i64 [[IA1C]], i64* [[PIA1C]], align 4
+; CHECK-NEXT:    store i64 67890, i64* [[PIA1C]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold atol(a[0].a) to 1.
@@ -158,23 +148,17 @@
 
 define void @fold_atoll_member_pC(i64* %pi) {
 ; CHECK-LABEL: @fold_atoll_member_pC(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i64 [[IA0A]], i64* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0BP1:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1))
+; CHECK-NEXT:    store i64 1, i64* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT:    store i64 [[IA0BP1]], i64* [[PIA0BP1]], align 4
-; CHECK-NEXT:    [[IA0CP3:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3))
+; CHECK-NEXT:    store i64 2, i64* [[PIA0BP1]], align 4
 ; CHECK-NEXT:    [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT:    store i64 [[IA0CP3]], i64* [[PIA0CP3]], align 4
-; CHECK-NEXT:    [[IA1AP2:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2))
+; CHECK-NEXT:    store i64 89, i64* [[PIA0CP3]], align 4
 ; CHECK-NEXT:    [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT:    store i64 [[IA1AP2]], i64* [[PIA1AP2]], align 4
-; CHECK-NEXT:    [[IA1BP3:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3))
+; CHECK-NEXT:    store i64 3, i64* [[PIA1AP2]], align 4
 ; CHECK-NEXT:    [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT:    store i64 [[IA1BP3]], i64* [[PIA1BP3]], align 4
-; CHECK-NEXT:    [[IA1CP4:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4))
+; CHECK-NEXT:    store i64 4, i64* [[PIA1BP3]], align 4
 ; CHECK-NEXT:    [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT:    store i64 [[IA1CP4]], i64* [[PIA1CP4]], align 4
+; CHECK-NEXT:    store i64 0, i64* [[PIA1CP4]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold atoll(a[0].a) to 1.
@@ -222,23 +206,17 @@
 
 define void @fold_strtol_member_pC(i64* %pi) {
 ; CHECK-LABEL: @fold_strtol_member_pC(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0), i8** null, i32 0)
-; CHECK-NEXT:    store i64 [[IA0A]], i64* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0BP1:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1), i8** null, i32 0)
+; CHECK-NEXT:    store i64 1, i64* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT:    store i64 [[IA0BP1]], i64* [[PIA0BP1]], align 4
-; CHECK-NEXT:    [[IA0CP3:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3), i8** null, i32 0)
+; CHECK-NEXT:    store i64 2, i64* [[PIA0BP1]], align 4
 ; CHECK-NEXT:    [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT:    store i64 [[IA0CP3]], i64* [[PIA0CP3]], align 4
-; CHECK-NEXT:    [[IA1AP2:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2), i8** null, i32 0)
+; CHECK-NEXT:    store i64 89, i64* [[PIA0CP3]], align 4
 ; CHECK-NEXT:    [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT:    store i64 [[IA1AP2]], i64* [[PIA1AP2]], align 4
-; CHECK-NEXT:    [[IA1BP3:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3), i8** null, i32 0)
+; CHECK-NEXT:    store i64 3, i64* [[PIA1AP2]], align 4
 ; CHECK-NEXT:    [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT:    store i64 [[IA1BP3]], i64* [[PIA1BP3]], align 4
-; CHECK-NEXT:    [[IA1CP4:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4), i8** null, i32 0)
+; CHECK-NEXT:    store i64 4, i64* [[PIA1BP3]], align 4
 ; CHECK-NEXT:    [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT:    store i64 [[IA1CP4]], i64* [[PIA1CP4]], align 4
+; CHECK-NEXT:    store i64 0, i64* [[PIA1CP4]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold strtol(a[0].a, 0, 0) to 1.
@@ -286,23 +264,17 @@
 
 define void @fold_strtoll_member_pC(i64* %pi) {
 ; CHECK-LABEL: @fold_strtoll_member_pC(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0), i8** null, i32 0)
-; CHECK-NEXT:    store i64 [[IA0A]], i64* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0BP1:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1), i8** null, i32 0)
+; CHECK-NEXT:    store i64 1, i64* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT:    store i64 [[IA0BP1]], i64* [[PIA0BP1]], align 4
-; CHECK-NEXT:    [[IA0CP3:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3), i8** null, i32 0)
+; CHECK-NEXT:    store i64 2, i64* [[PIA0BP1]], align 4
 ; CHECK-NEXT:    [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT:    store i64 [[IA0CP3]], i64* [[PIA0CP3]], align 4
-; CHECK-NEXT:    [[IA1AP2:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2), i8** null, i32 0)
+; CHECK-NEXT:    store i64 89, i64* [[PIA0CP3]], align 4
 ; CHECK-NEXT:    [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT:    store i64 [[IA1AP2]], i64* [[PIA1AP2]], align 4
-; CHECK-NEXT:    [[IA1BP3:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3), i8** null, i32 0)
+; CHECK-NEXT:    store i64 3, i64* [[PIA1AP2]], align 4
 ; CHECK-NEXT:    [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT:    store i64 [[IA1BP3]], i64* [[PIA1BP3]], align 4
-; CHECK-NEXT:    [[IA1CP4:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4), i8** null, i32 0)
+; CHECK-NEXT:    store i64 4, i64* [[PIA1BP3]], align 4
 ; CHECK-NEXT:    [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT:    store i64 [[IA1CP4]], i64* [[PIA1CP4]], align 4
+; CHECK-NEXT:    store i64 0, i64* [[PIA1CP4]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold strtoll(a[0].a, 0, 0) to 1.
Index: llvm/test/Transforms/InstCombine/strcmp-3.ll
===================================================================
--- llvm/test/Transforms/InstCombine/strcmp-3.ll
+++ llvm/test/Transforms/InstCombine/strcmp-3.ll
@@ -12,8 +12,7 @@
 
 define i32 @fold_strcmp_a5i0_a5i1_to_0() {
 ; CHECK-LABEL: @fold_strcmp_a5i0_a5i1_to_0(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 0
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0
@@ -28,7 +27,7 @@
 define i32 @call_strcmp_a5i0_a5iI(i64 %I) {
 ; CHECK-LABEL: @call_strcmp_a5i0_a5iI(
 ; CHECK-NEXT:    [[Q:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 [[I:%.*]], i64 0
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
+; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
 ; CHECK-NEXT:    ret i32 [[CMP]]
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
@@ -44,7 +43,7 @@
 define i32 @call_strcmp_a5iI_a5i0(i64 %I) {
 ; CHECK-LABEL: @call_strcmp_a5iI_a5i0(
 ; CHECK-NEXT:    [[P:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 [[I:%.*]], i64 0
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) [[P]], i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0))
+; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) [[P]], i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0))
 ; CHECK-NEXT:    ret i32 [[CMP]]
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 %I, i64 0
@@ -59,8 +58,7 @@
 
 define i32 @fold_strcmp_a5i0_a5i1_p1_to_0() {
 ; CHECK-LABEL: @fold_strcmp_a5i0_a5i1_p1_to_0(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 1))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 -1
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 1
@@ -75,7 +73,7 @@
 define i32 @call_strcmp_a5i0_a5i1_pI(i64 %I) {
 ; CHECK-LABEL: @call_strcmp_a5i0_a5i1_pI(
 ; CHECK-NEXT:    [[Q:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 [[I:%.*]]
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
+; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
 ; CHECK-NEXT:    ret i32 [[CMP]]
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
@@ -90,8 +88,7 @@
 
 define i32 @fold_strcmp_a5i0_p1_a5i1_to_0() {
 ; CHECK-LABEL: @fold_strcmp_a5i0_p1_a5i1_to_0(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 1), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 1
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 1
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0
@@ -105,8 +102,7 @@
 
 define i32 @fold_strcmp_a5i0_a5i2_to_0() {
 ; CHECK-LABEL: @fold_strcmp_a5i0_a5i2_to_0(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 1
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0
@@ -120,8 +116,7 @@
 
 define i32 @fold_strcmp_a5i2_a5i0_to_m1() {
 ; CHECK-LABEL: @fold_strcmp_a5i2_a5i0_to_m1(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 -1
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0
Index: llvm/test/Transforms/InstCombine/strlen-5.ll
===================================================================
--- llvm/test/Transforms/InstCombine/strlen-5.ll
+++ llvm/test/Transforms/InstCombine/strlen-5.ll
@@ -12,8 +12,7 @@
 
 define i64 @fold_a5_4_i0_to_3() {
 ; CHECK-LABEL: @fold_a5_4_i0_to_3(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 3
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -25,8 +24,7 @@
 
 define i64 @fold_a5_4_i0_p1_to_2() {
 ; CHECK-LABEL: @fold_a5_4_i0_p1_to_2(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 2
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -38,8 +36,7 @@
 
 define i64 @fold_a5_4_i0_p2_to_1() {
 ; CHECK-LABEL: @fold_a5_4_i0_p2_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -51,8 +48,7 @@
 
 define i64 @fold_a5_4_i0_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i0_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 3
   %len = call i64 @strlen(i8* %ptr)
@@ -64,8 +60,7 @@
 
 define i64 @fold_a5_4_i1_to_2() {
 ; CHECK-LABEL: @fold_a5_4_i1_to_2(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 2
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -77,8 +72,7 @@
 
 define i64 @fold_a5_4_i1_p1_to_1() {
 ; CHECK-LABEL: @fold_a5_4_i1_p1_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -90,8 +84,7 @@
 
 define i64 @fold_a5_4_i1_p2_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i1_p2_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -103,8 +96,7 @@
 
 define i64 @fold_a5_4_i1_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i1_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 3
   %len = call i64 @strlen(i8* %ptr)
@@ -116,8 +108,7 @@
 
 define i64 @fold_a5_4_i2_to_1() {
 ; CHECK-LABEL: @fold_a5_4_i2_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -129,8 +120,7 @@
 
 define i64 @fold_a5_4_i2_p1_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i2_p1_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -142,8 +132,7 @@
 
 define i64 @fold_a5_4_i2_p2_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i2_p2_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -155,8 +144,7 @@
 
 define i64 @fold_a5_4_i2_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i2_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 3
   %len = call i64 @strlen(i8* %ptr)
@@ -168,8 +156,7 @@
 
 define i64 @fold_a5_4_i3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -181,8 +168,7 @@
 
 define i64 @fold_a5_4_i3_p1_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i3_p1_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -194,8 +180,7 @@
 
 define i64 @fold_a5_4_i3_p2_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i3_p2_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -207,8 +192,7 @@
 
 define i64 @fold_a5_3_i4_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_3_i4_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 3
   %len = call i64 @strlen(i8* %ptr)
@@ -220,8 +204,7 @@
 
 define i64 @fold_a5_4_i4_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i4_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -233,8 +216,7 @@
 
 define i64 @fold_a5_4_i4_p1_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i4_p1_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -246,8 +228,7 @@
 
 define i64 @fold_a5_4_i4_p2_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i4_p2_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -259,8 +240,7 @@
 
 define i64 @fold_a5_4_i4_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i4_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 3
   %len = call i64 @strlen(i8* %ptr)
Index: llvm/test/Transforms/InstCombine/strlen-6.ll
===================================================================
--- llvm/test/Transforms/InstCombine/strlen-6.ll
+++ llvm/test/Transforms/InstCombine/strlen-6.ll
@@ -31,8 +31,7 @@
 
 define i64 @fold_strlen_a_S3_to_3() {
 ; CHECK-LABEL: @fold_strlen_a_S3_to_3(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4:%.*]], %struct.A_a4* @a_s3, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 3
 ;
   %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 0
   %len = call i64 @strlen(i8* %ptr)
@@ -44,8 +43,7 @@
 
 define i64 @fold_strlen_a_S3_p1_to_2() {
 ; CHECK-LABEL: @fold_strlen_a_S3_p1_to_2(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4:%.*]], %struct.A_a4* @a_s3, i64 0, i32 0, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 2
 ;
   %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 1
   %len = call i64 @strlen(i8* %ptr)
@@ -57,8 +55,7 @@
 
 define i64 @fold_strlen_a_S3_p2_to_1() {
 ; CHECK-LABEL: @fold_strlen_a_S3_p2_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4:%.*]], %struct.A_a4* @a_s3, i64 0, i32 0, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 2
   %len = call i64 @strlen(i8* %ptr)
@@ -70,8 +67,7 @@
 
 define i64 @fold_strlen_a_S3_p3_to_0() {
 ; CHECK-LABEL: @fold_strlen_a_S3_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4:%.*]], %struct.A_a4* @a_s3, i64 0, i32 0, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 3
   %len = call i64 @strlen(i8* %ptr)
@@ -83,8 +79,7 @@
 
 define i64 @fold_strlen_a_S3_s4_to_3() {
 ; CHECK-LABEL: @fold_strlen_a_S3_s4_to_3(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5:%.*]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 3
 ;
   %ptr = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 0
   %len = call i64 @strlen(i8* %ptr)
@@ -96,8 +91,7 @@
 
 define i64 @fold_strlen_a_S3_p2_s4_to_1() {
 ; CHECK-LABEL: @fold_strlen_a_S3_p2_s4_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5:%.*]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 0, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 2
   %len = call i64 @strlen(i8* %ptr)
@@ -110,10 +104,8 @@
 
 define void @fold_strlen_a_s3_S4_to_4() {
 ; CHECK-LABEL: @fold_strlen_a_s3_S4_to_4(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5:%.*]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 1, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 1, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 4
@@ -135,10 +127,8 @@
 
 define void @fold_strlen_a_s3_S4_p1_to_3() {
 ; CHECK-LABEL: @fold_strlen_a_s3_S4_p1_to_3(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5:%.*]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 1, i64 1))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 1, i64 1))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 5
@@ -160,10 +150,8 @@
 
 define void @fold_strlen_a_s3_i32_S4_to_4() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_to_4(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 8
@@ -185,10 +173,8 @@
 
 define void @fold_strlen_a_s3_i32_S4_p1_to_3() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p1_to_3(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 1))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 9
@@ -210,10 +196,8 @@
 
 define void @fold_strlen_a_s3_i32_S4_p2_to_2() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p2_to_2(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 2))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 2))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 2, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 2, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 10
@@ -235,10 +219,8 @@
 
 define void @fold_strlen_a_s3_i32_S4_p3_to_1() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p3_to_1(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 3))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 3))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 1, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 1, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 11
@@ -260,10 +242,8 @@
 
 define void @fold_strlen_a_s3_i32_S4_p4_to_0() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p4_to_0(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 4))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 4))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 0, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 0, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 12
@@ -285,12 +265,9 @@
 
 define void @fold_strlen_ax_s() {
 ; CHECK-LABEL: @fold_strlen_ax_s(
-; CHECK-NEXT:    [[LEN3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ({ i8, [4 x i8] }, { i8, [4 x i8] }* @ax_s3, i64 0, i32 1, i64 0))
-; CHECK-NEXT:    store i64 [[LEN3]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN5:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ({ i16, [6 x i8] }, { i16, [6 x i8] }* @ax_s5, i64 0, i32 1, i64 0))
-; CHECK-NEXT:    store i64 [[LEN5]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
-; CHECK-NEXT:    [[LEN7:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ({ i32, i32, [8 x i8] }, { i32, i32, [8 x i8] }* @ax_s7, i64 0, i32 2, i64 0))
-; CHECK-NEXT:    store i64 [[LEN7]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 2), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 5, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 7, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 2), align 4
 ; CHECK-NEXT:    ret void
 ;
   %pax_s3 = getelementptr { i8, [4 x i8] }, { i8, [4 x i8] }* @ax_s3, i64 0, i32 1, i64 0
Index: llvm/test/Transforms/InstCombine/strlen-7.ll
===================================================================
--- llvm/test/Transforms/InstCombine/strlen-7.ll
+++ llvm/test/Transforms/InstCombine/strlen-7.ll
@@ -15,59 +15,41 @@
 
 define void @fold_strlen_A(i64* %plen) {
 ; CHECK-LABEL: @fold_strlen_A(
-; CHECK-NEXT:    [[LENA0A:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i64 [[LENA0A]], i64* [[PLEN:%.*]], align 4
-; CHECK-NEXT:    [[LENA0AP1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 1))
+; CHECK-NEXT:    store i64 1, i64* [[PLEN:%.*]], align 4
 ; CHECK-NEXT:    [[PLEN1:%.*]] = getelementptr i64, i64* [[PLEN]], i64 1
-; CHECK-NEXT:    store i64 [[LENA0AP1]], i64* [[PLEN1]], align 4
-; CHECK-NEXT:    [[LENA0AP2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 2))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN1]], align 4
 ; CHECK-NEXT:    [[PLEN2:%.*]] = getelementptr i64, i64* [[PLEN]], i64 2
-; CHECK-NEXT:    store i64 [[LENA0AP2]], i64* [[PLEN2]], align 4
-; CHECK-NEXT:    [[LENA0AP3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 3))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN2]], align 4
 ; CHECK-NEXT:    [[PLEN3:%.*]] = getelementptr i64, i64* [[PLEN]], i64 3
-; CHECK-NEXT:    store i64 [[LENA0AP3]], i64* [[PLEN3]], align 4
-; CHECK-NEXT:    [[LENA0B:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN3]], align 4
 ; CHECK-NEXT:    [[PLEN4:%.*]] = getelementptr i64, i64* [[PLEN]], i64 4
-; CHECK-NEXT:    store i64 [[LENA0B]], i64* [[PLEN4]], align 4
-; CHECK-NEXT:    [[LENA0BP1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1))
+; CHECK-NEXT:    store i64 2, i64* [[PLEN4]], align 4
 ; CHECK-NEXT:    [[PLEN5:%.*]] = getelementptr i64, i64* [[PLEN]], i64 5
-; CHECK-NEXT:    store i64 [[LENA0BP1]], i64* [[PLEN5]], align 4
-; CHECK-NEXT:    [[LENA0BP2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 2))
+; CHECK-NEXT:    store i64 1, i64* [[PLEN5]], align 4
 ; CHECK-NEXT:    [[PLEN6:%.*]] = getelementptr i64, i64* [[PLEN]], i64 6
-; CHECK-NEXT:    store i64 [[LENA0BP2]], i64* [[PLEN6]], align 4
-; CHECK-NEXT:    [[LENA0BP3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 3))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN6]], align 4
 ; CHECK-NEXT:    [[PLEN7:%.*]] = getelementptr i64, i64* [[PLEN]], i64 7
-; CHECK-NEXT:    store i64 [[LENA0BP3]], i64* [[PLEN7]], align 4
-; CHECK-NEXT:    [[LENA0BP4:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 4))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN7]], align 4
 ; CHECK-NEXT:    [[PLEN8:%.*]] = getelementptr i64, i64* [[PLEN]], i64 8
-; CHECK-NEXT:    store i64 [[LENA0BP4]], i64* [[PLEN8]], align 4
-; CHECK-NEXT:    [[LENA1A:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN8]], align 4
 ; CHECK-NEXT:    [[PLEN9:%.*]] = getelementptr i64, i64* [[PLEN]], i64 9
-; CHECK-NEXT:    store i64 [[LENA1A]], i64* [[PLEN9]], align 4
-; CHECK-NEXT:    [[LENA1AP1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 1))
+; CHECK-NEXT:    store i64 3, i64* [[PLEN9]], align 4
 ; CHECK-NEXT:    [[PLEN10:%.*]] = getelementptr i64, i64* [[PLEN]], i64 10
-; CHECK-NEXT:    store i64 [[LENA1AP1]], i64* [[PLEN10]], align 4
-; CHECK-NEXT:    [[LENA1AP2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2))
+; CHECK-NEXT:    store i64 2, i64* [[PLEN10]], align 4
 ; CHECK-NEXT:    [[PLEN11:%.*]] = getelementptr i64, i64* [[PLEN]], i64 11
-; CHECK-NEXT:    store i64 [[LENA1AP2]], i64* [[PLEN11]], align 4
-; CHECK-NEXT:    [[LENA1AP3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 3))
+; CHECK-NEXT:    store i64 1, i64* [[PLEN11]], align 4
 ; CHECK-NEXT:    [[PLEN12:%.*]] = getelementptr i64, i64* [[PLEN]], i64 12
-; CHECK-NEXT:    store i64 [[LENA1AP3]], i64* [[PLEN12]], align 4
-; CHECK-NEXT:    [[LENA1B:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN12]], align 4
 ; CHECK-NEXT:    [[PLEN14:%.*]] = getelementptr i64, i64* [[PLEN]], i64 14
-; CHECK-NEXT:    store i64 [[LENA1B]], i64* [[PLEN14]], align 4
-; CHECK-NEXT:    [[LENA1BP1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 1))
+; CHECK-NEXT:    store i64 4, i64* [[PLEN14]], align 4
 ; CHECK-NEXT:    [[PLEN15:%.*]] = getelementptr i64, i64* [[PLEN]], i64 15
-; CHECK-NEXT:    store i64 [[LENA1BP1]], i64* [[PLEN15]], align 4
-; CHECK-NEXT:    [[LENA1BP2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 2))
+; CHECK-NEXT:    store i64 3, i64* [[PLEN15]], align 4
 ; CHECK-NEXT:    [[PLEN16:%.*]] = getelementptr i64, i64* [[PLEN]], i64 16
-; CHECK-NEXT:    store i64 [[LENA1BP2]], i64* [[PLEN16]], align 4
-; CHECK-NEXT:    [[LENA1BP3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3))
+; CHECK-NEXT:    store i64 2, i64* [[PLEN16]], align 4
 ; CHECK-NEXT:    [[PLEN17:%.*]] = getelementptr i64, i64* [[PLEN]], i64 17
-; CHECK-NEXT:    store i64 [[LENA1BP3]], i64* [[PLEN17]], align 4
-; CHECK-NEXT:    [[LENA1BP4:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 4))
+; CHECK-NEXT:    store i64 1, i64* [[PLEN17]], align 4
 ; CHECK-NEXT:    [[PLEN18:%.*]] = getelementptr i64, i64* [[PLEN]], i64 18
-; CHECK-NEXT:    store i64 [[LENA1BP4]], i64* [[PLEN18]], align 4
+; CHECK-NEXT:    store i64 0, i64* [[PLEN18]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold strlen(a[0].a) to 1.
Index: llvm/test/Transforms/InstCombine/strlen-8.ll
===================================================================
--- llvm/test/Transforms/InstCombine/strlen-8.ll
+++ llvm/test/Transforms/InstCombine/strlen-8.ll
@@ -41,8 +41,7 @@
 
 define i64 @fold_a5_4_i2_pI(i64 %I) {
 ; CHECK-LABEL: @fold_a5_4_i2_pI(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1
   %len = call i64 @strlen(i8* %ptr)
Index: llvm/test/Transforms/InstCombine/strncmp-4.ll
===================================================================
--- llvm/test/Transforms/InstCombine/strncmp-4.ll
+++ llvm/test/Transforms/InstCombine/strncmp-4.ll
@@ -15,30 +15,22 @@
 ; CHECK-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; CHECK-NEXT:    [[PCMP1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; CHECK-NEXT:    store i32 0, i32* [[PCMP1]], align 4
-; CHECK-NEXT:    [[CMP2:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A:%.*]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 2)
 ; CHECK-NEXT:    [[PCMP2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT:    store i32 [[CMP2]], i32* [[PCMP2]], align 4
-; CHECK-NEXT:    [[CMP3:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 3)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP2]], align 4
 ; CHECK-NEXT:    [[PCMP3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT:    store i32 [[CMP3]], i32* [[PCMP3]], align 4
-; CHECK-NEXT:    [[CMP4:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 4)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP3]], align 4
 ; CHECK-NEXT:    [[PCMP4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; CHECK-NEXT:    store i32 [[CMP4]], i32* [[PCMP4]], align 4
-; CHECK-NEXT:    [[CMP5:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 5)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP4]], align 4
 ; CHECK-NEXT:    [[PCMP5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; CHECK-NEXT:    store i32 [[CMP5]], i32* [[PCMP5]], align 4
-; CHECK-NEXT:    [[CMP6:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 6)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP5]], align 4
 ; CHECK-NEXT:    [[PCMP6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; CHECK-NEXT:    store i32 [[CMP6]], i32* [[PCMP6]], align 4
-; CHECK-NEXT:    [[CMP7:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 7)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP6]], align 4
 ; CHECK-NEXT:    [[PCMP7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; CHECK-NEXT:    store i32 [[CMP7]], i32* [[PCMP7]], align 4
-; CHECK-NEXT:    [[CMP8:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 8)
+; CHECK-NEXT:    store i32 -1, i32* [[PCMP7]], align 4
 ; CHECK-NEXT:    [[PCMP8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; CHECK-NEXT:    store i32 [[CMP8]], i32* [[PCMP8]], align 4
-; CHECK-NEXT:    [[CMP9:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 9)
+; CHECK-NEXT:    store i32 -1, i32* [[PCMP8]], align 4
 ; CHECK-NEXT:    [[PCMP9:%.*]] = getelementptr i32, i32* [[PCMP]], i64 9
-; CHECK-NEXT:    store i32 [[CMP9]], i32* [[PCMP9]], align 4
+; CHECK-NEXT:    store i32 -1, i32* [[PCMP9]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; p1 = a.a
@@ -110,18 +102,14 @@
 ; CHECK-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; CHECK-NEXT:    [[PCMP1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; CHECK-NEXT:    store i32 0, i32* [[PCMP1]], align 4
-; CHECK-NEXT:    [[CMP2:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A:%.*]], %struct.A* @a, i64 0, i32 1, i64 3), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i64 2)
 ; CHECK-NEXT:    [[PCMP2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT:    store i32 [[CMP2]], i32* [[PCMP2]], align 4
-; CHECK-NEXT:    [[CMP3:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 3), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i64 3)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP2]], align 4
 ; CHECK-NEXT:    [[PCMP3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT:    store i32 [[CMP3]], i32* [[PCMP3]], align 4
-; CHECK-NEXT:    [[CMP4:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 3), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i64 4)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP3]], align 4
 ; CHECK-NEXT:    [[PCMP4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; CHECK-NEXT:    store i32 [[CMP4]], i32* [[PCMP4]], align 4
-; CHECK-NEXT:    [[CMP5:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 3), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i64 5)
+; CHECK-NEXT:    store i32 1, i32* [[PCMP4]], align 4
 ; CHECK-NEXT:    [[PCMP5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; CHECK-NEXT:    store i32 [[CMP5]], i32* [[PCMP5]], align 4
+; CHECK-NEXT:    store i32 1, i32* [[PCMP5]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; p1 = &a.b[3]