diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -2148,41 +2148,6 @@ GEP.getName()); } - // Skip if GEP source element type is scalable. The type alloc size is unknown - // at compile-time. - if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) { - unsigned AS = GEP.getPointerAddressSpace(); - if (GEP.getOperand(1)->getType()->getScalarSizeInBits() == - DL.getIndexSizeInBits(AS)) { - uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); - - bool Matched = false; - uint64_t C; - Value *V = nullptr; - if (TyAllocSize == 1) { - V = GEP.getOperand(1); - Matched = true; - } else if (match(GEP.getOperand(1), - m_AShr(m_Value(V), m_ConstantInt(C)))) { - if (TyAllocSize == 1ULL << C) - Matched = true; - } else if (match(GEP.getOperand(1), - m_SDiv(m_Value(V), m_ConstantInt(C)))) { - if (TyAllocSize == C) - Matched = true; - } - - if (Matched) { - // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) - // to (bitcast Y) - Value *Y; - if (match(V, m_Sub(m_PtrToInt(m_Value(Y)), - m_PtrToInt(m_Specific(GEP.getOperand(0)))))) - return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y, GEPType); - } - } - } - // We do not handle pointer-vector geps here. if (GEPType->isVectorTy()) return nullptr; diff --git a/llvm/test/Other/regr-gep-provenance.ll b/llvm/test/Other/regr-gep-provenance.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Other/regr-gep-provenance.ll @@ -0,0 +1,32 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -S -O3 -o - | FileCheck %s + +; This test verifies that we do not optimize GEPs in a way that would lose pointer provenance (e.g. 
+; by optimizing a sequence of operations that results in `null` into a plain `null`, which
+; does not carry any provenance in particular).
+
+define i8 @apple(i8* %x) unnamed_addr #0 {
+; CHECK-LABEL: @apple(
+; CHECK-NEXT:    [[E:%.*]] = load i8, i8* [[X:%.*]], align 1
+; CHECK-NEXT:    ret i8 [[E]]
+;
+  %a = ptrtoint i8* %x to i64
+  %b = sub i64 0, %a
+  %c = getelementptr i8, i8* %x, i64 %b
+  %d = getelementptr i8, i8* %c, i64 %a
+  %e = load i8, i8* %d, align 1
+  ret i8 %e
+}
+
+define i8* @banana(i8* %b) {
+; CHECK-LABEL: @banana(
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint i8* [[B:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub i64 0, [[B_PTR]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, i8* [[B]], i64 [[SUB]]
+; CHECK-NEXT:    ret i8* [[GEP]]
+;
+  %b_ptr = ptrtoint i8* %b to i64
+  %sub = sub i64 0, %b_ptr
+  %gep = getelementptr i8, i8* %b, i64 %sub
+  ret i8* %gep
+}
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -1087,7 +1087,11 @@
 define %struct.C* @test45(%struct.C* %c1, %struct.C** %c2) {
 ; CHECK-LABEL: @test45(
-; CHECK-NEXT:    [[GEP:%.*]] = bitcast %struct.C** [[C2:%.*]] to %struct.C*
+; CHECK-NEXT:    [[PTRTOINT1:%.*]] = ptrtoint %struct.C* [[C1:%.*]] to i64
+; CHECK-NEXT:    [[PTRTOINT2:%.*]] = ptrtoint %struct.C** [[C2:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub i64 [[PTRTOINT2]], [[PTRTOINT1]]
+; CHECK-NEXT:    [[SHR:%.*]] = sdiv i64 [[SUB]], 7
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [[STRUCT_C:%.*]], %struct.C* [[C1]], i64 [[SHR]]
 ; CHECK-NEXT:    ret %struct.C* [[GEP]]
 ;
 %ptrtoint1 = ptrtoint %struct.C* %c1 to i64