Index: lib/Transforms/Scalar/NewGVN.cpp
===================================================================
--- lib/Transforms/Scalar/NewGVN.cpp
+++ lib/Transforms/Scalar/NewGVN.cpp
@@ -83,12 +83,14 @@
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/MemorySSA.h"
 #include "llvm/Transforms/Utils/PredicateInfo.h"
+#include "llvm/Transforms/Utils/VNCoercion.h"
 #include <unordered_map>
 #include <utility>
 #include <vector>
 
 using namespace llvm;
 using namespace PatternMatch;
 using namespace llvm::GVNExpression;
+using namespace llvm::VNCoercion;
 
 #define DEBUG_TYPE "newgvn"
 
 STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
@@ -337,6 +339,8 @@
   const Expression *checkSimplificationResults(Expression *, Instruction *,
                                                Value *);
   const Expression *performSymbolicEvaluation(Value *);
+  const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *,
+                                                Instruction *, MemoryAccess *);
   const Expression *performSymbolicLoadEvaluation(Instruction *);
   const Expression *performSymbolicStoreEvaluation(Instruction *);
   const Expression *performSymbolicCallEvaluation(Instruction *);
@@ -797,6 +801,71 @@
   return createStoreExpression(SI, StoreAccess);
 }
 
+// See if we can extract the value loaded from a pointer out of a store, an
+// earlier load, or a memory intrinsic that clobbers it.
+const Expression *
+NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
+                                    LoadInst *LI, Instruction *DepInst,
+                                    MemoryAccess *DefiningAccess) {
+  assert((!LI || LI->isSimple()) && "Not a simple load");
+
+  if (auto *DepSI = dyn_cast<StoreInst>(DepInst)) {
+    // Can't forward from non-atomic to atomic without violating the memory
+    // model. There is also no need to coerce if the types already match.
+    if (LI->isAtomic() > DepSI->isAtomic() ||
+        LoadType == DepSI->getValueOperand()->getType())
+      return nullptr;
+    int Offset = analyzeLoadFromClobberingStore(LoadType, LoadPtr, DepSI, DL);
+    if (Offset >= 0) {
+      if (auto *C = dyn_cast<Constant>(
+              lookupOperandLeader(DepSI->getValueOperand())))
+        return createConstantExpression(
+            getConstantStoreValueForLoad(C, Offset, LoadType, DL));
+    }
+
+  } else if (auto *DepLI = dyn_cast<LoadInst>(DepInst)) {
+    // Can't forward from non-atomic to atomic without violating the memory
+    // model.
+    // FIXME: Check why DepLI == LI would occur here.
+    if (DepLI == LI || LI->isAtomic() > DepLI->isAtomic())
+      return nullptr;
+    int Offset = analyzeLoadFromClobberingLoad(LoadType, LoadPtr, DepLI, DL);
+    if (Offset >= 0) {
+      if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI)))
+        if (auto *PossibleConstant =
+                getConstantLoadValueForLoad(C, Offset, LoadType, DL))
+          return createConstantExpression(PossibleConstant);
+    }
+
+  } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
+    int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
+    if (Offset >= 0) {
+      if (auto *PossibleConstant =
+              getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL))
+        return createConstantExpression(PossibleConstant);
+    }
+  }
+  // If this load really doesn't depend on anything, then we must be loading
+  // an undef value. This can happen when loading from a fresh allocation with
+  // no intervening stores, for example.
+  else if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI))
+    return createConstantExpression(UndefValue::get(LoadType));
+  // If this load occurs right after a lifetime begin, the loaded value is
+  // undefined.
+  else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) {
+    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
+      return createConstantExpression(UndefValue::get(LoadType));
+  }
+  // If this load follows a calloc (which zero-initializes memory), the
+  // loaded value is zero.
+  else if (isCallocLikeFn(DepInst, TLI))
+    return createConstantExpression(Constant::getNullValue(LoadType));
+
+  return nullptr;
+}
+
 const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) {
   auto *LI = cast<LoadInst>(I);
@@ -818,6 +887,13 @@
       // If the defining instruction is not reachable, replace with undef.
       if (!ReachableBlocks.count(DefiningInst->getParent()))
         return createConstantExpression(UndefValue::get(LI->getType()));
+      // This handles stores and memory intrinsics; we only try to coerce
+      // when the defining access has a different type than the load.
+      const Expression *CoercionResult = performSymbolicLoadCoercion(
+          LI->getType(), LI->getPointerOperand(), LI, DefiningInst,
+          DefiningAccess);
+      if (CoercionResult)
+        return CoercionResult;
     }
   }
Index: test/Transforms/NewGVN/calloc-load-removal.ll
===================================================================
--- test/Transforms/NewGVN/calloc-load-removal.ll
+++ test/Transforms/NewGVN/calloc-load-removal.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
 ; RUN: opt -S -basicaa -newgvn < %s | FileCheck %s
 ; RUN: opt -S -basicaa -newgvn -disable-simplify-libcalls < %s | FileCheck %s -check-prefix=CHECK_NO_LIBCALLS
 ; Check that loads from calloc are recognized as being zero.
Index: test/Transforms/NewGVN/fold-const-expr.ll
===================================================================
--- test/Transforms/NewGVN/fold-const-expr.ll
+++ test/Transforms/NewGVN/fold-const-expr.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
 ; GVN failed to do constant expression folding and expanded
 ; them unfolded in many places, producing exponentially large const
 ; expressions. As a result, the compilation never finished.
Index: test/Transforms/NewGVN/malloc-load-removal.ll
===================================================================
--- test/Transforms/NewGVN/malloc-load-removal.ll
+++ test/Transforms/NewGVN/malloc-load-removal.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
 ; RUN: opt -S -basicaa -newgvn < %s | FileCheck %s
 ; RUN: opt -S -basicaa -newgvn -disable-simplify-libcalls < %s | FileCheck %s -check-prefix=CHECK_NO_LIBCALLS
 ; PR13694
Index: test/Transforms/NewGVN/pr17732.ll
===================================================================
--- test/Transforms/NewGVN/pr17732.ll
+++ test/Transforms/NewGVN/pr17732.ll
@@ -1,6 +1,4 @@
-; XFAIL: *
 ; RUN: opt -newgvn -S -o - < %s | FileCheck %s
-; NewGVN fails this due to missing load coercion
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
Index: test/Transforms/NewGVN/rle.ll
===================================================================
--- /dev/null
+++ test/Transforms/NewGVN/rle.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -data-layout="e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -newgvn -S -die | FileCheck %s
+; RUN: opt < %s -data-layout="E-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32" -basicaa -newgvn -S -die | FileCheck %s
+; memset -> i16 forwarding.
+define signext i16 @memset_to_i16_local(i16* %A) nounwind ssp {
+entry:
+  %conv = bitcast i16* %A to i8*
+  tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
+  %arrayidx = getelementptr inbounds i16, i16* %A, i64 42
+  %tmp2 = load i16, i16* %arrayidx
+  ret i16 %tmp2
+; CHECK-LABEL: @memset_to_i16_local(
+; CHECK-NOT: load
+; CHECK: ret i16 257
+}
+
+@GCst = constant {i32, float, i32 } { i32 42, float 14., i32 97 }
+@GCst_as1 = addrspace(1) constant {i32, float, i32 } { i32 42, float 14., i32 97 }
+
+; memcpy -> float forwarding.
+define float @memcpy_to_float_local(float* %A) nounwind ssp {
+entry:
+  %conv = bitcast float* %A to i8*
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
+  %arrayidx = getelementptr inbounds float, float* %A, i64 1
+  %tmp2 = load float, float* %arrayidx
+  ret float %tmp2
+; CHECK-LABEL: @memcpy_to_float_local(
+; CHECK-NOT: load
+; CHECK: ret float 1.400000e+01
+}
+
+; memcpy from address space 1 -> float forwarding.
+define float @memcpy_to_float_local_as1(float* %A) nounwind ssp {
+entry:
+  %conv = bitcast float* %A to i8*
+  tail call void @llvm.memcpy.p0i8.p1i8.i64(i8* %conv, i8 addrspace(1)* bitcast ({i32, float, i32 } addrspace(1)* @GCst_as1 to i8 addrspace(1)*), i64 12, i32 1, i1 false)
+  %arrayidx = getelementptr inbounds float, float* %A, i64 1
+  %tmp2 = load float, float* %arrayidx
+  ret float %tmp2
+; CHECK-LABEL: @memcpy_to_float_local_as1(
+; CHECK-NOT: load
+; CHECK: ret float 1.400000e+01
+}
+
+; PR6642: load from a buffer zero-initialized by memset.
+define i32 @memset_to_load() nounwind readnone {
+entry:
+  %x = alloca [256 x i32], align 4
+  %tmp = bitcast [256 x i32]* %x to i8*
+  call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 1024, i32 4, i1 false)
+  %arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %x, i32 0, i32 0
+  %tmp1 = load i32, i32* %arraydecay
+  ret i32 %tmp1
+; CHECK-LABEL: @memset_to_load(
+; CHECK: ret i32 0
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
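
For illustration, the store -> load case that the NewGVN.cpp change enables is not exercised directly by the tests above; the following is a hand-written sketch, not part of the patch, and all names in it are made up. The store and the load use the same address at different 32-bit types, so performSymbolicLoadCoercion gets offset 0 back from analyzeLoadFromClobberingStore and re-materializes the stored bits as a float constant via getConstantStoreValueForLoad:

define float @store_i32_load_float(float* %p) {
entry:
  %q = bitcast float* %p to i32*
  ; 1065353216 is 0x3F800000, the bit pattern of float 1.0.
  store i32 1065353216, i32* %q
  ; Same address, different type: with this patch, NewGVN should evaluate
  ; this load to the constant 1.0 instead of leaving it in place.
  %v = load float, float* %p
  ret float %v
}

Running opt -basicaa -newgvn -die over this should fold the load and produce "ret float 1.000000e+00"; the store itself remains, since %p may be read by the caller.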