Index: lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- lib/CodeGen/CodeGenPrepare.cpp
+++ lib/CodeGen/CodeGenPrepare.cpp
@@ -229,8 +229,10 @@
 
   /// Keeps track of non-local addresses that have been sunk into a block.
   /// This allows us to avoid inserting duplicate code for blocks with
-  /// multiple load/stores of the same address.
-  ValueMap<Value*, Value*> SunkAddrs;
+  /// multiple load/stores of the same address. The usage of WeakTrackingVH
+  /// enables SunkAddrs to be treated as a cache whose entries can be
+  /// invalidated if a sunken address computation has been erased.
+  ValueMap<Value*, WeakTrackingVH> SunkAddrs;
 
   /// Keeps track of all instructions inserted for the current function.
   SetOfInstrs InsertedInsts;
@@ -4354,9 +4356,13 @@
 
   // Now that we determined the addressing expression we want to use and know
   // that we have to sink it into this block. Check to see if we have already
-  // done this for some other load/store instr in this block. If so, reuse the
-  // computation.
-  Value *&SunkAddr = SunkAddrs[Addr];
+  // done this for some other load/store instr in this block. If so, reuse
+  // the computation. Before attempting reuse, check if the address is valid
+  // as it may have been erased.
+
+  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
+
+  Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
   if (SunkAddr) {
     DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                  << *MemoryInst << "\n");
@@ -4581,6 +4587,9 @@
   }
 
   MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
+  // Store the newly computed address into the cache. In the case we reused a
+  // value, this should be idempotent.
+  SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
 
   // If we have no uses, recursively delete the value and all dead instructions
   // using it.
Index: test/Transforms/CodeGenPrepare/Mips/lit.local.cfg
===================================================================
--- /dev/null
+++ test/Transforms/CodeGenPrepare/Mips/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'Mips' in config.root.targets:
+    config.unsupported = True
Index: test/Transforms/CodeGenPrepare/Mips/pr35209.ll
===================================================================
--- /dev/null
+++ test/Transforms/CodeGenPrepare/Mips/pr35209.ll
@@ -0,0 +1,64 @@
+; RUN: opt -S -mtriple=mips64-mti-linux-gnu -codegenprepare < %s | FileCheck %s
+
+; Test that an address sunk from a dominating bb and used in a select that is
+; erased along with its trivially dead operand is not reused if the same
+; address computation occurs after the select. Previously, this caused an ICE.
+
+%struct.az = type { i32, %struct.bt* }
+%struct.bt = type { i32 }
+%struct.f = type { %struct.ax, %union.anon }
+%struct.ax = type { %struct.az* }
+%union.anon = type { %struct.bd }
+%struct.bd = type { i64 }
+%struct.bg = type { i32, i32 }
+%struct.ap = type { i32, i32 }
+
+@ch = common global %struct.f zeroinitializer, align 8
+@j = common global %struct.az* null, align 8
+@ck = common global i32 0, align 4
+@h = common global i32 0, align 4
+@.str = private unnamed_addr constant [1 x i8] zeroinitializer, align 1
+
+define internal void @probestart() {
+entry:
+  %0 = load %struct.az*, %struct.az** @j, align 8
+  %bw = getelementptr inbounds %struct.az, %struct.az* %0, i64 0, i32 1
+  %1 = load i32, i32* @h, align 4
+  %cond = icmp eq i32 %1, 0
+  br i1 %cond, label %sw.bb, label %cl
+
+sw.bb:                                            ; preds = %entry
+  %call = tail call inreg { i64, i64 } @ba(i32* bitcast (%struct.f* @ch to i32*))
+  br label %cl
+
+cl:                                               ; preds = %sw.bb, %entry
+  %2 = load %struct.bt*, %struct.bt** %bw, align 8
+  %tobool = icmp eq %struct.bt* %2, null
+  %3 = load i32, i32* @ck, align 4
+  %.sink5 = select i1 %tobool, i32* getelementptr (%struct.bg, %struct.bg* bitcast (%union.anon* getelementptr inbounds (%struct.f, %struct.f* @ch, i64 0, i32 1) to %struct.bg*), i64 0, i32 1), i32* getelementptr (%struct.ap, %struct.ap* bitcast (%union.anon* getelementptr inbounds (%struct.f, %struct.f* @ch, i64 0, i32 1) to %struct.ap*), i64 0, i32 1)
+  store i32 %3, i32* %.sink5, align 4
+  store i32 1, i32* bitcast (i64* getelementptr inbounds (%struct.f, %struct.f* @ch, i64 0, i32 1, i32 0, i32 0) to i32*), align 8
+  %4 = load %struct.bt*, %struct.bt** %bw, align 8
+  tail call void (i8*, ...) @a(i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str, i64 0, i64 0), %struct.bt* %4)
+  ret void
+}
+
+; CHECK-LABEL: @probestart()
+; CHECK-LABEL: entry:
+; CHECK: %[[I0:[0-9]+]] = load %struct.az*, %struct.az** @j
+; CHECK-LABEL: cl:
+
+; CHECK-NOT: %{{[0-9]+}} = load %struct.bt*, %struct.bt** %bw
+; CHECK-NOT: %{{[.a-z0-9]+}} = select
+; CHECK-NOT: %{{[0-9]+}} = load %struct.bt*, %struct.bt** %bw
+
+; CHECK: %[[I1:[0-9]+]] = bitcast %struct.az* %[[I0]] to i8*
+; CHECK-NEXT: %sunkaddr = getelementptr i8, i8* %[[I1]], i64 8
+; CHECK-NEXT: %[[I2:[0-9]+]] = bitcast i8* %sunkaddr to %struct.bt**
+; CHECK-NEXT: %{{[0-9]+}} = load %struct.bt*, %struct.bt** %[[I2]]
+; CHECK-NEXT: tail call void (i8*, ...) @a
+
+declare inreg { i64, i64 } @ba(i32*)
+
+declare void @a(i8*, ...)
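Note (not part of the patch): below is a minimal, self-contained sketch of the caching pattern the change relies on, written against LLVM's public ValueMap / WeakTrackingVH API. The module, function and value names and the local SunkAddrs map are invented for the demo; in CodeGenPrepare the real SunkAddrs is a pass member keyed by the sunk address expression. A WeakTrackingVH keeps tracking its value across RAUW, but once the cached instruction is erased, pointsToAliveValue() returns false, which is how a stale entry is detected instead of handing back a dangling Value*.

// Demo: a WeakTrackingVH-based cache entry goes "dead" when the cached
// instruction is erased, so it must be re-checked before being reused.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("pr35209-demo", Ctx);
  IRBuilder<> B(Ctx);

  // void @f(i32 %x): one trivially dead add we can later erase.
  Function *F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt32Ty()}, false),
      Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  B.SetInsertPoint(BB);
  Argument *Addr = &*F->arg_begin();            // stands in for the address key
  Value *Sunk = B.CreateAdd(Addr, B.getInt32(1), "sunk"); // the "sunken" computation
  B.CreateRetVoid();

  // Cache the computation behind a weak tracking handle, as the patch does.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;
  SunkAddrs[Addr] = WeakTrackingVH(Sunk);

  WeakTrackingVH VH = SunkAddrs[Addr];
  outs() << "alive before erase: " << (VH.pointsToAliveValue() ? "yes" : "no") << "\n";

  // Erase the trivially dead computation, as CodeGenPrepare may do once the
  // select (and its dead operand) has been folded away.
  cast<Instruction>(Sunk)->eraseFromParent();

  // The entry is still in the map, but it no longer points to a live value,
  // so a lookup must not reuse it.
  WeakTrackingVH Stale = SunkAddrs[Addr];
  Value *SunkAddr = Stale.pointsToAliveValue() ? Stale : nullptr;
  outs() << "reusable after erase: " << (SunkAddr ? "yes" : "no") << "\n";
  return 0;
}

Built against an LLVM install (e.g. clang++ demo.cpp $(llvm-config --cxxflags --ldflags --libs core support --system-libs)), this prints "yes" then "no", mirroring the check the patch adds before reusing a sunk address.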