diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -763,6 +763,17 @@
   /// Determine if one instruction is the same operation as another.
   bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const LLVM_READONLY;
 
+  /// This function determines if the specified instruction has the same
+  /// "special" characteristics as the current one. This means that opcode
+  /// specific details are the same. As a common example, if we are comparing
+  /// loads, then hasSameSpecialState would compare the alignments (among
+  /// other things).
+  /// @returns true if the specified instruction has the same opcode specific
+  /// characteristics as the current one, i.e. the two instructions have the
+  /// same state.
+  bool hasSameSpecialState(const Instruction *I2,
+                           bool IgnoreAlignment = false) const LLVM_READONLY;
+
   /// Return true if there are any uses of this instruction in blocks other than
   /// the specified block. Note that PHI nodes are considered to evaluate their
   /// operands in the corresponding predecessor block.
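A minimal usage sketch of the newly exposed query (the helper `loadsMatchIgnoringAlignment` and its surrounding context are illustrative only, not part of this patch): hasSameSpecialState() may only be called on instructions with matching opcodes, and then compares the opcode specific details, e.g. volatility, atomic ordering and, unless ignored, alignment for memory accesses.

```cpp
// Illustrative sketch only; `loadsMatchIgnoringAlignment` is not part of this
// patch. It assumes L0 and L1 are loads gathered by some client pass.
#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool loadsMatchIgnoringAlignment(const Instruction *L0,
                                        const Instruction *L1) {
  // hasSameSpecialState() asserts that the opcodes match, so guard first.
  if (L0->getOpcode() != L1->getOpcode())
    return false;
  // Compares the opcode specific state (e.g. volatility and atomic ordering
  // for loads) while ignoring the alignment of the two accesses.
  return L0->hasSameSpecialState(L1, /*IgnoreAlignment=*/true);
}
```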
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -480,11 +480,11 @@
   }
 }
 
-/// Return true if both instructions have the same special state. This must be
-/// kept in sync with FunctionComparator::cmpOperations in
+/// This must be kept in sync with FunctionComparator::cmpOperations in
 /// lib/Transforms/IPO/MergeFunctions.cpp.
-static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
-                                 bool IgnoreAlignment = false) {
+bool Instruction::hasSameSpecialState(const Instruction *I2,
+                                      bool IgnoreAlignment) const {
+  auto I1 = this;
   assert(I1->getOpcode() == I2->getOpcode() &&
          "Can not compare special state of different instructions");
 
@@ -563,7 +563,7 @@
 
   // If both instructions have no operands, they are identical.
   if (getNumOperands() == 0 && I->getNumOperands() == 0)
-    return haveSameSpecialState(this, I);
+    return this->hasSameSpecialState(I);
 
   // We have two instructions of identical opcode and #operands. Check to see
   // if all operands are the same.
@@ -577,7 +577,7 @@
                       otherPHI->block_begin());
   }
 
-  return haveSameSpecialState(this, I);
+  return this->hasSameSpecialState(I);
 }
 
 // Keep this in sync with FunctionComparator::cmpOperations in
@@ -603,7 +603,7 @@
         getOperand(i)->getType() != I->getOperand(i)->getType())
       return false;
 
-  return haveSameSpecialState(this, I, IgnoreAlignment);
+  return this->hasSameSpecialState(I, IgnoreAlignment);
 }
 
 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
diff --git a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
--- a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -78,6 +78,7 @@
 #include "llvm/Transforms/Scalar/MergedLoadStoreMotion.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Support/Debug.h"
@@ -191,11 +192,16 @@
 
     MemoryLocation Loc0 = MemoryLocation::get(Store0);
     MemoryLocation Loc1 = MemoryLocation::get(Store1);
-    if (AA->isMustAlias(Loc0, Loc1) && Store0->isSameOperationAs(Store1) &&
+
+    if (AA->isMustAlias(Loc0, Loc1) &&
         !isStoreSinkBarrierInRange(*Store1->getNextNode(), BB1->back(), Loc1) &&
-        !isStoreSinkBarrierInRange(*Store0->getNextNode(), BB0->back(), Loc0)) {
+        !isStoreSinkBarrierInRange(*Store0->getNextNode(), BB0->back(), Loc0) &&
+        Store0->hasSameSpecialState(Store1) &&
+        CastInst::isBitOrNoopPointerCastable(
+            Store0->getValueOperand()->getType(),
+            Store1->getValueOperand()->getType(),
+            Store0->getModule()->getDataLayout()))
       return Store1;
-    }
   }
   return nullptr;
 }
@@ -254,6 +260,13 @@
   S0->applyMergedLocation(S0->getDebugLoc(), S1->getDebugLoc());
   S0->mergeDIAssignID(S1);
 
+  // Insert a bitcast for stores with conflicting value types (or just use the
+  // original value if the types already match).
+  IRBuilder<> Builder(S0);
+  auto Cast = Builder.CreateBitOrPointerCast(S0->getValueOperand(),
+                                             S1->getValueOperand()->getType());
+  S0->setOperand(0, Cast);
+
   // Create the new store to be inserted at the join point.
   StoreInst *SNew = cast<StoreInst>(S0->clone());
   SNew->insertBefore(&*InsertPt);
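As a sketch of how the two new checks and the inserted cast compose (the helper names `canMergeStoreValues` and `castStoredValue` are illustrative assumptions, not code from this patch): the two stores must agree on their opcode specific state, and their stored values must be convertible by a bitcast or a same-sized pointer/integer cast, which is exactly the cast later materialized in front of S0.

```cpp
// Illustrative sketch only, not the patch itself. Assumes S0 and S1 are the
// two must-aliasing stores found in the sinking candidates.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

static bool canMergeStoreValues(StoreInst *S0, StoreInst *S1) {
  const DataLayout &DL = S0->getModule()->getDataLayout();
  // Same opcode specific state (volatility, ordering, alignment), and value
  // types that a bitcast or a same-sized pointer<->integer cast can reconcile.
  return S0->hasSameSpecialState(S1) &&
         CastInst::isBitOrNoopPointerCastable(
             S0->getValueOperand()->getType(),
             S1->getValueOperand()->getType(), DL);
}

static void castStoredValue(StoreInst *S0, StoreInst *S1) {
  // CreateBitOrPointerCast returns the original value unchanged when the
  // types already match, so this is also safe for same-typed stores.
  IRBuilder<> Builder(S0);
  Value *Cast = Builder.CreateBitOrPointerCast(S0->getValueOperand(),
                                               S1->getValueOperand()->getType());
  S0->setOperand(0, Cast);
}
```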
diff --git a/llvm/test/Transforms/MergedLoadStoreMotion/st_sink_conflict_type.ll b/llvm/test/Transforms/MergedLoadStoreMotion/st_sink_conflict_type.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/MergedLoadStoreMotion/st_sink_conflict_type.ll
@@ -0,0 +1,281 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -passes=mldst-motion -S < %s | FileCheck %s
+
+define internal void @sink_conflict(ptr %this.64.val, half %val1, i16 %val2, i32 %val3) align 2 {
+; CHECK-LABEL: define internal void @sink_conflict
+; CHECK-SAME: (ptr [[THIS_64_VAL:%.*]], half [[VAL1:%.*]], i16 [[VAL2:%.*]], i32 [[VAL3:%.*]]) align 2 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_NOT_NOT:%.*]] = icmp eq i32 [[VAL3]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT_NOT]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast half [[VAL1]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[VAL2]] to half
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[VAL2_SINK:%.*]] = phi i16 [ [[TMP0]], [[IF_THEN]] ], [ [[VAL2]], [[IF_ELSE]] ]
+; CHECK-NEXT: [[VAL1_SINK:%.*]] = phi half [ [[TMP1]], [[IF_THEN]] ], [ [[VAL1]], [[IF_ELSE]] ]
+; CHECK-NEXT: store i16 [[VAL2_SINK]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store half [[VAL1_SINK]], ptr [[TMP2]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+  %cmp.not.not = icmp eq i32 %val3, 0
+  br i1 %cmp.not.not, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+  store half %val1, ptr %this.64.val, align 2
+  %add.ptr.then = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store i16 %val2, ptr %add.ptr.then, align 2
+  br label %if.end
+
+if.else: ; preds = %entry
+  %add.ptr.else = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store half %val1, ptr %add.ptr.else, align 2
+  store i16 %val2, ptr %this.64.val, align 2
+  br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+  ret void
+}
+
+define internal void @sink_volatile(ptr %this.64.val, half %val1, i16 %val2, i32 %val3) align 2 {
+; CHECK-LABEL: define internal void @sink_volatile
+; CHECK-SAME: (ptr [[THIS_64_VAL:%.*]], half [[VAL1:%.*]], i16 [[VAL2:%.*]], i32 [[VAL3:%.*]]) align 2 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_NOT_NOT:%.*]] = icmp eq i32 [[VAL3]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT_NOT]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store volatile half [[VAL1]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i16 [[VAL2]] to half
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: store volatile i16 [[VAL2]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[VAL1_SINK:%.*]] = phi half [ [[TMP0]], [[IF_THEN]] ], [ [[VAL1]], [[IF_ELSE]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store half [[VAL1_SINK]], ptr [[TMP1]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+  %cmp.not.not = icmp eq i32 %val3, 0
+  br i1 %cmp.not.not, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+  store volatile half %val1, ptr %this.64.val, align 2
+  %add.ptr.then = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store i16 %val2, ptr %add.ptr.then, align 2
+  br label %if.end
+
+if.else: ; preds = %entry
+  %add.ptr.else = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store half %val1, ptr %add.ptr.else, align 2
+  store volatile i16 %val2, ptr %this.64.val, align 2
+  br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+  ret void
+}
+
+define internal void @sink_ptr_to_int(ptr %this.64.val, ptr %val1, i64 %val2, i32 %val3) align 2 {
+; CHECK-LABEL: define internal void @sink_ptr_to_int
+; CHECK-SAME: (ptr [[THIS_64_VAL:%.*]], ptr [[VAL1:%.*]], i64 [[VAL2:%.*]], i32 [[VAL3:%.*]]) align 2 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_NOT_NOT:%.*]] = icmp eq i32 [[VAL3]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT_NOT]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[VAL1]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i64 [[VAL2]] to ptr
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[VAL2_SINK:%.*]] = phi i64 [ [[TMP0]], [[IF_THEN]] ], [ [[VAL2]], [[IF_ELSE]] ]
+; CHECK-NEXT: [[VAL1_SINK:%.*]] = phi ptr [ [[TMP1]], [[IF_THEN]] ], [ [[VAL1]], [[IF_ELSE]] ]
+; CHECK-NEXT: store i64 [[VAL2_SINK]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store ptr [[VAL1_SINK]], ptr [[TMP2]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+  %cmp.not.not = icmp eq i32 %val3, 0
+  br i1 %cmp.not.not, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+  store ptr %val1, ptr %this.64.val, align 2
+  %add.ptr.then = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store i64 %val2, ptr %add.ptr.then, align 2
+  br label %if.end
+
+if.else: ; preds = %entry
+  %add.ptr.else = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store ptr %val1, ptr %add.ptr.else, align 2
+  store i64 %val2, ptr %this.64.val, align 2
+  br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+  ret void
+}
+
+
+
+define internal void @sink_not_castable(ptr %this.64.val, {i32, i32} %val1, i64 %val2, i32 %val3) align 2 {
+; CHECK-LABEL: define internal void @sink_not_castable
+; CHECK-SAME: (ptr [[THIS_64_VAL:%.*]], { i32, i32 } [[VAL1:%.*]], i64 [[VAL2:%.*]], i32 [[VAL3:%.*]]) align 2 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_NOT_NOT:%.*]] = icmp eq i32 [[VAL3]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT_NOT]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store { i32, i32 } [[VAL1]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: [[ADD_PTR_THEN:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store i64 [[VAL2]], ptr [[ADD_PTR_THEN]], align 2
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: [[ADD_PTR_ELSE:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store { i32, i32 } [[VAL1]], ptr [[ADD_PTR_ELSE]], align 2
+; CHECK-NEXT: store i64 [[VAL2]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: ret void
+;
+entry:
+  %cmp.not.not = icmp eq i32 %val3, 0
+  br i1 %cmp.not.not, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+  store {i32, i32} %val1, ptr %this.64.val, align 2
+  %add.ptr.then = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store i64 %val2, ptr %add.ptr.then, align 2
+  br label %if.end
+
+if.else: ; preds = %entry
+  %add.ptr.else = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store {i32, i32} %val1, ptr %add.ptr.else, align 2
+  store i64 %val2, ptr %this.64.val, align 2
+  br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+  ret void
+}
+
+define internal void @sink_addrspace(ptr %this.64.val, ptr %val1, ptr addrspace(1) %val2, i32 %val3) align 2 {
+; CHECK-LABEL: define internal void @sink_addrspace
+; CHECK-SAME: (ptr [[THIS_64_VAL:%.*]], ptr [[VAL1:%.*]], ptr addrspace(1) [[VAL2:%.*]], i32 [[VAL3:%.*]]) align 2 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_NOT_NOT:%.*]] = icmp eq i32 [[VAL3]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT_NOT]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store ptr [[VAL1]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: [[ADD_PTR_THEN:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store ptr addrspace(1) [[VAL2]], ptr [[ADD_PTR_THEN]], align 2
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: [[ADD_PTR_ELSE:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store ptr [[VAL1]], ptr [[ADD_PTR_ELSE]], align 2
+; CHECK-NEXT: store ptr addrspace(1) [[VAL2]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: ret void
+;
+entry:
+  %cmp.not.not = icmp eq i32 %val3, 0
+  br i1 %cmp.not.not, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+  store ptr %val1, ptr %this.64.val, align 2
+  %add.ptr.then = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store ptr addrspace(1) %val2, ptr %add.ptr.then, align 2
+  br label %if.end
+
+if.else: ; preds = %entry
+  %add.ptr.else = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store ptr %val1, ptr %add.ptr.else, align 2
+  store ptr addrspace(1) %val2, ptr %this.64.val, align 2
+  br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+  ret void
+}
+
+
+define internal void @sink_addrspace2(ptr %this.64.val, ptr addrspace(1) %val1, ptr addrspace(1) %val2, i32 %val3) align 2 {
+; CHECK-LABEL: define internal void @sink_addrspace2
+; CHECK-SAME: (ptr [[THIS_64_VAL:%.*]], ptr addrspace(1) [[VAL1:%.*]], ptr addrspace(1) [[VAL2:%.*]], i32 [[VAL3:%.*]]) align 2 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_NOT_NOT:%.*]] = icmp eq i32 [[VAL3]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT_NOT]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[VAL2_SINK:%.*]] = phi ptr addrspace(1) [ [[VAL1]], [[IF_THEN]] ], [ [[VAL2]], [[IF_ELSE]] ]
+; CHECK-NEXT: [[VAL1_SINK:%.*]] = phi ptr addrspace(1) [ [[VAL2]], [[IF_THEN]] ], [ [[VAL1]], [[IF_ELSE]] ]
+; CHECK-NEXT: store ptr addrspace(1) [[VAL2_SINK]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store ptr addrspace(1) [[VAL1_SINK]], ptr [[TMP0]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+  %cmp.not.not = icmp eq i32 %val3, 0
+  br i1 %cmp.not.not, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+  store ptr addrspace(1) %val1, ptr %this.64.val, align 2
+  %add.ptr.then = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store ptr addrspace(1) %val2, ptr %add.ptr.then, align 2
+  br label %if.end
+
+if.else: ; preds = %entry
+  %add.ptr.else = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store ptr addrspace(1) %val1, ptr %add.ptr.else, align 2
+  store ptr addrspace(1) %val2, ptr %this.64.val, align 2
+  br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+  ret void
+}
+
+define internal void @sink_addrspace3(ptr %this.64.val, half %val1, i16 %val2, i32 %val3) align 2 {
+; CHECK-LABEL: define internal void @sink_addrspace3
+; CHECK-SAME: (ptr [[THIS_64_VAL:%.*]], half [[VAL1:%.*]], i16 [[VAL2:%.*]], i32 [[VAL3:%.*]]) align 2 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[THIS_AS_CASTED:%.*]] = addrspacecast ptr [[THIS_64_VAL]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMP_NOT_NOT:%.*]] = icmp eq i32 [[VAL3]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT_NOT]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: store half [[VAL1]], ptr addrspace(1) [[THIS_AS_CASTED]], align 2
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i16 [[VAL2]] to half
+; CHECK-NEXT: br label [[IF_END:%.*]]
+; CHECK: if.else:
+; CHECK-NEXT: store i16 [[VAL2]], ptr [[THIS_64_VAL]], align 2
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[VAL1_SINK:%.*]] = phi half [ [[TMP0]], [[IF_THEN]] ], [ [[VAL1]], [[IF_ELSE]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[THIS_64_VAL]], i64 16
+; CHECK-NEXT: store half [[VAL1_SINK]], ptr [[TMP1]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+  %this.as.casted = addrspacecast ptr %this.64.val to ptr addrspace(1)
+  %cmp.not.not = icmp eq i32 %val3, 0
+  br i1 %cmp.not.not, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+  store half %val1, ptr addrspace(1) %this.as.casted, align 2
+  %add.ptr.then = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store i16 %val2, ptr %add.ptr.then, align 2
+  br label %if.end
+
+if.else: ; preds = %entry
+  %add.ptr.else = getelementptr inbounds i8, ptr %this.64.val, i64 16
+  store half %val1, ptr %add.ptr.else, align 2
+  store i16 %val2, ptr %this.64.val, align 2
+  br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+  ret void
+}
diff --git a/llvm/test/Transforms/PhaseOrdering/bitcast-store-branch.ll b/llvm/test/Transforms/PhaseOrdering/bitcast-store-branch.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/bitcast-store-branch.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -O3 -S < %s | FileCheck %s
+
+%struct.ss = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+
+define internal void @phantomLoad(ptr %p, ptr %y, ptr %x) {
+entry:
+  %0 = load i32, ptr %x
+  store i32 %0, ptr %y
+  ret void
+}
+
+define ptr @parent(ptr align 8 dereferenceable(72) %f, half %val1, i16 %val2, i32 %val3) align 2 {
+; CHECK-LABEL: define nonnull ptr @parent
+; CHECK-SAME: (ptr readonly returned align 8 dereferenceable(72) [[F:%.*]], half [[VAL1:%.*]], i16 [[VAL2:%.*]], i32 [[VAL3:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] align 2 {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[F]], i64 64
+; CHECK-NEXT: [[F_VAL:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[CMP_NOT_NOT_I:%.*]] = icmp eq i32 [[VAL3]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast half [[VAL1]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[VAL2]] to half
+; CHECK-NEXT: [[VAL2_SINK_I:%.*]] = select i1 [[CMP_NOT_NOT_I]], i16 [[TMP1]], i16 [[VAL2]]
+; CHECK-NEXT: [[VAL1_SINK_I:%.*]] = select i1 [[CMP_NOT_NOT_I]], half [[TMP2]], half [[VAL1]]
+; CHECK-NEXT: store i16 [[VAL2_SINK_I]], ptr [[F_VAL]], align 2
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[F_VAL]], i64 16
+; CHECK-NEXT: store half [[VAL1_SINK_I]], ptr [[TMP3]], align 2
+; CHECK-NEXT: ret ptr [[F]]
+;
+entry:
+  call void @badChild(ptr align 8 dereferenceable(72) %f, half %val1, i16 %val2, i32 %val3) #4
+  ret ptr %f
+}
+
+
+define internal void @badChild(ptr align 8 dereferenceable(72) %this, half %val1, i16 %val2, i32 %val3) align 2 {
+entry:
+  %othergep = getelementptr inbounds %struct.ss, ptr %this, i64 0, i32 2
+  %load0 = load ptr, ptr %othergep, align 8
+  %x = alloca i32
+  %y = alloca i32
+  call void @phantomLoad(ptr %load0, ptr %x, ptr %y)
+  %val1.cast = bitcast half %val1 to i16
+  %cmp.not.not = icmp eq i32 %val3, 0
+  br i1 %cmp.not.not, label %if.then, label %if.else
+if.then: ; preds = %entry
+  %0 = getelementptr inbounds %struct.ss, ptr %this, i64 0, i32 8
+  %1 = load ptr, ptr %0, align 8
+  store i16 %val1.cast, ptr %1, align 2
+  %add.ptr.i.i.i.i = getelementptr inbounds i8, ptr %1, i64 16
+  store i16 %val2, ptr %add.ptr.i.i.i.i, align 2
+  br label %if.end
+if.else: ; preds = %entry
+  %2 = getelementptr inbounds %struct.ss, ptr %this, i64 0, i32 8
+  %3 = load ptr, ptr %2, align 8
+  %add.ptr.i.i.i.i7 = getelementptr inbounds i8, ptr %3, i64 16
+  store i16 %val1.cast, ptr %add.ptr.i.i.i.i7, align 2
+  store i16 %val2, ptr %3, align 2
+  br label %if.end
+if.end: ; preds = %if.else, %if.then
+  ret void
+}