diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -98,11 +98,16 @@
     Value *LoadPtr = Load->getPointerOperand();
     Value *StorePtr = Store->getPointerOperand();
     Type *LoadType = getLoadStoreType(Load);
+    Type *StoreType = getLoadStoreType(Store);
 
-    assert(LoadPtr->getType()->getPointerAddressSpace() ==
-               StorePtr->getType()->getPointerAddressSpace() &&
-           LoadType == getLoadStoreType(Store) &&
-           "Should be a known dependence");
+    assert(
+        LoadPtr->getType()->getPointerAddressSpace() ==
+            StorePtr->getType()->getPointerAddressSpace() &&
+        Load->getParent()->getModule()->getDataLayout().getTypeSizeInBits(
+            LoadType) ==
+            Store->getParent()->getModule()->getDataLayout().getTypeSizeInBits(
+                StoreType) &&
+        "Should be a known dependence");
 
     // Currently we only support accesses with unit stride.  FIXME: we should be
     // able to handle non unit stirde as well as long as the stride is equal to
@@ -211,9 +216,19 @@
       if (!Load)
         continue;
 
-      // Only progagate the value if they are of the same type.
-      if (Store->getPointerOperandType() != Load->getPointerOperandType() ||
-          getLoadStoreType(Store) != getLoadStoreType(Load))
+      // Only propagate the value if the pointees are the same size.
+      if (Store->getParent()->getModule()->getDataLayout().getTypeSizeInBits(
+              getLoadStoreType(Store)) !=
+          Load->getParent()->getModule()->getDataLayout().getTypeSizeInBits(
+              getLoadStoreType(Load)))
+        continue;
+
+      // Don't propagate the value if one of the pointees is a pointer but not
+      // the other one.
+      if ((getLoadStoreType(Store)->isPointerTy() &&
+           !getLoadStoreType(Load)->isPointerTy()) ||
+          (!getLoadStoreType(Store)->isPointerTy() &&
+           getLoadStoreType(Load)->isPointerTy()))
        continue;
 
       Candidates.emplace_front(Load, Store);
@@ -438,7 +453,35 @@
     PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                    &L->getHeader()->front());
     PHI->addIncoming(Initial, PH);
-    PHI->addIncoming(Cand.Store->getOperand(0), L->getLoopLatch());
+
+    Value *StoreValue;
+
+    Type *LoadType = Initial->getType();
+    Type *StoreType = Cand.Store->getOperand(0)->getType();
+
+    assert(
+        Cand.Load->getParent()->getModule()->getDataLayout().getTypeSizeInBits(
+            LoadType) == Cand.Store->getParent()
+                             ->getModule()
+                             ->getDataLayout()
+                             .getTypeSizeInBits(StoreType) &&
+        "The type sizes should match!");
+
+    assert(!((StoreType->isPointerTy() && !LoadType->isPointerTy()) ||
+             (!StoreType->isPointerTy() && LoadType->isPointerTy())) &&
+           "Forwarding pointer and non-pointer type!");
+
+    if (LoadType != StoreType) {
+      // Need a bitcast to convert to the loaded type.
+      assert(CastInst::castIsValid(Instruction::BitCast, StoreType, LoadType) &&
+             "Invalid bitcast!");
+      StoreValue =
+          CastInst::Create(Instruction::BitCast, Cand.Store->getOperand(0),
+                           LoadType, "store_forward_cast", Cand.Store);
+    } else
+      StoreValue = Cand.Store->getOperand(0);
+
+    PHI->addIncoming(StoreValue, L->getLoopLatch());
 
     Cand.Load->replaceAllUsesWith(PHI);
   }
diff --git a/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll b/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
--- a/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
+++ b/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
@@ -1,6 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt --opaque-pointers -loop-load-elim -S < %s | FileCheck %s
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types but have the same
+; size, then we should still be able to forward the value.
 ;
 ; for (unsigned i = 0; i < 100; i++) {
 ;   A[i+1] = B[i] + 2;
@@ -9,9 +11,32 @@
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
-; CHECK-LABEL: @f(
 define void @f(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
-
+; CHECK-LABEL: @f(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load float, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[B:%.*]] = load i32, ptr [[BIDX]], align 4
+; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to float
+; CHECK-NEXT:    store i32 [[A_P1]], ptr [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load float, ptr [[AIDX]], align 4
+; CHECK-NEXT:    [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
+; CHECK-NEXT:    [[C_INT:%.*]] = fptosi float [[C]] to i32
+; CHECK-NEXT:    store i32 [[C_INT]], ptr [[CIDX]], align 4
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -28,9 +53,7 @@
   %a_p1 = add i32 %b, 2
   store i32 %a_p1, ptr %Aidx_next, align 4
 
-; CHECK: %a = load float, ptr %Aidx, align 4
   %a = load float, ptr %Aidx, align 4
-; CHECK-NEXT: %c = fmul float %a, 2.0
   %c = fmul float %a, 2.0
   %c.int = fptosi float %c to i32
   store i32 %c.int, ptr %Cidx, align 4
@@ -42,7 +65,8 @@
   ret void
 }
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types but have the same
+; size, then we should still be able to forward the value.
 ;
 ; for (unsigned i = 0; i < 100; i++) {
 ;   A[i+1] = B[i] + 2;
@@ -50,9 +74,34 @@
 ;   C[i] = ((float*)A)[i] * 2;
 ; }
 
-; CHECK-LABEL: @f2(
 define void @f2(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
-
+; CHECK-LABEL: @f2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load float, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[B:%.*]] = load i32, ptr [[BIDX]], align 4
+; CHECK-NEXT:    [[A_P2:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    store i32 [[A_P2]], ptr [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A_P3:%.*]] = add i32 [[B]], 3
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P3]] to float
+; CHECK-NEXT:    store i32 [[A_P3]], ptr [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load float, ptr [[AIDX]], align 4
+; CHECK-NEXT:    [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
+; CHECK-NEXT:    [[C_INT:%.*]] = fptosi float [[C]] to i32
+; CHECK-NEXT:    store i32 [[C_INT]], ptr [[CIDX]], align 4
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -72,9 +121,7 @@
   %a_p3 = add i32 %b, 3
   store i32 %a_p3, ptr %Aidx_next, align 4
 
-; CHECK: %a = load float, ptr %Aidx, align 4
   %a = load float, ptr %Aidx, align 4
-; CHECK-NEXT: %c = fmul float %a, 2.0
   %c = fmul float %a, 2.0
   %c.int = fptosi float %c to i32
   store i32 %c.int, ptr %Cidx, align 4
diff --git a/llvm/test/Transforms/LoopLoadElim/type-mismatch.ll b/llvm/test/Transforms/LoopLoadElim/type-mismatch.ll
--- a/llvm/test/Transforms/LoopLoadElim/type-mismatch.ll
+++ b/llvm/test/Transforms/LoopLoadElim/type-mismatch.ll
@@ -1,6 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -loop-load-elim -S < %s | FileCheck %s
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types but have the same
+; size, then we should still be able to forward the value.
 ;
 ; for (unsigned i = 0; i < 100; i++) {
 ;   A[i+1] = B[i] + 2;
@@ -9,9 +11,34 @@
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
-; CHECK-LABEL: @f(
 define void @f(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
-
+; CHECK-LABEL: @f(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = bitcast i32* [[A:%.*]] to float*
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load float, float* [[A1]], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX_FLOAT:%.*]] = bitcast i32* [[AIDX]] to float*
+; CHECK-NEXT:    [[B:%.*]] = load i32, i32* [[BIDX]], align 4
+; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to float
+; CHECK-NEXT:    store i32 [[A_P1]], i32* [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load float, float* [[AIDX_FLOAT]], align 4
+; CHECK-NEXT:    [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
+; CHECK-NEXT:    [[C_INT:%.*]] = fptosi float [[C]] to i32
+; CHECK-NEXT:    store i32 [[C_INT]], i32* [[CIDX]], align 4
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -29,9 +56,7 @@
   %a_p1 = add i32 %b, 2
   store i32 %a_p1, i32* %Aidx_next, align 4
 
-; CHECK: %a = load float, float* %Aidx.float, align 4
   %a = load float, float* %Aidx.float, align 4
-; CHECK-NEXT: %c = fmul float %a, 2.0
   %c = fmul float %a, 2.0
   %c.int = fptosi float %c to i32
   store i32 %c.int, i32* %Cidx, align 4
@@ -43,7 +68,8 @@
   ret void
 }
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types but have the same
+; size, then we should still be able to forward the value.
 ;
 ; for (unsigned i = 0; i < 100; i++) {
 ;   A[i+1] = B[i] + 2;
@@ -51,9 +77,36 @@
 ;   C[i] = ((float*)A)[i] * 2;
 ; }
 
-; CHECK-LABEL: @f2(
 define void @f2(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
-
+; CHECK-LABEL: @f2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = bitcast i32* [[A:%.*]] to float*
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load float, float* [[A1]], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX_FLOAT:%.*]] = bitcast i32* [[AIDX]] to float*
+; CHECK-NEXT:    [[B:%.*]] = load i32, i32* [[BIDX]], align 4
+; CHECK-NEXT:    [[A_P2:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    store i32 [[A_P2]], i32* [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A_P3:%.*]] = add i32 [[B]], 3
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P3]] to float
+; CHECK-NEXT:    store i32 [[A_P3]], i32* [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load float, float* [[AIDX_FLOAT]], align 4
+; CHECK-NEXT:    [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
+; CHECK-NEXT:    [[C_INT:%.*]] = fptosi float [[C]] to i32
+; CHECK-NEXT:    store i32 [[C_INT]], i32* [[CIDX]], align 4
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -74,9 +127,7 @@
   %a_p3 = add i32 %b, 3
   store i32 %a_p3, i32* %Aidx_next, align 4
 
-; CHECK: %a = load float, float* %Aidx.float, align 4
   %a = load float, float* %Aidx.float, align 4
-; CHECK-NEXT: %c = fmul float %a, 2.0
   %c = fmul float %a, 2.0
   %c.int = fptosi float %c to i32
   store i32 %c.int, i32* %Cidx, align 4
@@ -87,3 +138,125 @@
 for.end:                                          ; preds = %for.body
   ret void
 }
+
+; Check that we don't forward between pointer-sized integers and actual
+; pointers; we could potentially do this in the future.
+
+define void @f3(i64* noalias %A, i64* noalias %B, i64* noalias %C, i64 %N) {
+; CHECK-LABEL: @f3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i64, i64* [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX_I8P:%.*]] = bitcast i64* [[AIDX]] to i8**
+; CHECK-NEXT:    [[B:%.*]] = load i64, i64* [[BIDX]], align 8
+; CHECK-NEXT:    [[A_P1:%.*]] = add i64 [[B]], 2
+; CHECK-NEXT:    store i64 [[A_P1]], i64* [[AIDX_NEXT]], align 8
+; CHECK-NEXT:    [[A:%.*]] = load i8*, i8** [[AIDX_I8P]], align 8
+; CHECK-NEXT:    [[C:%.*]] = getelementptr i8, i8* [[A]], i64 57
+; CHECK-NEXT:    [[C_I64P:%.*]] = ptrtoint i8* [[C]] to i64
+; CHECK-NEXT:    store i64 [[C_I64P]], i64* [[CIDX]], align 8
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+
+  %Aidx_next = getelementptr inbounds i64, i64* %A, i64 %indvars.iv.next
+  %Bidx = getelementptr inbounds i64, i64* %B, i64 %indvars.iv
+  %Cidx = getelementptr inbounds i64, i64* %C, i64 %indvars.iv
+  %Aidx = getelementptr inbounds i64, i64* %A, i64 %indvars.iv
+  %Aidx.i8p = bitcast i64* %Aidx to i8**
+
+  %b = load i64, i64* %Bidx, align 8
+  %a_p1 = add i64 %b, 2
+  store i64 %a_p1, i64* %Aidx_next, align 8
+
+  %a = load i8*, i8** %Aidx.i8p, align 8
+  %c = getelementptr i8, i8* %a, i64 57
+  %c.i64p = ptrtoint i8* %c to i64
+  store i64 %c.i64p, i64* %Cidx, align 8
+
+  %exitcond = icmp eq i64 %indvars.iv.next, %N
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
+
+; If the store and the load use different types but have the same
+; size, then we should still be able to forward the value. This also
+; applies to vector types.
+;
+; for (unsigned i = 0; i < 100; i++) {
+;   A[i+1] = B[i] + 2;
+;   C[i] = ((float*)A)[i] * 2;
+; }
+
+define void @f4(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
+; CHECK-LABEL: @f4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = bitcast i32* [[A:%.*]] to <2 x half>*
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load <2 x half>, <2 x half>* [[A1]], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi <2 x half> [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX_FLOAT:%.*]] = bitcast i32* [[AIDX]] to <2 x half>*
+; CHECK-NEXT:    [[B:%.*]] = load i32, i32* [[BIDX]], align 4
+; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to <2 x half>
+; CHECK-NEXT:    store i32 [[A_P1]], i32* [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load <2 x half>, <2 x half>* [[AIDX_FLOAT]], align 4
+; CHECK-NEXT:    [[C:%.*]] = fmul <2 x half> [[STORE_FORWARDED]], <half 0xH4000, half 0xH4000>
+; CHECK-NEXT:    [[C_INT:%.*]] = bitcast <2 x half> [[C]] to i32
+; CHECK-NEXT:    store i32 [[C_INT]], i32* [[CIDX]], align 4
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+
+  %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
+  %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
+  %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
+  %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+  %Aidx.float = bitcast i32* %Aidx to <2 x half>*
+
+  %b = load i32, i32* %Bidx, align 4
+  %a_p1 = add i32 %b, 2
+  store i32 %a_p1, i32* %Aidx_next, align 4
+
+  %a = load <2 x half>, <2 x half>* %Aidx.float, align 4
+  %c = fmul <2 x half> %a, <half 2.0, half 2.0>
+  %c.int = bitcast <2 x half> %c to i32
+  store i32 %c.int, i32* %Cidx, align 4
+
+  %exitcond = icmp eq i64 %indvars.iv.next, %N
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}