Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -672,6 +672,9 @@
     if (NumElements == 1) {
       LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                                ".unpack");
+      AAMDNodes AAMD;
+      LI.getAAMetadata(AAMD);
+      NewLoad->setAAMetadata(AAMD);
       return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
         UndefValue::get(T), NewLoad, 0, Name));
     }
@@ -701,6 +704,10 @@
                                                 Name + ".elt");
       auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
       auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
+      // Propagate AA metadata. It'll still be valid on the narrowed load.
+      AAMDNodes AAMD;
+      LI.getAAMetadata(AAMD);
+      L->setAAMetadata(AAMD);
       V = IC.Builder->CreateInsertValue(V, L, i);
     }

@@ -713,6 +720,9 @@
     auto NumElements = AT->getNumElements();
     if (NumElements == 1) {
       LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
+      AAMDNodes AAMD;
+      LI.getAAMetadata(AAMD);
+      NewLoad->setAAMetadata(AAMD);
       return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
         UndefValue::get(T), NewLoad, 0, Name));
     }
@@ -745,6 +755,9 @@
                                                 Name + ".elt");
       auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                               Name + ".unpack");
+      AAMDNodes AAMD;
+      LI.getAAMetadata(AAMD);
+      L->setAAMetadata(AAMD);
       V = IC.Builder->CreateInsertValue(V, L, i);
       Offset += EltSize;
     }
@@ -1207,7 +1220,11 @@
                                                 AddrName);
       auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
       auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
-      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
+      llvm::Instruction *NS =
+          IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
+      AAMDNodes AAMD;
+      SI.getAAMetadata(AAMD);
+      NS->setAAMetadata(AAMD);
     }

     return true;
@@ -1254,7 +1271,10 @@
                                                 AddrName);
       auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
       auto EltAlign = MinAlign(Align, Offset);
-      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
+      Instruction *NS = IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
+      AAMDNodes AAMD;
+      SI.getAAMetadata(AAMD);
+      NS->setAAMetadata(AAMD);
       Offset += EltSize;
     }

Index: lib/Transforms/InstCombine/InstructionCombining.cpp
===================================================================
--- lib/Transforms/InstCombine/InstructionCombining.cpp
+++ lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2430,9 +2430,15 @@
       Builder->SetInsertPoint(L);
       Value *GEP = Builder->CreateInBoundsGEP(L->getType(),
                                               L->getPointerOperand(), Indices);
+      Instruction *Load = Builder->CreateLoad(GEP);
+      // Whatever aliasing information we had for the original load must also
+      // hold for the smaller load, so propagate the annotations.
+      AAMDNodes Nodes;
+      L->getAAMetadata(Nodes);
+      Load->setAAMetadata(Nodes);
       // Returning the load directly will cause the main loop to insert it in
       // the wrong spot, so use replaceInstUsesWith().
-      return replaceInstUsesWith(EV, Builder->CreateLoad(GEP));
+      return replaceInstUsesWith(EV, Load);
     }
   // We could simplify extracts from other values. Note that nested extracts may
   // already be simplified implicitly by the above: extract (extract (insert) )
Index: test/Transforms/InstCombine/extractinsert-tbaa.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/extractinsert-tbaa.ll
@@ -0,0 +1,45 @@
+; RUN: opt -S -instcombine %s -o - | FileCheck %s
+
+%Complex = type { double, double }
+
+; Check that instcombine preserves TBAA when narrowing loads
+define double @teststructextract(%Complex *%val) {
+; CHECK: load double, {{.*}}, !tbaa
+; CHECK-NOT: load %Complex
+  %loaded = load %Complex, %Complex *%val, !tbaa !1
+  %real = extractvalue %Complex %loaded, 0
+  ret double %real
+}
+
+define double @testarrayextract([2 x double] *%val) {
+; CHECK: load double, {{.*}}, !tbaa
+; CHECK-NOT: load [2 x double]
+  %loaded = load [2 x double], [2 x double] *%val, !tbaa !1
+  %real = extractvalue [2 x double] %loaded, 0
+  ret double %real
+}
+
+; Check that instcombine preserves TBAA when breaking up stores
+define void @teststructinsert(%Complex *%loc, double %a, double %b) {
+; CHECK: store double %a, {{.*}}, !tbaa
+; CHECK: store double %b, {{.*}}, !tbaa
+; CHECK-NOT: store %Complex
+  %inserted = insertvalue %Complex undef, double %a, 0
+  %inserted2 = insertvalue %Complex %inserted, double %b, 1
+  store %Complex %inserted2, %Complex *%loc, !tbaa !1
+  ret void
+}
+
+define void @testarrayinsert([2 x double] *%loc, double %a, double %b) {
+; CHECK: store double %a, {{.*}}, !tbaa
+; CHECK: store double %b, {{.*}}, !tbaa
+; CHECK-NOT: store [2 x double]
+  %inserted = insertvalue [2 x double] undef, double %a, 0
+  %inserted2 = insertvalue [2 x double] %inserted, double %b, 1
+  store [2 x double] %inserted2, [2 x double] *%loc, !tbaa !1
+  ret void
+}
+
+!0 = !{!"tbaa_root"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"Complex", !0, i64 0}
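For illustration, a rough sketch of the IR instcombine should produce for @teststructinsert once the patch is applied (value names and the exact GEP spelling are illustrative, not copied from real opt output): the aggregate store is split into two scalar stores, and each of them now carries the original !tbaa tag.

  define void @teststructinsert(%Complex* %loc, double %a, double %b) {
    %loc.elt = getelementptr inbounds %Complex, %Complex* %loc, i64 0, i32 0
    store double %a, double* %loc.elt, !tbaa !1    ; TBAA kept on the narrowed store
    %loc.elt1 = getelementptr inbounds %Complex, %Complex* %loc, i64 0, i32 1
    store double %b, double* %loc.elt1, !tbaa !1   ; TBAA kept on the narrowed store
    ret void
  }

Without the SI.getAAMetadata/NS->setAAMetadata calls, the two scalar stores would be emitted with no !tbaa attachment at all, which is what the CHECK lines in the test guard against.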