Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10046,9 +10046,12 @@
   // If this is a masked load with an all ones mask, we can use a unmasked load.
   // FIXME: Can we do this for indexed, compressing, or truncating stores?
   if (ISD::isConstantSplatVectorAllOnes(Mask.getNode()) && MST->isUnindexed() &&
-      !MST->isCompressingStore() && !MST->isTruncatingStore())
+      !MST->isCompressingStore() && !MST->isTruncatingStore()) {
     return DAG.getStore(MST->getChain(), SDLoc(N), MST->getValue(),
-                        MST->getBasePtr(), MST->getMemOperand());
+                        MST->getBasePtr(), MST->getPointerInfo(),
+                        MST->getOriginalAlign(), MachineMemOperand::MOStore,
+                        MST->getAAInfo());
+  }

   // Try transforming N to an indexed store.
   if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
@@ -10103,8 +10106,10 @@
   // FIXME: Can we do this for indexed, expanding, or extending loads?
   if (ISD::isConstantSplatVectorAllOnes(Mask.getNode()) && MLD->isUnindexed() &&
       !MLD->isExpandingLoad() && MLD->getExtensionType() == ISD::NON_EXTLOAD) {
-    SDValue NewLd = DAG.getLoad(N->getValueType(0), SDLoc(N), MLD->getChain(),
-                                MLD->getBasePtr(), MLD->getMemOperand());
+    SDValue NewLd = DAG.getLoad(
+        N->getValueType(0), SDLoc(N), MLD->getChain(), MLD->getBasePtr(),
+        MLD->getPointerInfo(), MLD->getOriginalAlign(),
+        MachineMemOperand::MOLoad, MLD->getAAInfo(), MLD->getRanges());
     return CombineTo(N, NewLd, NewLd.getValue(1));
   }

Index: llvm/test/CodeGen/X86/masked_loadstore_split.ll
===================================================================
--- llvm/test/CodeGen/X86/masked_loadstore_split.ll
+++ llvm/test/CodeGen/X86/masked_loadstore_split.ll
@@ -10,7 +10,7 @@
 ; CHECK-NEXT: [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
 ; CHECK-NEXT: [[VMOVAPSYrm:%[0-9]+]]:vr256 = VMOVAPSYrm $rip, 1, $noreg, %const.0, $noreg :: (load (s256) from constant-pool)
 ; CHECK-NEXT: VMASKMOVPDYmr [[COPY]], 1, $noreg, 32, $noreg, killed [[VMOVAPSYrm]], [[AVX_SET0_]] :: (store unknown-size into %ir.0 + 32, align 8)
-; CHECK-NEXT: VMOVUPDYmr [[COPY]], 1, $noreg, 0, $noreg, [[AVX_SET0_]] :: (store unknown-size into %ir.0, align 8)
+; CHECK-NEXT: VMOVUPDYmr [[COPY]], 1, $noreg, 0, $noreg, [[AVX_SET0_]] :: (store (s256) into %ir.0, align 8)
 ; CHECK-NEXT: RET 0
 entry:
   call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> zeroinitializer, <8 x double>* %0, i32 8, <8 x i1> )
@@ -25,7 +25,7 @@
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
 ; CHECK-NEXT: [[VMOVAPSYrm:%[0-9]+]]:vr256 = VMOVAPSYrm $rip, 1, $noreg, %const.0, $noreg :: (load (s256) from constant-pool)
 ; CHECK-NEXT: [[VMASKMOVPDYrm:%[0-9]+]]:vr256 = VMASKMOVPDYrm killed [[VMOVAPSYrm]], [[COPY]], 1, $noreg, 32, $noreg :: (load unknown-size from %ir.0 + 32, align 8)
-; CHECK-NEXT: [[VMOVUPDYrm:%[0-9]+]]:vr256 = VMOVUPDYrm [[COPY]], 1, $noreg, 0, $noreg :: (load unknown-size from %ir.0, align 8)
+; CHECK-NEXT: [[VMOVUPDYrm:%[0-9]+]]:vr256 = VMOVUPDYrm [[COPY]], 1, $noreg, 0, $noreg :: (load (s256) from %ir.0, align 8)
 ; CHECK-NEXT: $ymm0 = COPY [[VMOVUPDYrm]]
 ; CHECK-NEXT: $ymm1 = COPY [[VMASKMOVPDYrm]]
 ; CHECK-NEXT: RET 0, $ymm0, $ymm1
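
Note (illustrative, not part of the patch): the combine being touched here rewrites a masked load or store whose mask is a constant all-ones splat into a plain load or store. A minimal IR sketch of the pattern that reaches this path; the function name and element count are hypothetical, chosen only for brevity:

; Sketch only: an all-true mask makes the masked load equivalent to a plain load.
define <4 x double> @all_ones_mask_load(<4 x double>* %p) {
  %v = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(
           <4 x double>* %p, i32 8,
           <4 x i1> <i1 true, i1 true, i1 true, i1 true>,
           <4 x double> undef)
  ret <4 x double> %v
}
declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)

Previously the new unmasked node reused the masked node's MachineMemOperand, which, as in the split test above, can have unknown size. Building the node from the pointer info, original alignment, and AA info (plus range metadata for loads) gives it a properly sized memory operand, which is what the test updates from "unknown-size" to "(s256)" check.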