Index: llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h =================================================================== --- llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h +++ llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h @@ -2113,12 +2113,15 @@ MachineMemOperand *MMO) : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {} - // In the both nodes address is Op1, mask is Op2: - // MaskedLoadSDNode (Chain, ptr, mask, src0), src0 is a passthru value - // MaskedStoreSDNode (Chain, ptr, mask, data) + // MaskedLoadSDNode (Chain, ptr, mask, passthru) + // MaskedStoreSDNode (Chain, data, ptr, mask) // Mask is a vector of i1 elements - const SDValue &getBasePtr() const { return getOperand(1); } - const SDValue &getMask() const { return getOperand(2); } + const SDValue &getBasePtr() const { + return getOperand(getOpcode() == ISD::MLOAD ? 1 : 2); + } + const SDValue &getMask() const { + return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3); + } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::MLOAD || @@ -2143,7 +2146,10 @@ return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy); } + const SDValue &getBasePtr() const { return getOperand(1); } + const SDValue &getMask() const { return getOperand(2); } const SDValue &getPassThru() const { return getOperand(3); } + static bool classof(const SDNode *N) { return N->getOpcode() == ISD::MLOAD; } @@ -2175,7 +2181,9 @@ /// memory at base_addr. 
bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; } - const SDValue &getValue() const { return getOperand(3); } + const SDValue &getValue() const { return getOperand(1); } + const SDValue &getBasePtr() const { return getOperand(2); } + const SDValue &getMask() const { return getOperand(3); } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::MSTORE; Index: llvm/trunk/include/llvm/Target/TargetSelectionDAG.td =================================================================== --- llvm/trunk/include/llvm/Target/TargetSelectionDAG.td +++ llvm/trunk/include/llvm/Target/TargetSelectionDAG.td @@ -217,7 +217,7 @@ ]>; def SDTMaskedStore: SDTypeProfile<0, 3, [ // masked store - SDTCisPtrTy<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameNumEltsAs<1, 2> + SDTCisVec<0>, SDTCisPtrTy<1>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2> ]>; def SDTMaskedLoad: SDTypeProfile<1, 3, [ // masked load Index: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -1219,28 +1219,14 @@ SDLoc dl(N); bool TruncateStore = false; - if (OpNo == 2) { - // Mask comes before the data operand. If the data operand is legal, we just - // promote the mask. - // When the data operand has illegal type, we should legalize the data - // operand first. The mask will be promoted/splitted/widened according to - // the data operand type. - if (TLI.isTypeLegal(DataVT)) { - Mask = PromoteTargetBoolean(Mask, DataVT); - // Update in place. 
- SmallVector<SDValue, 4> NewOps(N->op_begin(), N->op_end()); - NewOps[2] = Mask; - return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); - } - - if (getTypeAction(DataVT) == TargetLowering::TypePromoteInteger) - return PromoteIntOp_MSTORE(N, 3); - if (getTypeAction(DataVT) == TargetLowering::TypeWidenVector) - return WidenVecOp_MSTORE(N, 3); - assert (getTypeAction(DataVT) == TargetLowering::TypeSplitVector); - return SplitVecOp_MSTORE(N, 3); + if (OpNo == 3) { + Mask = PromoteTargetBoolean(Mask, DataVT); + // Update in place. + SmallVector<SDValue, 4> NewOps(N->op_begin(), N->op_end()); + NewOps[3] = Mask; + return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); } else { // Data operand - assert(OpNo == 3 && "Unexpected operand for promotion"); + assert(OpNo == 1 && "Unexpected operand for promotion"); DataOp = GetPromotedInteger(DataOp); TruncateStore = true; } Index: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -3860,7 +3860,7 @@ } SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) { - assert((OpNo == 2 || OpNo == 3) && + assert((OpNo == 1 || OpNo == 3) && "Can widen only data or mask operand of mstore"); MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N); SDValue Mask = MST->getMask(); @@ -3868,8 +3868,8 @@ SDValue StVal = MST->getValue(); SDLoc dl(N); - if (OpNo == 3) { - // Widen the value + if (OpNo == 1) { + // Widen the value. StVal = GetWidenedVector(StVal); // The mask should be widened as well. @@ -3879,18 +3879,15 @@ WideVT.getVectorNumElements()); Mask = ModifyToType(Mask, WideMaskVT, true); } else { + // Widen the mask. 
EVT WideMaskVT = TLI.getTypeToTransformTo(*DAG.getContext(), MaskVT); Mask = ModifyToType(Mask, WideMaskVT, true); EVT ValueVT = StVal.getValueType(); - if (getTypeAction(ValueVT) == TargetLowering::TypeWidenVector) - StVal = GetWidenedVector(StVal); - else { - EVT WideVT = EVT::getVectorVT(*DAG.getContext(), - ValueVT.getVectorElementType(), - WideMaskVT.getVectorNumElements()); - StVal = ModifyToType(StVal, WideVT); - } + EVT WideVT = EVT::getVectorVT(*DAG.getContext(), + ValueVT.getVectorElementType(), + WideMaskVT.getVectorNumElements()); + StVal = ModifyToType(StVal, WideVT); } assert(Mask.getValueType().getVectorNumElements() == Index: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -6580,11 +6580,11 @@ } SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, - SDValue Ptr, SDValue Mask, SDValue Src0, + SDValue Ptr, SDValue Mask, SDValue PassThru, EVT MemVT, MachineMemOperand *MMO, ISD::LoadExtType ExtTy, bool isExpanding) { SDVTList VTs = getVTList(VT, MVT::Other); - SDValue Ops[] = { Chain, Ptr, Mask, Src0 }; + SDValue Ops[] = { Chain, Ptr, Mask, PassThru }; FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); ID.AddInteger(VT.getRawBits()); @@ -6615,7 +6615,7 @@ "Invalid chain type"); EVT VT = Val.getValueType(); SDVTList VTs = getVTList(MVT::Other); - SDValue Ops[] = { Chain, Ptr, Mask, Val }; + SDValue Ops[] = { Chain, Val, Ptr, Mask }; FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); ID.AddInteger(VT.getRawBits()); Index: llvm/trunk/lib/Target/X86/X86ISelLowering.h =================================================================== --- llvm/trunk/lib/Target/X86/X86ISelLowering.h +++ llvm/trunk/lib/Target/X86/X86ISelLowering.h @@ -1407,9 +1407,9 @@ MachineMemOperand *MMO) : MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {} - 
const SDValue &getBasePtr() const { return getOperand(1); } - const SDValue &getMask() const { return getOperand(2); } - const SDValue &getValue() const { return getOperand(3); } + const SDValue &getValue() const { return getOperand(1); } + const SDValue &getBasePtr() const { return getOperand(2); } + const SDValue &getMask() const { return getOperand(3); } static bool classof(const SDNode *N) { return N->getOpcode() == X86ISD::VMTRUNCSTORES || Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp @@ -21723,7 +21723,7 @@ MachineMemOperand *MMO, SelectionDAG &DAG) { SDVTList VTs = DAG.getVTList(MVT::Other); - SDValue Ops[] = { Chain, Ptr, Mask, Val }; + SDValue Ops[] = { Chain, Val, Ptr, Mask }; return SignedSat ? DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) : DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO); Index: llvm/trunk/lib/Target/X86/X86InstrAVX512.td =================================================================== --- llvm/trunk/lib/Target/X86/X86InstrAVX512.td +++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td @@ -3474,7 +3474,7 @@ [], _.ExeDomain>, EVEX, EVEX_K, Sched<[Sched.MR]>, NotMemoryFoldable; - def: Pat<(mstore addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src)), + def: Pat<(mstore (_.VT _.RC:$src), addr:$ptr, _.KRCWM:$mask), (!cast<Instruction>(BaseName#_.ZSuffix#mrk) addr:$ptr, _.KRCWM:$mask, _.RC:$src)>; @@ -4029,10 +4029,10 @@ multiclass avx512_store_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _, dag Mask, RegisterClass MaskRC> { -def : Pat<(masked_store addr:$dst, Mask, +def : Pat<(masked_store (_.info512.VT (insert_subvector undef, (_.info128.VT _.info128.RC:$src), - (iPTR 0)))), + (iPTR 0))), addr:$dst, Mask), (!cast<Instruction>(InstrStr#mrk) addr:$dst, (COPY_TO_REGCLASS MaskRC:$mask, VK1WM), (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>; @@ -4044,10 +4044,10 @@ dag Mask, RegisterClass MaskRC, SubRegIndex subreg> { -def : Pat<(masked_store addr:$dst, Mask, 
+def : Pat<(masked_store (_.info512.VT (insert_subvector undef, (_.info128.VT _.info128.RC:$src), - (iPTR 0)))), + (iPTR 0))), addr:$dst, Mask), (!cast<Instruction>(InstrStr#mrk) addr:$dst, (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM), (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>; @@ -4064,16 +4064,16 @@ SubRegIndex subreg> { // AVX512F pattern. -def : Pat<(masked_store addr:$dst, Mask512, +def : Pat<(masked_store (_.info512.VT (insert_subvector undef, (_.info128.VT _.info128.RC:$src), - (iPTR 0)))), + (iPTR 0))), addr:$dst, Mask512), (!cast<Instruction>(InstrStr#mrk) addr:$dst, (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM), (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>; // AVX512VL pattern. -def : Pat<(masked_store addr:$dst, Mask128, (_.info128.VT _.info128.RC:$src)), +def : Pat<(masked_store (_.info128.VT _.info128.RC:$src), addr:$dst, Mask128), (!cast<Instruction>(InstrStr#mrk) addr:$dst, (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM), (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>; @@ -8992,8 +8992,8 @@ (!cast<Instruction>(Name#SrcInfo.ZSuffix##mr) addr:$dst, SrcInfo.RC:$src)>; - def : Pat<(mtruncFrag addr:$dst, SrcInfo.KRCWM:$mask, - (SrcInfo.VT SrcInfo.RC:$src)), + def : Pat<(mtruncFrag (SrcInfo.VT SrcInfo.RC:$src), addr:$dst, + SrcInfo.KRCWM:$mask), (!cast<Instruction>(Name#SrcInfo.ZSuffix##mrk) addr:$dst, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src)>; } @@ -9714,8 +9714,7 @@ } multiclass compress_by_vec_width_lowering<X86VectorVTInfo _, string Name> { - def : Pat<(X86mCompressingStore addr:$dst, _.KRCWM:$mask, - (_.VT _.RC:$src)), + def : Pat<(X86mCompressingStore (_.VT _.RC:$src), addr:$dst, _.KRCWM:$mask), (!cast<Instruction>(Name#_.ZSuffix##mrk) addr:$dst, _.KRCWM:$mask, _.RC:$src)>; } Index: llvm/trunk/lib/Target/X86/X86InstrSSE.td =================================================================== --- llvm/trunk/lib/Target/X86/X86InstrSSE.td +++ llvm/trunk/lib/Target/X86/X86InstrSSE.td @@ -7940,7 +7940,7 @@ multiclass 
maskmov_lowering { // masked store - def: Pat<(X86mstore addr:$ptr, (MaskVT RC:$mask), (VT RC:$src)), + def: Pat<(X86mstore (VT RC:$src), addr:$ptr, (MaskVT RC:$mask)), (!cast(InstrStr#"mr") addr:$ptr, RC:$mask, RC:$src)>; // masked load def: Pat<(VT (X86mload addr:$ptr, (MaskVT RC:$mask), undef)),