diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -259,6 +259,8 @@ SDValue &Index, SDValue &Disp, SDValue &Segment); + bool isProfitableToFormMaskedOp(SDNode *N) const; + /// Implement addressing mode selection for inline asm expressions. bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, @@ -722,6 +724,77 @@ return true; } +// Indicates it is profitable to form an AVX512 masked operation. Returning false +// will favor a masked register-register blend and the operation will be +// selected separately. +bool X86DAGToDAGISel::isProfitableToFormMaskedOp(SDNode *N) const { + assert(N->getOpcode() == ISD::VSELECT && "Unexpected opcode!"); + + // FIXME: Fix tests and remove these. + switch (N->getOperand(1).getOpcode()) { + case ISD::ABS: + case ISD::BITCAST: + case ISD::CTLZ: + case ISD::EXTRACT_SUBVECTOR: case ISD::INSERT_SUBVECTOR: + case ISD::FP_EXTEND: + case ISD::MULHS: case ISD::MULHU: + case ISD::ROTL: case ISD::ROTR: + case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: + case ISD::TRUNCATE: + case ISD::SMAX: case ISD::SMIN: + case ISD::UMAX: case ISD::UMIN: + case ISD::SIGN_EXTEND: case ISD::SIGN_EXTEND_VECTOR_INREG: + case ISD::ZERO_EXTEND: case ISD::ZERO_EXTEND_VECTOR_INREG: + case X86ISD::AVG: + case X86ISD::CONFLICT: + case X86ISD::CVTP2SI: case X86ISD::CVTP2UI: + case X86ISD::CVTTP2SI: case X86ISD::CVTTP2UI: + case X86ISD::CVTSI2P: case X86ISD::CVTUI2P: + case X86ISD::DBPSADBW: + case X86ISD::GF2P8AFFINEQB: + case X86ISD::GF2P8AFFINEINVQB: + case X86ISD::GF2P8MULB: + case X86ISD::MOVDDUP: + case X86ISD::MOVSHDUP: case X86ISD::MOVSLDUP: + case X86ISD::MULHRS: + case X86ISD::MULTISHIFT: + case X86ISD::PALIGNR: + case X86ISD::PSHUFB: + case X86ISD::PSHUFD: + case X86ISD::PSHUFHW: case X86ISD::PSHUFLW: + case X86ISD::SCALEF: + case X86ISD::SHUF128: + case X86ISD::SHUFP: + case X86ISD::UNPCKH: case X86ISD::UNPCKL: + case X86ISD::VALIGN: + case X86ISD::VBROADCAST: + case X86ISD::VFIXUPIMM: + case X86ISD::VFPEXT: case X86ISD::VFPROUND: + case X86ISD::VGETMANT: + case X86ISD::VPDPBUSD: case X86ISD::VPDPBUSDS: + case X86ISD::VPDPWSSD: case X86ISD::VPDPWSSDS: + case X86ISD::VPERMILPI: case X86ISD::VPERMILPV: + case X86ISD::VPERMI: case X86ISD::VPERMV: case X86ISD::VPERMV3: + case X86ISD::VPMADD52L: case X86ISD::VPMADD52H: + case X86ISD::VPMADDUBSW: case X86ISD::VPMADDWD: + case X86ISD::VPTERNLOG: + case X86ISD::VREDUCE: + case X86ISD::VROTLI: case X86ISD::VROTRI: + case X86ISD::VSHLD: case X86ISD::VSHLDV: + case X86ISD::VSHRD: case X86ISD::VSHRDV: + case X86ISD::VSHL: case X86ISD::VSHLI: case X86ISD::VSHLV: + case X86ISD::VSRL: case X86ISD::VSRLI: case X86ISD::VSRLV: + case X86ISD::VSRA: case X86ISD::VSRAI: case X86ISD::VSRAV: + case X86ISD::VTRUNC: case X86ISD::VTRUNCS: case X86ISD::VTRUNCUS: + return true; + } + + // If the operation has additional users, the operation will be duplicated. + // Check the use count to prevent that. + // FIXME: Are there cheap opcodes we might want to duplicate? + return N->getOperand(1).hasOneUse(); +} + /// Replace the original chain operand of the call with /// load's chain operand and move load below the call's chain operand. 
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -169,6 +169,13 @@ def v32i1_info : X86KVectorVTInfo; def v64i1_info : X86KVectorVTInfo; +// Used for matching masked operations. Ensures the operation part only has a +// single use. +def vselect_mask : PatFrag<(ops node:$mask, node:$src1, node:$src2), + (vselect node:$mask, node:$src1, node:$src2), [{ + return isProfitableToFormMaskedOp(N); +}]>; + // This multiclass generates the masking variants from the non-masking // variant. It only provides the assembly pieces for the masking variants. // It assumes custom ISel patterns for masking which can be provided as @@ -220,7 +227,7 @@ string OpcodeStr, string AttSrcAsm, string IntelSrcAsm, dag RHS, dag MaskingRHS, - SDNode Select = vselect, + SDPatternOperator Select = vselect_mask, string MaskingConstraint = "", bit IsCommutable = 0, bit IsKCommutable = 0, @@ -250,9 +257,9 @@ OpcodeStr, AttSrcAsm, IntelSrcAsm, [(set _.RC:$dst, RHS)], [(set _.RC:$dst, - (vselect _.KRCWM:$mask, MaskRHS, _.RC:$src0))], + (vselect_mask _.KRCWM:$mask, MaskRHS, _.RC:$src0))], [(set _.RC:$dst, - (vselect _.KRCWM:$mask, MaskRHS, _.ImmAllZerosV))], + (vselect_mask _.KRCWM:$mask, MaskRHS, _.ImmAllZerosV))], "$src0 = $dst", IsCommutable, IsKCommutable, IsKZCommutable>; @@ -265,7 +272,7 @@ dag RHS, bit IsCommutable = 0, bit IsKCommutable = 0, bit IsKZCommutable = IsCommutable, - SDNode Select = vselect> : + SDPatternOperator Select = vselect_mask> : AVX512_maskable_common : AVX512_maskable_common; + vselect_mask, "", IsCommutable>; multiclass AVX512_maskable_3src_scalar O, Format F, X86VectorVTInfo _, dag Outs, dag NonTiedIns, string OpcodeStr, @@ -426,9 +433,9 @@ OpcodeStr, AttSrcAsm, IntelSrcAsm, [(set _.RC:$dst, RHS)], [(set _.RC:$dst, - (vselect _.KRCWM:$mask, MaskingRHS, _.RC:$src1))], + (vselect_mask _.KRCWM:$mask, MaskingRHS, _.RC:$src1))], [(set _.RC:$dst, - (vselect _.KRCWM:$mask, MaskingRHS, _.ImmAllZerosV))], + (vselect_mask _.KRCWM:$mask, MaskingRHS, _.ImmAllZerosV))], "", IsCommutable, IsKCommutable>; // Alias instruction that maps zero vector to pxor / xorp* for AVX-512. 
@@ -656,45 +663,45 @@ list p> { let Predicates = p in { def : Pat<(Cast.VT - (vselect Cast.KRCWM:$mask, - (bitconvert - (vinsert_insert:$ins (To.VT To.RC:$src1), - (From.VT From.RC:$src2), - (iPTR imm))), - Cast.RC:$src0)), + (vselect_mask Cast.KRCWM:$mask, + (bitconvert + (vinsert_insert:$ins (To.VT To.RC:$src1), + (From.VT From.RC:$src2), + (iPTR imm))), + Cast.RC:$src0)), (!cast(InstrStr#"rrk") Cast.RC:$src0, Cast.KRCWM:$mask, To.RC:$src1, From.RC:$src2, (INSERT_get_vinsert_imm To.RC:$ins))>; def : Pat<(Cast.VT - (vselect Cast.KRCWM:$mask, - (bitconvert - (vinsert_insert:$ins (To.VT To.RC:$src1), - (From.VT - (bitconvert - (From.LdFrag addr:$src2))), - (iPTR imm))), - Cast.RC:$src0)), + (vselect_mask Cast.KRCWM:$mask, + (bitconvert + (vinsert_insert:$ins (To.VT To.RC:$src1), + (From.VT + (bitconvert + (From.LdFrag addr:$src2))), + (iPTR imm))), + Cast.RC:$src0)), (!cast(InstrStr#"rmk") Cast.RC:$src0, Cast.KRCWM:$mask, To.RC:$src1, addr:$src2, (INSERT_get_vinsert_imm To.RC:$ins))>; def : Pat<(Cast.VT - (vselect Cast.KRCWM:$mask, - (bitconvert - (vinsert_insert:$ins (To.VT To.RC:$src1), - (From.VT From.RC:$src2), - (iPTR imm))), - Cast.ImmAllZerosV)), + (vselect_mask Cast.KRCWM:$mask, + (bitconvert + (vinsert_insert:$ins (To.VT To.RC:$src1), + (From.VT From.RC:$src2), + (iPTR imm))), + Cast.ImmAllZerosV)), (!cast(InstrStr#"rrkz") Cast.KRCWM:$mask, To.RC:$src1, From.RC:$src2, (INSERT_get_vinsert_imm To.RC:$ins))>; def : Pat<(Cast.VT - (vselect Cast.KRCWM:$mask, - (bitconvert - (vinsert_insert:$ins (To.VT To.RC:$src1), - (From.VT (From.LdFrag addr:$src2)), - (iPTR imm))), - Cast.ImmAllZerosV)), + (vselect_mask Cast.KRCWM:$mask, + (bitconvert + (vinsert_insert:$ins (To.VT To.RC:$src1), + (From.VT (From.LdFrag addr:$src2)), + (iPTR imm))), + Cast.ImmAllZerosV)), (!cast(InstrStr#"rmkz") Cast.KRCWM:$mask, To.RC:$src1, addr:$src2, (INSERT_get_vinsert_imm To.RC:$ins))>; @@ -1012,20 +1019,20 @@ SDNodeXForm EXTRACT_get_vextract_imm, list p> { let Predicates = p in { - def : Pat<(Cast.VT (vselect Cast.KRCWM:$mask, - (bitconvert - (To.VT (vextract_extract:$ext - (From.VT From.RC:$src), (iPTR imm)))), - To.RC:$src0)), + def : Pat<(Cast.VT (vselect_mask Cast.KRCWM:$mask, + (bitconvert + (To.VT (vextract_extract:$ext + (From.VT From.RC:$src), (iPTR imm)))), + To.RC:$src0)), (Cast.VT (!cast(InstrStr#"rrk") Cast.RC:$src0, Cast.KRCWM:$mask, From.RC:$src, (EXTRACT_get_vextract_imm To.RC:$ext)))>; - def : Pat<(Cast.VT (vselect Cast.KRCWM:$mask, - (bitconvert - (To.VT (vextract_extract:$ext - (From.VT From.RC:$src), (iPTR imm)))), - Cast.ImmAllZerosV)), + def : Pat<(Cast.VT (vselect_mask Cast.KRCWM:$mask, + (bitconvert + (To.VT (vextract_extract:$ext + (From.VT From.RC:$src), (iPTR imm)))), + Cast.ImmAllZerosV)), (Cast.VT (!cast(InstrStr#"rrkz") Cast.KRCWM:$mask, From.RC:$src, (EXTRACT_get_vextract_imm To.RC:$ext)))>; @@ -1134,15 +1141,15 @@ def : Pat<(DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)), (!cast(Name#DestInfo.ZSuffix#rr) (SrcInfo.VT (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC)))>; - def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask, - (X86VBroadcast SrcInfo.FRC:$src), - DestInfo.RC:$src0)), + def : Pat<(DestInfo.VT (vselect_mask DestInfo.KRCWM:$mask, + (X86VBroadcast SrcInfo.FRC:$src), + DestInfo.RC:$src0)), (!cast(Name#DestInfo.ZSuffix#rrk) DestInfo.RC:$src0, DestInfo.KRCWM:$mask, (SrcInfo.VT (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC)))>; - def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask, - (X86VBroadcast SrcInfo.FRC:$src), - DestInfo.ImmAllZerosV)), + def : Pat<(DestInfo.VT 
(vselect_mask DestInfo.KRCWM:$mask, + (X86VBroadcast SrcInfo.FRC:$src), + DestInfo.ImmAllZerosV)), (!cast(Name#DestInfo.ZSuffix#rrkz) DestInfo.KRCWM:$mask, (SrcInfo.VT (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC)))>; } @@ -1172,7 +1179,7 @@ !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|", "${dst} {${mask}} {z}, $src}"), [(set MaskInfo.RC:$dst, - (vselect MaskInfo.KRCWM:$mask, + (vselect_mask MaskInfo.KRCWM:$mask, (MaskInfo.VT (bitconvert (DestInfo.VT @@ -1186,7 +1193,7 @@ !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}}|", "${dst} {${mask}}, $src}"), [(set MaskInfo.RC:$dst, - (vselect MaskInfo.KRCWM:$mask, + (vselect_mask MaskInfo.KRCWM:$mask, (MaskInfo.VT (bitconvert (DestInfo.VT @@ -1211,7 +1218,7 @@ !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|", "${dst} {${mask}} {z}, $src}"), [(set MaskInfo.RC:$dst, - (vselect MaskInfo.KRCWM:$mask, + (vselect_mask MaskInfo.KRCWM:$mask, (MaskInfo.VT (bitconvert (DestInfo.VT @@ -1228,7 +1235,7 @@ !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}}|", "${dst} {${mask}}, $src}"), [(set MaskInfo.RC:$dst, - (vselect MaskInfo.KRCWM:$mask, + (vselect_mask MaskInfo.KRCWM:$mask, (MaskInfo.VT (bitconvert (DestInfo.VT @@ -1321,11 +1328,11 @@ (!cast(Name#rr) (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), SrcRC:$src, Subreg)))>; - def : Pat <(vselect _.KRCWM:$mask, (_.VT (OpNode SrcRC:$src)), _.RC:$src0), + def : Pat <(vselect_mask _.KRCWM:$mask, (_.VT (OpNode SrcRC:$src)), _.RC:$src0), (!cast(Name#rrk) _.RC:$src0, _.KRCWM:$mask, (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), SrcRC:$src, Subreg)))>; - def : Pat <(vselect _.KRCWM:$mask, (_.VT (OpNode SrcRC:$src)), _.ImmAllZerosV), + def : Pat <(vselect_mask _.KRCWM:$mask, (_.VT (OpNode SrcRC:$src)), _.ImmAllZerosV), (!cast(Name#rrkz) _.KRCWM:$mask, (i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), SrcRC:$src, Subreg)))>; } @@ -1481,38 +1488,38 @@ (VBROADCASTI32X4rm addr:$src)>; // Patterns for selects of bitcasted operations. 
-def : Pat<(vselect VK16WM:$mask, - (bc_v16f32 (v8f64 (X86SubVBroadcast (loadv2f64 addr:$src)))), - (v16f32 immAllZerosV)), +def : Pat<(vselect_mask VK16WM:$mask, + (bc_v16f32 (v8f64 (X86SubVBroadcast (loadv2f64 addr:$src)))), + (v16f32 immAllZerosV)), (VBROADCASTF32X4rmkz VK16WM:$mask, addr:$src)>; -def : Pat<(vselect VK16WM:$mask, - (bc_v16f32 (v8f64 (X86SubVBroadcast (loadv2f64 addr:$src)))), - VR512:$src0), +def : Pat<(vselect_mask VK16WM:$mask, + (bc_v16f32 (v8f64 (X86SubVBroadcast (loadv2f64 addr:$src)))), + VR512:$src0), (VBROADCASTF32X4rmk VR512:$src0, VK16WM:$mask, addr:$src)>; -def : Pat<(vselect VK16WM:$mask, - (bc_v16i32 (v8i64 (X86SubVBroadcast (loadv2i64 addr:$src)))), - (v16i32 immAllZerosV)), +def : Pat<(vselect_mask VK16WM:$mask, + (bc_v16i32 (v8i64 (X86SubVBroadcast (loadv2i64 addr:$src)))), + (v16i32 immAllZerosV)), (VBROADCASTI32X4rmkz VK16WM:$mask, addr:$src)>; -def : Pat<(vselect VK16WM:$mask, - (bc_v16i32 (v8i64 (X86SubVBroadcast (loadv2i64 addr:$src)))), - VR512:$src0), +def : Pat<(vselect_mask VK16WM:$mask, + (bc_v16i32 (v8i64 (X86SubVBroadcast (loadv2i64 addr:$src)))), + VR512:$src0), (VBROADCASTI32X4rmk VR512:$src0, VK16WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8f64 (v16f32 (X86SubVBroadcast (loadv8f32 addr:$src)))), - (v8f64 immAllZerosV)), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8f64 (v16f32 (X86SubVBroadcast (loadv8f32 addr:$src)))), + (v8f64 immAllZerosV)), (VBROADCASTF64X4rmkz VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8f64 (v16f32 (X86SubVBroadcast (loadv8f32 addr:$src)))), - VR512:$src0), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8f64 (v16f32 (X86SubVBroadcast (loadv8f32 addr:$src)))), + VR512:$src0), (VBROADCASTF64X4rmk VR512:$src0, VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8i64 (v16i32 (X86SubVBroadcast (loadv8i32 addr:$src)))), - (v8i64 immAllZerosV)), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8i64 (v16i32 (X86SubVBroadcast (loadv8i32 addr:$src)))), + (v8i64 immAllZerosV)), (VBROADCASTI64X4rmkz VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8i64 (v16i32 (X86SubVBroadcast (loadv8i32 addr:$src)))), - VR512:$src0), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8i64 (v16i32 (X86SubVBroadcast (loadv8i32 addr:$src)))), + VR512:$src0), (VBROADCASTI64X4rmk VR512:$src0, VK8WM:$mask, addr:$src)>; } @@ -1534,21 +1541,21 @@ (VBROADCASTI32X4Z256rm addr:$src)>; // Patterns for selects of bitcasted operations. 
-def : Pat<(vselect VK8WM:$mask, - (bc_v8f32 (v4f64 (X86SubVBroadcast (loadv2f64 addr:$src)))), - (v8f32 immAllZerosV)), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8f32 (v4f64 (X86SubVBroadcast (loadv2f64 addr:$src)))), + (v8f32 immAllZerosV)), (VBROADCASTF32X4Z256rmkz VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8f32 (v4f64 (X86SubVBroadcast (loadv2f64 addr:$src)))), - VR256X:$src0), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8f32 (v4f64 (X86SubVBroadcast (loadv2f64 addr:$src)))), + VR256X:$src0), (VBROADCASTF32X4Z256rmk VR256X:$src0, VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8i32 (v4i64 (X86SubVBroadcast (loadv2i64 addr:$src)))), - (v8i32 immAllZerosV)), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8i32 (v4i64 (X86SubVBroadcast (loadv2i64 addr:$src)))), + (v8i32 immAllZerosV)), (VBROADCASTI32X4Z256rmkz VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8i32 (v4i64 (X86SubVBroadcast (loadv2i64 addr:$src)))), - VR256X:$src0), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8i32 (v4i64 (X86SubVBroadcast (loadv2i64 addr:$src)))), + VR256X:$src0), (VBROADCASTI32X4Z256rmk VR256X:$src0, VK8WM:$mask, addr:$src)>; @@ -1583,21 +1590,21 @@ EVEX_V256, EVEX_CD8<64, CD8VT2>; // Patterns for selects of bitcasted operations. -def : Pat<(vselect VK4WM:$mask, - (bc_v4f64 (v8f32 (X86SubVBroadcast (loadv4f32 addr:$src)))), - (v4f64 immAllZerosV)), +def : Pat<(vselect_mask VK4WM:$mask, + (bc_v4f64 (v8f32 (X86SubVBroadcast (loadv4f32 addr:$src)))), + (v4f64 immAllZerosV)), (VBROADCASTF64X2Z128rmkz VK4WM:$mask, addr:$src)>; -def : Pat<(vselect VK4WM:$mask, - (bc_v4f64 (v8f32 (X86SubVBroadcast (loadv4f32 addr:$src)))), - VR256X:$src0), +def : Pat<(vselect_mask VK4WM:$mask, + (bc_v4f64 (v8f32 (X86SubVBroadcast (loadv4f32 addr:$src)))), + VR256X:$src0), (VBROADCASTF64X2Z128rmk VR256X:$src0, VK4WM:$mask, addr:$src)>; -def : Pat<(vselect VK4WM:$mask, - (bc_v4i64 (v8i32 (X86SubVBroadcast (loadv4i32 addr:$src)))), - (v4i64 immAllZerosV)), +def : Pat<(vselect_mask VK4WM:$mask, + (bc_v4i64 (v8i32 (X86SubVBroadcast (loadv4i32 addr:$src)))), + (v4i64 immAllZerosV)), (VBROADCASTI64X2Z128rmkz VK4WM:$mask, addr:$src)>; -def : Pat<(vselect VK4WM:$mask, - (bc_v4i64 (v8i32 (X86SubVBroadcast (loadv4i32 addr:$src)))), - VR256X:$src0), +def : Pat<(vselect_mask VK4WM:$mask, + (bc_v4i64 (v8i32 (X86SubVBroadcast (loadv4i32 addr:$src)))), + VR256X:$src0), (VBROADCASTI64X2Z128rmk VR256X:$src0, VK4WM:$mask, addr:$src)>; } @@ -1616,38 +1623,38 @@ EVEX_V512, EVEX_CD8<32, CD8VT8>; // Patterns for selects of bitcasted operations. 
-def : Pat<(vselect VK16WM:$mask, - (bc_v16f32 (v8f64 (X86SubVBroadcast (loadv4f64 addr:$src)))), - (v16f32 immAllZerosV)), +def : Pat<(vselect_mask VK16WM:$mask, + (bc_v16f32 (v8f64 (X86SubVBroadcast (loadv4f64 addr:$src)))), + (v16f32 immAllZerosV)), (VBROADCASTF32X8rmkz VK16WM:$mask, addr:$src)>; -def : Pat<(vselect VK16WM:$mask, - (bc_v16f32 (v8f64 (X86SubVBroadcast (loadv4f64 addr:$src)))), - VR512:$src0), +def : Pat<(vselect_mask VK16WM:$mask, + (bc_v16f32 (v8f64 (X86SubVBroadcast (loadv4f64 addr:$src)))), + VR512:$src0), (VBROADCASTF32X8rmk VR512:$src0, VK16WM:$mask, addr:$src)>; -def : Pat<(vselect VK16WM:$mask, - (bc_v16i32 (v8i64 (X86SubVBroadcast (loadv4i64 addr:$src)))), - (v16i32 immAllZerosV)), +def : Pat<(vselect_mask VK16WM:$mask, + (bc_v16i32 (v8i64 (X86SubVBroadcast (loadv4i64 addr:$src)))), + (v16i32 immAllZerosV)), (VBROADCASTI32X8rmkz VK16WM:$mask, addr:$src)>; -def : Pat<(vselect VK16WM:$mask, - (bc_v16i32 (v8i64 (X86SubVBroadcast (loadv4i64 addr:$src)))), - VR512:$src0), +def : Pat<(vselect_mask VK16WM:$mask, + (bc_v16i32 (v8i64 (X86SubVBroadcast (loadv4i64 addr:$src)))), + VR512:$src0), (VBROADCASTI32X8rmk VR512:$src0, VK16WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8f64 (v16f32 (X86SubVBroadcast (loadv4f32 addr:$src)))), - (v8f64 immAllZerosV)), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8f64 (v16f32 (X86SubVBroadcast (loadv4f32 addr:$src)))), + (v8f64 immAllZerosV)), (VBROADCASTF64X2rmkz VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8f64 (v16f32 (X86SubVBroadcast (loadv4f32 addr:$src)))), - VR512:$src0), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8f64 (v16f32 (X86SubVBroadcast (loadv4f32 addr:$src)))), + VR512:$src0), (VBROADCASTF64X2rmk VR512:$src0, VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8i64 (v16i32 (X86SubVBroadcast (loadv4i32 addr:$src)))), - (v8i64 immAllZerosV)), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8i64 (v16i32 (X86SubVBroadcast (loadv4i32 addr:$src)))), + (v8i64 immAllZerosV)), (VBROADCASTI64X2rmkz VK8WM:$mask, addr:$src)>; -def : Pat<(vselect VK8WM:$mask, - (bc_v8i64 (v16i32 (X86SubVBroadcast (loadv4i32 addr:$src)))), - VR512:$src0), +def : Pat<(vselect_mask VK8WM:$mask, + (bc_v8i64 (v16i32 (X86SubVBroadcast (loadv4i32 addr:$src)))), + VR512:$src0), (VBROADCASTI64X2rmk VR512:$src0, VK8WM:$mask, addr:$src)>; } @@ -1801,24 +1808,27 @@ multiclass avx512_perm_i_lowering { - def : Pat<(_.VT (vselect _.KRCWM:$mask, - (X86VPermt2 (_.VT _.RC:$src2), - (IdxVT.VT (bitconvert (CastVT.VT _.RC:$src1))), _.RC:$src3), - (_.VT (bitconvert (CastVT.VT _.RC:$src1))))), + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, + (X86VPermt2 (_.VT _.RC:$src2), + (IdxVT.VT (bitconvert + (CastVT.VT _.RC:$src1))), + _.RC:$src3), + (_.VT (bitconvert (CastVT.VT _.RC:$src1))))), (!cast(InstrStr#"rrk") _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, _.RC:$src3)>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, - (X86VPermt2 _.RC:$src2, - (IdxVT.VT (bitconvert (CastVT.VT _.RC:$src1))), - (_.LdFrag addr:$src3)), - (_.VT (bitconvert (CastVT.VT _.RC:$src1))))), + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, + (X86VPermt2 _.RC:$src2, + (IdxVT.VT (bitconvert + (CastVT.VT _.RC:$src1))), + (_.LdFrag addr:$src3)), + (_.VT (bitconvert (CastVT.VT _.RC:$src1))))), (!cast(InstrStr#"rmk") _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3)>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, - (X86VPermt2 _.RC:$src2, - (IdxVT.VT (bitconvert (CastVT.VT _.RC:$src1))), - (_.BroadcastLdFrag addr:$src3)), - (_.VT (bitconvert (CastVT.VT _.RC:$src1))))), + def : 
Pat<(_.VT (vselect_mask _.KRCWM:$mask, + (X86VPermt2 _.RC:$src2, + (IdxVT.VT (bitconvert (CastVT.VT _.RC:$src1))), + (_.BroadcastLdFrag addr:$src3)), + (_.VT (bitconvert (CastVT.VT _.RC:$src1))))), (!cast(InstrStr#"rmbk") _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3)>; } @@ -3374,7 +3384,7 @@ !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|", "${dst} {${mask}}, $src1}"), [(set _.RC:$dst, (_.VT - (vselect _.KRCWM:$mask, + (vselect_mask _.KRCWM:$mask, (_.VT (ld_frag addr:$src1)), (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K, Sched<[Sched.RM]>; @@ -3383,7 +3393,7 @@ (ins _.KRCWM:$mask, _.MemOp:$src), OpcodeStr #"\t{$src, ${dst} {${mask}} {z}|"# "${dst} {${mask}} {z}, $src}", - [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask, + [(set _.RC:$dst, (_.VT (vselect_mask _.KRCWM:$mask, (_.VT (ld_frag addr:$src)), _.ImmAllZerosV)))], _.ExeDomain>, EVEX, EVEX_KZ, Sched<[Sched.RM]>; } @@ -5122,26 +5132,26 @@ X86VectorVTInfo _, X86VectorVTInfo IntInfo> { // Masked register-register logical operations. - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (bitconvert (IntInfo.VT (OpNode _.RC:$src1, _.RC:$src2))), _.RC:$src0)), (!cast(InstrStr#rrk) _.RC:$src0, _.KRCWM:$mask, _.RC:$src1, _.RC:$src2)>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (bitconvert (IntInfo.VT (OpNode _.RC:$src1, _.RC:$src2))), _.ImmAllZerosV)), (!cast(InstrStr#rrkz) _.KRCWM:$mask, _.RC:$src1, _.RC:$src2)>; // Masked register-memory logical operations. - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (bitconvert (IntInfo.VT (OpNode _.RC:$src1, (load addr:$src2)))), _.RC:$src0)), (!cast(InstrStr#rmk) _.RC:$src0, _.KRCWM:$mask, _.RC:$src1, addr:$src2)>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (bitconvert (IntInfo.VT (OpNode _.RC:$src1, (load addr:$src2)))), _.ImmAllZerosV)), @@ -5153,14 +5163,14 @@ X86VectorVTInfo _, X86VectorVTInfo IntInfo> { // Register-broadcast logical operations. 
- def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (bitconvert (IntInfo.VT (OpNode _.RC:$src1, (IntInfo.VT (IntInfo.BroadcastLdFrag addr:$src2))))), _.RC:$src0)), (!cast(InstrStr#rmbk) _.RC:$src0, _.KRCWM:$mask, _.RC:$src1, addr:$src2)>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (bitconvert (IntInfo.VT (OpNode _.RC:$src1, (IntInfo.VT (IntInfo.BroadcastLdFrag addr:$src2))))), @@ -7566,12 +7576,12 @@ (ins MaskRC:$mask, _Src.RC:$src), OpcodeStr, "$src", "$src", (_.VT (OpNode (_Src.VT _Src.RC:$src))), - (vselect MaskRC:$mask, - (_.VT (MaskOpNode (_Src.VT _Src.RC:$src))), - _.RC:$src0), - (vselect MaskRC:$mask, - (_.VT (MaskOpNode (_Src.VT _Src.RC:$src))), - _.ImmAllZerosV)>, + (vselect_mask MaskRC:$mask, + (_.VT (MaskOpNode (_Src.VT _Src.RC:$src))), + _.RC:$src0), + (vselect_mask MaskRC:$mask, + (_.VT (MaskOpNode (_Src.VT _Src.RC:$src))), + _.ImmAllZerosV)>, EVEX, Sched<[sched]>; defm rm : AVX512_maskable_cvt, + (vselect_mask MaskRC:$mask, MaskLdDAG, _.RC:$src0), + (vselect_mask MaskRC:$mask, MaskLdDAG, _.ImmAllZerosV)>, EVEX, Sched<[sched.Folded]>; defm rmb : AVX512_maskable_cvt, + (vselect_mask MaskRC:$mask, + (_.VT + (MaskOpNode + (_Src.VT + (_Src.BroadcastLdFrag addr:$src)))), + _.RC:$src0), + (vselect_mask MaskRC:$mask, + (_.VT + (MaskOpNode + (_Src.VT + (_Src.BroadcastLdFrag addr:$src)))), + _.ImmAllZerosV)>, EVEX, EVEX_B, Sched<[sched.Folded]>; } } @@ -8365,70 +8375,70 @@ let Predicates = [HasDQI, HasVLX] in { def : Pat<(v2i64 (X86cvtp2Int (bc_v4f32 (v2f64 (X86vzload64 addr:$src))))), (VCVTPS2QQZ128rm addr:$src)>; - def : Pat<(v2i64 (vselect VK2WM:$mask, - (X86cvtp2Int (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), - VR128X:$src0)), + def : Pat<(v2i64 (vselect_mask VK2WM:$mask, + (X86cvtp2Int (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), + VR128X:$src0)), (VCVTPS2QQZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; - def : Pat<(v2i64 (vselect VK2WM:$mask, - (X86cvtp2Int (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), - v2i64x_info.ImmAllZerosV)), + def : Pat<(v2i64 (vselect_mask VK2WM:$mask, + (X86cvtp2Int (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), + v2i64x_info.ImmAllZerosV)), (VCVTPS2QQZ128rmkz VK2WM:$mask, addr:$src)>; def : Pat<(v2i64 (X86cvtp2UInt (bc_v4f32 (v2f64 (X86vzload64 addr:$src))))), (VCVTPS2UQQZ128rm addr:$src)>; - def : Pat<(v2i64 (vselect VK2WM:$mask, - (X86cvtp2UInt (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), - VR128X:$src0)), + def : Pat<(v2i64 (vselect_mask VK2WM:$mask, + (X86cvtp2UInt (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), + VR128X:$src0)), (VCVTPS2UQQZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; - def : Pat<(v2i64 (vselect VK2WM:$mask, - (X86cvtp2UInt (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), - v2i64x_info.ImmAllZerosV)), + def : Pat<(v2i64 (vselect_mask VK2WM:$mask, + (X86cvtp2UInt (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), + v2i64x_info.ImmAllZerosV)), (VCVTPS2UQQZ128rmkz VK2WM:$mask, addr:$src)>; def : Pat<(v2i64 (X86any_cvttp2si (bc_v4f32 (v2f64 (X86vzload64 addr:$src))))), (VCVTTPS2QQZ128rm addr:$src)>; - def : Pat<(v2i64 (vselect VK2WM:$mask, - (X86cvttp2si (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), - VR128X:$src0)), + def : Pat<(v2i64 (vselect_mask VK2WM:$mask, + (X86cvttp2si (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), + VR128X:$src0)), (VCVTTPS2QQZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; - def : Pat<(v2i64 (vselect VK2WM:$mask, - (X86cvttp2si (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), - v2i64x_info.ImmAllZerosV)), + def : Pat<(v2i64 (vselect_mask 
VK2WM:$mask, + (X86cvttp2si (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), + v2i64x_info.ImmAllZerosV)), (VCVTTPS2QQZ128rmkz VK2WM:$mask, addr:$src)>; def : Pat<(v2i64 (X86any_cvttp2ui (bc_v4f32 (v2f64 (X86vzload64 addr:$src))))), (VCVTTPS2UQQZ128rm addr:$src)>; - def : Pat<(v2i64 (vselect VK2WM:$mask, - (X86cvttp2ui (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), - VR128X:$src0)), + def : Pat<(v2i64 (vselect_mask VK2WM:$mask, + (X86cvttp2ui (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), + VR128X:$src0)), (VCVTTPS2UQQZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; - def : Pat<(v2i64 (vselect VK2WM:$mask, - (X86cvttp2ui (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), - v2i64x_info.ImmAllZerosV)), + def : Pat<(v2i64 (vselect_mask VK2WM:$mask, + (X86cvttp2ui (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), + v2i64x_info.ImmAllZerosV)), (VCVTTPS2UQQZ128rmkz VK2WM:$mask, addr:$src)>; } let Predicates = [HasVLX] in { def : Pat<(v2f64 (X86any_VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src))))), (VCVTDQ2PDZ128rm addr:$src)>; - def : Pat<(v2f64 (vselect VK2WM:$mask, - (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), - VR128X:$src0)), + def : Pat<(v2f64 (vselect_mask VK2WM:$mask, + (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), + VR128X:$src0)), (VCVTDQ2PDZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; - def : Pat<(v2f64 (vselect VK2WM:$mask, - (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), - v2f64x_info.ImmAllZerosV)), + def : Pat<(v2f64 (vselect_mask VK2WM:$mask, + (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), + v2f64x_info.ImmAllZerosV)), (VCVTDQ2PDZ128rmkz VK2WM:$mask, addr:$src)>; def : Pat<(v2f64 (X86any_VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src))))), (VCVTUDQ2PDZ128rm addr:$src)>; - def : Pat<(v2f64 (vselect VK2WM:$mask, - (X86VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), - VR128X:$src0)), + def : Pat<(v2f64 (vselect_mask VK2WM:$mask, + (X86VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), + VR128X:$src0)), (VCVTUDQ2PDZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; - def : Pat<(v2f64 (vselect VK2WM:$mask, - (X86VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), - v2f64x_info.ImmAllZerosV)), + def : Pat<(v2f64 (vselect_mask VK2WM:$mask, + (X86VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), + v2f64x_info.ImmAllZerosV)), (VCVTUDQ2PDZ128rmkz VK2WM:$mask, addr:$src)>; } @@ -9098,14 +9108,14 @@ // same order as X86vmtrunc, X86vmtruncs, X86vmtruncus. This allows us to pass // either to the multiclasses. 
def select_trunc : PatFrag<(ops node:$src, node:$src0, node:$mask), - (vselect node:$mask, - (trunc node:$src), node:$src0)>; + (vselect_mask node:$mask, + (trunc node:$src), node:$src0)>; def select_truncs : PatFrag<(ops node:$src, node:$src0, node:$mask), - (vselect node:$mask, - (X86vtruncs node:$src), node:$src0)>; + (vselect_mask node:$mask, + (X86vtruncs node:$src), node:$src0)>; def select_truncus : PatFrag<(ops node:$src, node:$src0, node:$mask), - (vselect node:$mask, - (X86vtruncus node:$src), node:$src0)>; + (vselect_mask node:$mask, + (X86vtruncus node:$src), node:$src0)>; multiclass avx512_trunc_common opc, string OpcodeStr, SDNode OpNode, SDPatternOperator MaskNode, @@ -10503,40 +10513,40 @@ multiclass avx512_vpalign_mask_lowering { - def : Pat<(To.VT (vselect To.KRCWM:$mask, - (bitconvert - (From.VT (OpNode From.RC:$src1, From.RC:$src2, - timm:$src3))), - To.RC:$src0)), + def : Pat<(To.VT (vselect_mask To.KRCWM:$mask, + (bitconvert + (From.VT (OpNode From.RC:$src1, From.RC:$src2, + timm:$src3))), + To.RC:$src0)), (!cast(OpcodeStr#"rrik") To.RC:$src0, To.KRCWM:$mask, To.RC:$src1, To.RC:$src2, (ImmXForm timm:$src3))>; - def : Pat<(To.VT (vselect To.KRCWM:$mask, - (bitconvert - (From.VT (OpNode From.RC:$src1, From.RC:$src2, - timm:$src3))), - To.ImmAllZerosV)), + def : Pat<(To.VT (vselect_mask To.KRCWM:$mask, + (bitconvert + (From.VT (OpNode From.RC:$src1, From.RC:$src2, + timm:$src3))), + To.ImmAllZerosV)), (!cast(OpcodeStr#"rrikz") To.KRCWM:$mask, To.RC:$src1, To.RC:$src2, (ImmXForm timm:$src3))>; - def : Pat<(To.VT (vselect To.KRCWM:$mask, - (bitconvert - (From.VT (OpNode From.RC:$src1, - (From.LdFrag addr:$src2), - timm:$src3))), - To.RC:$src0)), + def : Pat<(To.VT (vselect_mask To.KRCWM:$mask, + (bitconvert + (From.VT (OpNode From.RC:$src1, + (From.LdFrag addr:$src2), + timm:$src3))), + To.RC:$src0)), (!cast(OpcodeStr#"rmik") To.RC:$src0, To.KRCWM:$mask, To.RC:$src1, addr:$src2, (ImmXForm timm:$src3))>; - def : Pat<(To.VT (vselect To.KRCWM:$mask, - (bitconvert - (From.VT (OpNode From.RC:$src1, - (From.LdFrag addr:$src2), - timm:$src3))), - To.ImmAllZerosV)), + def : Pat<(To.VT (vselect_mask To.KRCWM:$mask, + (bitconvert + (From.VT (OpNode From.RC:$src1, + (From.LdFrag addr:$src2), + timm:$src3))), + To.ImmAllZerosV)), (!cast(OpcodeStr#"rmikz") To.KRCWM:$mask, To.RC:$src1, addr:$src2, (ImmXForm timm:$src3))>; @@ -10553,24 +10563,24 @@ (!cast(OpcodeStr#"rmbi") To.RC:$src1, addr:$src2, (ImmXForm timm:$src3))>; - def : Pat<(To.VT (vselect To.KRCWM:$mask, - (bitconvert - (From.VT (OpNode From.RC:$src1, - (bitconvert - (To.VT (To.BroadcastLdFrag addr:$src2))), - timm:$src3))), - To.RC:$src0)), + def : Pat<(To.VT (vselect_mask To.KRCWM:$mask, + (bitconvert + (From.VT (OpNode From.RC:$src1, + (bitconvert + (To.VT (To.BroadcastLdFrag addr:$src2))), + timm:$src3))), + To.RC:$src0)), (!cast(OpcodeStr#"rmbik") To.RC:$src0, To.KRCWM:$mask, To.RC:$src1, addr:$src2, (ImmXForm timm:$src3))>; - def : Pat<(To.VT (vselect To.KRCWM:$mask, - (bitconvert - (From.VT (OpNode From.RC:$src1, - (bitconvert - (To.VT (To.BroadcastLdFrag addr:$src2))), - timm:$src3))), - To.ImmAllZerosV)), + def : Pat<(To.VT (vselect_mask To.KRCWM:$mask, + (bitconvert + (From.VT (OpNode From.RC:$src1, + (bitconvert + (To.VT (To.BroadcastLdFrag addr:$src2))), + timm:$src3))), + To.ImmAllZerosV)), (!cast(OpcodeStr#"rmbikz") To.KRCWM:$mask, To.RC:$src1, addr:$src2, (ImmXForm timm:$src3))>; @@ -10814,19 +10824,19 @@ def : Pat<(v2f64 (X86VBroadcast f64:$src)), (VMOVDDUPZ128rr (v2f64 (COPY_TO_REGCLASS FR64X:$src, VR128X)))>; 
-def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)), - (v2f64 VR128X:$src0)), +def : Pat<(vselect_mask (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)), + (v2f64 VR128X:$src0)), (VMOVDDUPZ128rrk VR128X:$src0, VK2WM:$mask, (v2f64 (COPY_TO_REGCLASS FR64X:$src, VR128X)))>; -def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)), - immAllZerosV), +def : Pat<(vselect_mask (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)), + immAllZerosV), (VMOVDDUPZ128rrkz VK2WM:$mask, (v2f64 (COPY_TO_REGCLASS FR64X:$src, VR128X)))>; -def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcastld64 addr:$src)), - (v2f64 VR128X:$src0)), +def : Pat<(vselect_mask (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcastld64 addr:$src)), + (v2f64 VR128X:$src0)), (VMOVDDUPZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; -def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcastld64 addr:$src)), - immAllZerosV), +def : Pat<(vselect_mask (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcastld64 addr:$src)), + immAllZerosV), (VMOVDDUPZ128rmkz VK2WM:$mask, addr:$src)>; } @@ -11167,12 +11177,12 @@ }// Constraints = "$src1 = $dst" // Additional patterns for matching passthru operand in other positions. - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src3, _.RC:$src2, _.RC:$src1, (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rrik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, _.RC:$src3, (VPTERNLOG321_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3, (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rrik) _.RC:$src1, _.KRCWM:$mask, @@ -11191,13 +11201,13 @@ // Additional patterns for matching zero masking with loads in other // positions. - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode (bitconvert (_.LdFrag addr:$src3)), _.RC:$src2, _.RC:$src1, (i8 timm:$src4)), _.ImmAllZerosV)), (!cast(Name#_.ZSuffix#rmikz) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG321_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src1, (bitconvert (_.LdFrag addr:$src3)), _.RC:$src2, (i8 timm:$src4)), _.ImmAllZerosV)), @@ -11206,31 +11216,31 @@ // Additional patterns for matching masked loads with different // operand orders. 
- def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src1, (bitconvert (_.LdFrag addr:$src3)), _.RC:$src2, (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG132_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode (bitconvert (_.LdFrag addr:$src3)), _.RC:$src2, _.RC:$src1, (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG321_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src2, _.RC:$src1, (bitconvert (_.LdFrag addr:$src3)), (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG213_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src2, (bitconvert (_.LdFrag addr:$src3)), _.RC:$src1, (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG231_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode (bitconvert (_.LdFrag addr:$src3)), _.RC:$src1, _.RC:$src2, (i8 timm:$src4)), _.RC:$src1)), @@ -11250,14 +11260,14 @@ // Additional patterns for matching zero masking with broadcasts in other // positions. - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode (_.BroadcastLdFrag addr:$src3), _.RC:$src2, _.RC:$src1, (i8 timm:$src4)), _.ImmAllZerosV)), (!cast(Name#_.ZSuffix#rmbikz) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG321_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src1, (_.BroadcastLdFrag addr:$src3), _.RC:$src2, (i8 timm:$src4)), @@ -11268,32 +11278,32 @@ // Additional patterns for matching masked broadcasts with different // operand orders. 
- def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src1, (_.BroadcastLdFrag addr:$src3), _.RC:$src2, (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rmbik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG132_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode (_.BroadcastLdFrag addr:$src3), _.RC:$src2, _.RC:$src1, (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rmbik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG321_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src2, _.RC:$src1, (_.BroadcastLdFrag addr:$src3), (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rmbik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG213_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode _.RC:$src2, (_.BroadcastLdFrag addr:$src3), _.RC:$src1, (i8 timm:$src4)), _.RC:$src1)), (!cast(Name#_.ZSuffix#rmbik) _.RC:$src1, _.KRCWM:$mask, _.RC:$src2, addr:$src3, (VPTERNLOG231_imm8 timm:$src4))>; - def : Pat<(_.VT (vselect _.KRCWM:$mask, + def : Pat<(_.VT (vselect_mask _.KRCWM:$mask, (OpNode (_.BroadcastLdFrag addr:$src3), _.RC:$src1, _.RC:$src2, (i8 timm:$src4)), _.RC:$src1)), diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll --- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll +++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll @@ -315,9 +315,9 @@ define <16 x i32> @test14(<16 x i32>%a, <16 x i32>%b) { ; CHECK-LABEL: test14: ; CHECK: ## %bb.0: -; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x7d,0x48,0xfa,0xd1] -; CHECK-NEXT: vpcmpgtd %zmm0, %zmm2, %k1 ## encoding: [0x62,0xf1,0x6d,0x48,0x66,0xc8] -; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0xfa,0xc1] +; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm1 ## encoding: [0x62,0xf1,0x7d,0x48,0xfa,0xc9] +; CHECK-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 ## encoding: [0x62,0xf1,0x75,0x48,0x66,0xc8] +; CHECK-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x6f,0xc1] ; CHECK-NEXT: retq ## encoding: [0xc3] %sub_r = sub <16 x i32> %a, %b %cmp.i2.i = icmp sgt <16 x i32> %sub_r, %a @@ -330,9 +330,9 @@ define <8 x i64> @test15(<8 x i64>%a, <8 x i64>%b) { ; CHECK-LABEL: test15: ; CHECK: ## %bb.0: -; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xfd,0x48,0xfb,0xd1] -; CHECK-NEXT: vpcmpgtq %zmm0, %zmm2, %k1 ## encoding: [0x62,0xf2,0xed,0x48,0x37,0xc8] -; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0xfb,0xc1] +; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm1 ## encoding: [0x62,0xf1,0xfd,0x48,0xfb,0xc9] +; CHECK-NEXT: vpcmpgtq %zmm0, %zmm1, %k1 ## encoding: [0x62,0xf2,0xf5,0x48,0x37,0xc8] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x6f,0xc1] ; CHECK-NEXT: retq ## encoding: [0xc3] %sub_r = sub <8 x i64> %a, %b %cmp.i2.i = icmp sgt <8 x i64> %sub_r, %a
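For illustration only (not part of the patch): a minimal IR sketch of the one-use heuristic that isProfitableToFormMaskedOp applies to opcodes outside the FIXME whitelist. The function name @masked_sub_extra_use and the pointer argument %p are hypothetical. Because %sub has a second user (the store), folding it into a masked VPSUBD would force the subtract to be duplicated, so the expectation under this change is that the plain VPSUBD result is reused and the select is matched as a masked register-register move, mirroring the updated test14/test15 checks above.

define <16 x i32> @masked_sub_extra_use(<16 x i32> %a, <16 x i32> %b, <16 x i32>* %p) {
  %sub = sub <16 x i32> %a, %b
  store <16 x i32> %sub, <16 x i32>* %p   ; second use of %sub keeps the unmasked result live
  %cmp = icmp sgt <16 x i32> %sub, %a
  %sel = select <16 x i1> %cmp, <16 x i32> %sub, <16 x i32> zeroinitializer
  ret <16 x i32> %sel
}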