Index: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -815,6 +815,26 @@
       CurDAG->DeleteNode(N);
       continue;
     }
+    case ISD::ANY_EXTEND:
+    case ISD::ANY_EXTEND_VECTOR_INREG: {
+      // Replace vector any extend with the zero extend equivalents so we don't
+      // need 2 sets of patterns. Ignore vXi1 extensions.
+      if (!N->getValueType(0).isVector() ||
+          N->getOperand(0).getScalarValueSizeInBits() == 1)
+        break;
+
+      unsigned NewOpc = N->getOpcode() == ISD::ANY_EXTEND
+                            ? ISD::ZERO_EXTEND
+                            : ISD::ZERO_EXTEND_VECTOR_INREG;
+
+      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
+                                    N->getOperand(0));
+      --I;
+      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
+      ++I;
+      CurDAG->DeleteNode(N);
+      continue;
+    }
     case ISD::FCEIL:
     case ISD::FFLOOR:
     case ISD::FTRUNC:
Index: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td
@@ -9732,41 +9732,6 @@
   }
 }
 
-multiclass AVX512_pmovx_patterns_aext<string OpcPrefix, SDNode ExtOp> :
-                 AVX512_pmovx_patterns_base<OpcPrefix, ExtOp> {
-  let Predicates = [HasVLX, HasBWI] in {
-  def : Pat<(v16i16 (ExtOp (v16i8 VR128X:$src))),
-            (!cast<I>(OpcPrefix#BWZ256rr) VR128X:$src)>;
-  }
-
-  let Predicates = [HasVLX] in {
-    def : Pat<(v8i32 (ExtOp (v8i16 VR128X:$src))),
-              (!cast<I>(OpcPrefix#WDZ256rr) VR128X:$src)>;
-
-    def : Pat<(v4i64 (ExtOp (v4i32 VR128X:$src))),
-              (!cast<I>(OpcPrefix#DQZ256rr) VR128X:$src)>;
-  }
-
-  // 512-bit patterns
-  let Predicates = [HasBWI] in {
-    def : Pat<(v32i16 (ExtOp (v32i8 VR256X:$src))),
-              (!cast<I>(OpcPrefix#BWZrr) VR256X:$src)>;
-  }
-  let Predicates = [HasAVX512] in {
-    def : Pat<(v16i32 (ExtOp (v16i8 VR128X:$src))),
-              (!cast<I>(OpcPrefix#BDZrr) VR128X:$src)>;
-    def : Pat<(v16i32 (ExtOp (v16i16 VR256X:$src))),
-              (!cast<I>(OpcPrefix#WDZrr) VR256X:$src)>;
-
-    def : Pat<(v8i64 (ExtOp (v8i16 VR128X:$src))),
-              (!cast<I>(OpcPrefix#WQZrr) VR128X:$src)>;
-
-    def : Pat<(v8i64 (ExtOp (v8i32 VR256X:$src))),
-              (!cast<I>(OpcPrefix#DQZrr) VR256X:$src)>;
-  }
-}
-
-
 multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
                                  SDNode InVecOp> :
                  AVX512_pmovx_patterns_base<OpcPrefix, ExtOp> {
@@ -9872,7 +9837,6 @@
 
 defm : AVX512_pmovx_patterns<"VPMOVSX", sext, sext_invec>;
 defm : AVX512_pmovx_patterns<"VPMOVZX", zext, zext_invec>;
-defm : AVX512_pmovx_patterns_aext<"VPMOVZX", anyext>;
 
 // Without BWI we can't do a trunc from v16i16 to v16i8. DAG combine can merge
 // ext+trunc aggresively making it impossible to legalize the DAG to this
Index: llvm/trunk/lib/Target/X86/X86InstrSSE.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td
@@ -4895,6 +4895,7 @@
 
 //===----------------------------------------------------------------------===//
 // SSE4.1 - Packed Move with Sign/Zero Extend
+// NOTE: Any Extend is promoted to Zero Extend in X86ISelDAGToDAG.cpp
 //===----------------------------------------------------------------------===//
 
 multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
@@ -4942,71 +4943,42 @@
 
 defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem, NoVLX>;
 
-// Patterns that we also need for any_extend.
-// Any_extend_vector_inreg is currently legalized to zero_extend_vector_inreg.
-multiclass SS41I_pmovx_avx2_patterns_base<string OpcPrefix, SDNode ExtOp> {
-  // Register-Register patterns
-  let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
-  def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
-            (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
-  }
-
-  let Predicates = [HasAVX2, NoVLX] in {
-    def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
-              (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
-
-    def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
-              (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
-  }
-
-  // AVX2 Register-Memory patterns
-  let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
-  def : Pat<(v16i16 (ExtOp (loadv16i8 addr:$src))),
-            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
-  def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
-            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
-  def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
-            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
-  }
-
-  let Predicates = [HasAVX2, NoVLX] in {
-    def : Pat<(v8i32 (ExtOp (loadv8i16 addr:$src))),
-              (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
-    def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
-              (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
-    def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
-              (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
-
-    def : Pat<(v4i64 (ExtOp (loadv4i32 addr:$src))),
-              (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
-    def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
-              (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
-    def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
-              (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
-  }
-}
-
 // AVX2 Patterns
 multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy,
-                                     SDNode ExtOp, SDNode InVecOp> :
-    SS41I_pmovx_avx2_patterns_base<OpcPrefix, ExtOp> {
-
+                                     SDNode ExtOp, SDNode InVecOp> {
   // Register-Register patterns
+  let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+  def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
+            (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
+  }
   let Predicates = [HasAVX2, NoVLX] in {
     def : Pat<(v8i32 (InVecOp (v16i8 VR128:$src))),
              (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
     def : Pat<(v4i64 (InVecOp (v16i8 VR128:$src))),
               (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;
 
+    def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
+              (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
     def : Pat<(v4i64 (InVecOp (v8i16 VR128:$src))),
               (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;
+
+    def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
+              (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
   }
 
   // Simple Register-Memory patterns
   let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
   def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
             (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+
+  def : Pat<(v16i16 (ExtOp (loadv16i8 addr:$src))),
+            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+  def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
+            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+  def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
+            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
   }
+
   let Predicates = [HasAVX2, NoVLX] in {
     def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
               (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
@@ -5024,6 +4996,13 @@
 
   // AVX2 Register-Memory patterns
   let Predicates = [HasAVX2, NoVLX] in {
+    def : Pat<(v8i32 (ExtOp (loadv8i16 addr:$src))),
+              (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+    def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
+              (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+    def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
+              (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+
     def : Pat<(v8i32 (InVecOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
               (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
     def : Pat<(v8i32 (InVecOp (v16i8 (vzmovl_v2i64 addr:$src)))),
@@ -5033,6 +5012,13 @@
     def : Pat<(v8i32 (InVecOp (loadv16i8 addr:$src))),
               (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
 
+    def : Pat<(v4i64 (ExtOp (loadv4i32 addr:$src))),
+              (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+    def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
+              (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+    def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
+              (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+
     def : Pat<(v4i64 (InVecOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
               (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
     def : Pat<(v4i64 (InVecOp (v16i8 (vzmovl_v4i32 addr:$src)))),
@@ -5055,7 +5041,6 @@
 
 defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", sext, sext_invec>;
 defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", zext, zext_invec>;
-defm : SS41I_pmovx_avx2_patterns_base<"VPMOVZX", anyext>;
 
 // SSE4.1/AVX patterns.
 multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
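
Note (not part of the patch): the X86ISelDAGToDAG.cpp hunk above is sound because an any-extend only promises that the low bits of each element are preserved; the new high bits are unspecified, so producing them as zeros is one legal choice. Canonicalizing ISD::ANY_EXTEND / ISD::ANY_EXTEND_VECTOR_INREG to their ZERO_EXTEND counterparts before instruction selection lets the existing VPMOVZX patterns cover both node kinds, which is what makes the duplicated anyext pattern multiclasses in the .td files removable. Since the deleted multiclasses were already instantiated with "VPMOVZX" for anyext, the selected instructions should not change. A minimal standalone C++ sketch of the any-extend contract (illustrative only; it uses no LLVM APIs and is not taken from the patch):

#include <cassert>
#include <cstdint>

// One legal lowering of a 16-bit to 32-bit "any extend": zero-extension.
// The contract only guarantees that the low 16 bits survive; the contents of
// the high 16 bits are unspecified, so filling them with zeros is acceptable.
static uint32_t anyExtend16To32(uint16_t X) {
  return static_cast<uint32_t>(X); // happens to zero the high half
}

int main() {
  uint16_t In = 0xBEEF;
  uint32_t Out = anyExtend16To32(In);
  // A consumer of an any-extended value may only rely on the low 16 bits.
  assert(static_cast<uint16_t>(Out) == In);
  return 0;
}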