Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5288,6 +5288,14 @@
     return DAG.getNode(ISD::SUB, dl, VT, Max, RHS);
   }
 
+  // uadd.sat(a, b) -> add(umin(a, ~b), b): clamping a to ~b means the add
+  // cannot overflow; if a was clamped, the result is ~b + b, i.e. all-ones.
+  if (Opcode == ISD::UADDSAT && isOperationLegalOrCustom(ISD::UMIN, VT)) {
+    SDValue InvRHS = DAG.getNOT(dl, RHS, VT);
+    SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS);
+    return DAG.getNode(ISD::ADD, dl, VT, Min, RHS);
+  }
+
   if (VT.isVector()) {
     // TODO: Consider not scalarizing here.
     return SDValue();
Index: lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.cpp
+++ lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1784,6 +1784,10 @@
     { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
     { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
     { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
+    { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
+    { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
+    { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
+    { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
   };
   static const CostTblEntry XOPCostTbl[] = {
     { ISD::BITREVERSE, MVT::v4i64, 4 },
@@ -1828,6 +1832,7 @@
     { ISD::USUBSAT, MVT::v16i16, 1 },
     { ISD::USUBSAT, MVT::v32i8, 1 },
     { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
+    { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
     { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
     { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
     { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
@@ -1864,6 +1869,7 @@
     { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
     { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
     { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
+    { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
     { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
     { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
     { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
@@ -1885,6 +1891,7 @@
   };
   static const CostTblEntry SSE42CostTbl[] = {
     { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
+    { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
     { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
     { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
   };
Index: test/Analysis/CostModel/X86/arith-usat.ll
===================================================================
--- test/Analysis/CostModel/X86/arith-usat.ll
+++ test/Analysis/CostModel/X86/arith-usat.ll
@@ -35,24 +35,43 @@
 declare <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8>, <64 x i8>)
 
 define i32 @add(i32 %arg) {
-; SSE-LABEL: 'add'
-; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
-; SSE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; SSE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; SSE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
-; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 
undef, i32 undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef) -; SSE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef +; SSSE3-LABEL: 'add' +; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef) +; SSSE3-NEXT: Cost Model: Found an 
estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef) +; SSSE3-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef +; +; SSE42-LABEL: 'add' +; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32I16 = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> undef, <32 x i16> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I8 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32I8 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> undef, <32 x i8> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V64I8 = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> undef, <64 x i8> undef) +; SSE42-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef ; ; AVX1-LABEL: 'add' ; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef) @@ -60,9 +79,9 @@ ; AVX1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) ; AVX1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 
x i64> undef) ; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) -; AVX1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; AVX1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; AVX1-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; AVX1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; AVX1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; AVX1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) ; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) ; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) ; AVX1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) @@ -79,9 +98,9 @@ ; AVX2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) ; AVX2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) ; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) -; AVX2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; AVX2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; AVX2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; AVX2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; AVX2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; AVX2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) ; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) ; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) ; AVX2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) @@ -94,13 +113,13 @@ ; ; AVX512F-LABEL: 'add' ; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef) -; AVX512F-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = 
call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef) -; AVX512F-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) -; AVX512F-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) +; AVX512F-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef) +; AVX512F-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) +; AVX512F-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) ; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) -; AVX512F-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; AVX512F-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; AVX512F-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; AVX512F-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; AVX512F-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; AVX512F-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) ; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) ; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) ; AVX512F-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) @@ -113,13 +132,13 @@ ; ; AVX512BW-LABEL: 'add' ; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef) -; AVX512BW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef) -; AVX512BW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) -; AVX512BW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) +; AVX512BW-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef) +; AVX512BW-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) +; AVX512BW-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) ; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 
@llvm.uadd.sat.i32(i32 undef, i32 undef) -; AVX512BW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; AVX512BW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; AVX512BW-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; AVX512BW-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; AVX512BW-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; AVX512BW-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) ; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) ; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) ; AVX512BW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) @@ -132,13 +151,13 @@ ; ; AVX512DQ-LABEL: 'add' ; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I64 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef) -; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef) -; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) -; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) +; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2I64 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef) +; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) +; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) ; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) -; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V16I32 = call <16 x i32> 
@llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) ; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) ; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) ; AVX512DQ-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) @@ -155,9 +174,9 @@ ; SLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) ; SLM-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) ; SLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) -; SLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; SLM-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; SLM-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; SLM-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; SLM-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; SLM-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) ; SLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) ; SLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) ; SLM-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) @@ -174,9 +193,9 @@ ; GLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) ; GLM-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) ; GLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) -; GLM-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; GLM-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; GLM-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; GLM-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; GLM-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; GLM-NEXT: Cost Model: Found an estimated cost of 12 for 
instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) ; GLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) ; GLM-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) ; GLM-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) @@ -193,9 +212,9 @@ ; BTVER2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef) ; BTVER2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef) ; BTVER2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef) -; BTVER2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) -; BTVER2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) -; BTVER2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) +; BTVER2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef) +; BTVER2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef) +; BTVER2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef) ; BTVER2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef) ; BTVER2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef) ; BTVER2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef) Index: test/CodeGen/X86/uadd_sat.ll =================================================================== --- test/CodeGen/X86/uadd_sat.ll +++ test/CodeGen/X86/uadd_sat.ll @@ -111,37 +111,18 @@ ; ; X64-LABEL: vec: ; X64: # %bb.0: -; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] -; X64-NEXT: movd %xmm2, %eax -; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] -; X64-NEXT: movd %xmm2, %ecx -; X64-NEXT: addl %eax, %ecx -; X64-NEXT: movl $-1, %eax -; X64-NEXT: cmovbl %eax, %ecx -; X64-NEXT: movd %ecx, %xmm2 -; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] -; X64-NEXT: movd %xmm3, %ecx -; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] -; X64-NEXT: movd %xmm3, %edx -; X64-NEXT: addl %ecx, %edx -; X64-NEXT: cmovbl %eax, %edx -; X64-NEXT: movd %edx, %xmm3 -; X64-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; X64-NEXT: movd %xmm1, %ecx -; X64-NEXT: movd %xmm0, %edx -; X64-NEXT: addl %ecx, %edx -; X64-NEXT: cmovbl %eax, %edx -; X64-NEXT: movd %edx, %xmm2 -; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] -; X64-NEXT: movd %xmm1, %ecx -; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] -; X64-NEXT: movd %xmm0, %edx -; X64-NEXT: addl %ecx, %edx -; X64-NEXT: cmovbl %eax, %edx -; 
X64-NEXT: movd %edx, %xmm0 -; X64-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; X64-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; X64-NEXT: movdqa %xmm2, %xmm0 +; X64-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] +; X64-NEXT: pxor %xmm0, %xmm2 +; X64-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647,2147483647,2147483647] +; X64-NEXT: pxor %xmm1, %xmm3 +; X64-NEXT: pcmpgtd %xmm2, %xmm3 +; X64-NEXT: pand %xmm3, %xmm0 +; X64-NEXT: pcmpeqd %xmm2, %xmm2 +; X64-NEXT: pxor %xmm3, %xmm2 +; X64-NEXT: movdqa %xmm1, %xmm3 +; X64-NEXT: pandn %xmm2, %xmm3 +; X64-NEXT: por %xmm3, %xmm0 +; X64-NEXT: paddd %xmm1, %xmm0 ; X64-NEXT: retq %tmp = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y); ret <4 x i32> %tmp; Index: test/CodeGen/X86/uadd_sat_vec.ll =================================================================== --- test/CodeGen/X86/uadd_sat_vec.ll +++ test/CodeGen/X86/uadd_sat_vec.ll @@ -635,85 +635,118 @@ define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind { ; SSE2-LABEL: v2i32: ; SSE2: # %bb.0: -; SSE2-NEXT: psllq $32, %xmm1 -; SSE2-NEXT: movq %xmm1, %rax ; SSE2-NEXT: psllq $32, %xmm0 -; SSE2-NEXT: movq %xmm0, %rcx -; SSE2-NEXT: addq %rax, %rcx -; SSE2-NEXT: movq $-1, %rax -; SSE2-NEXT: cmovbq %rax, %rcx -; SSE2-NEXT: movq %rcx, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm1, %rcx -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm0 -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] -; SSE2-NEXT: psrlq $32, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: psllq $32, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159] +; SSE2-NEXT: pxor %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm2, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] +; SSE2-NEXT: pcmpeqd %xmm2, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] +; SSE2-NEXT: por %xmm2, %xmm3 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm2 +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: pandn %xmm2, %xmm3 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddq %xmm1, %xmm0 +; SSE2-NEXT: psrlq $32, %xmm0 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v2i32: ; SSSE3: # %bb.0: -; SSSE3-NEXT: psllq $32, %xmm1 -; SSSE3-NEXT: movq %xmm1, %rax ; SSSE3-NEXT: psllq $32, %xmm0 -; SSSE3-NEXT: movq %xmm0, %rcx -; SSSE3-NEXT: addq %rax, %rcx -; SSSE3-NEXT: movq $-1, %rax -; SSSE3-NEXT: cmovbq %rax, %rcx -; SSSE3-NEXT: movq %rcx, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSSE3-NEXT: movq %xmm1, %rcx -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSSE3-NEXT: movq %xmm0, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm0 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] -; SSSE3-NEXT: psrlq $32, %xmm2 -; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] +; SSSE3-NEXT: pxor %xmm0, %xmm2 +; SSSE3-NEXT: psllq $32, %xmm1 +; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159] +; SSSE3-NEXT: pxor %xmm1, %xmm3 +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: 
pcmpgtd %xmm2, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] +; SSSE3-NEXT: pand %xmm5, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] +; SSSE3-NEXT: por %xmm2, %xmm3 +; SSSE3-NEXT: pand %xmm3, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm2 +; SSSE3-NEXT: movdqa %xmm1, %xmm3 +; SSSE3-NEXT: pandn %xmm2, %xmm3 +; SSSE3-NEXT: por %xmm3, %xmm0 +; SSSE3-NEXT: paddq %xmm1, %xmm0 +; SSSE3-NEXT: psrlq $32, %xmm0 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v2i32: ; SSE41: # %bb.0: +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllq $32, %xmm2 +; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456] +; SSE41-NEXT: pxor %xmm2, %xmm0 ; SSE41-NEXT: psllq $32, %xmm1 -; SSE41-NEXT: pextrq $1, %xmm1, %rax -; SSE41-NEXT: psllq $32, %xmm0 -; SSE41-NEXT: pextrq $1, %xmm0, %rcx -; SSE41-NEXT: addq %rax, %rcx -; SSE41-NEXT: movq $-1, %rax -; SSE41-NEXT: cmovbq %rax, %rcx -; SSE41-NEXT: movq %rcx, %xmm2 -; SSE41-NEXT: movq %xmm1, %rcx -; SSE41-NEXT: movq %xmm0, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm0 -; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; SSE41-NEXT: psrlq $32, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159] +; SSE41-NEXT: pxor %xmm1, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: pcmpgtd %xmm0, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] +; SSE41-NEXT: pcmpeqd %xmm0, %xmm3 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3] +; SSE41-NEXT: pand %xmm5, %xmm0 +; SSE41-NEXT: por %xmm4, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: pxor %xmm1, %xmm3 +; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3 +; SSE41-NEXT: paddq %xmm1, %xmm3 +; SSE41-NEXT: psrlq $32, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: v2i32: -; AVX: # %bb.0: -; AVX-NEXT: vpsllq $32, %xmm1, %xmm1 -; AVX-NEXT: vpextrq $1, %xmm1, %rax -; AVX-NEXT: vpsllq $32, %xmm0, %xmm0 -; AVX-NEXT: vpextrq $1, %xmm0, %rcx -; AVX-NEXT: addq %rax, %rcx -; AVX-NEXT: movq $-1, %rax -; AVX-NEXT: cmovbq %rax, %rcx -; AVX-NEXT: vmovq %rcx, %xmm2 -; AVX-NEXT: vmovq %xmm1, %rcx -; AVX-NEXT: vmovq %xmm0, %rdx -; AVX-NEXT: addq %rcx, %rdx -; AVX-NEXT: cmovbq %rax, %rdx -; AVX-NEXT: vmovq %rdx, %xmm0 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: v2i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2 +; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm3 +; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm4 +; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vblendvpd %xmm3, %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v2i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2 +; AVX2-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm3 +; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm4 +; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3 +; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v2i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $32, 
%xmm0, %xmm0 +; AVX512-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX512-NEXT: vmovdqa %xmm1, %xmm2 +; AVX512-NEXT: vpternlogq $15, %xmm1, %xmm1, %xmm2 +; AVX512-NEXT: vpminuq %xmm2, %xmm0, %xmm0 +; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX512-NEXT: retq %z = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %x, <2 x i32> %y) ret <2 x i32> %z } @@ -721,124 +754,67 @@ define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; SSE2-LABEL: v4i32: ; SSE2: # %bb.0: -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] -; SSE2-NEXT: movd %xmm2, %eax -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: addl %eax, %ecx -; SSE2-NEXT: movl $-1, %eax -; SSE2-NEXT: cmovbl %eax, %ecx -; SSE2-NEXT: movd %ecx, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] -; SSE2-NEXT: movd %xmm3, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm3, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm3 -; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; SSE2-NEXT: movd %xmm1, %ecx -; SSE2-NEXT: movd %xmm0, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] -; SSE2-NEXT: movd %xmm1, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] -; SSE2-NEXT: movd %xmm0, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm0 -; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647,2147483647,2147483647] +; SSE2-NEXT: pxor %xmm1, %xmm3 +; SSE2-NEXT: pcmpgtd %xmm2, %xmm3 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm2 +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: pandn %xmm2, %xmm3 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddd %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v4i32: ; SSSE3: # %bb.0: -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] -; SSSE3-NEXT: movd %xmm2, %eax -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: addl %eax, %ecx -; SSSE3-NEXT: movl $-1, %eax -; SSSE3-NEXT: cmovbl %eax, %ecx -; SSSE3-NEXT: movd %ecx, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] -; SSSE3-NEXT: movd %xmm3, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] -; SSSE3-NEXT: movd %xmm3, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm3 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; SSSE3-NEXT: movd %xmm1, %ecx -; SSSE3-NEXT: movd %xmm0, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] -; SSSE3-NEXT: movd %xmm1, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] -; SSSE3-NEXT: movd %xmm0, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm0 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] 
+; SSSE3-NEXT: pxor %xmm0, %xmm2 +; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647,2147483647,2147483647] +; SSSE3-NEXT: pxor %xmm1, %xmm3 +; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3 +; SSSE3-NEXT: pand %xmm3, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm2 +; SSSE3-NEXT: movdqa %xmm1, %xmm3 +; SSSE3-NEXT: pandn %xmm2, %xmm3 +; SSSE3-NEXT: por %xmm3, %xmm0 +; SSSE3-NEXT: paddd %xmm1, %xmm0 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v4i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pextrd $1, %xmm1, %eax -; SSE41-NEXT: pextrd $1, %xmm0, %ecx -; SSE41-NEXT: addl %eax, %ecx -; SSE41-NEXT: movl $-1, %eax -; SSE41-NEXT: cmovbl %eax, %ecx -; SSE41-NEXT: movd %xmm1, %edx -; SSE41-NEXT: movd %xmm0, %esi -; SSE41-NEXT: addl %edx, %esi -; SSE41-NEXT: cmovbl %eax, %esi -; SSE41-NEXT: movd %esi, %xmm2 -; SSE41-NEXT: pinsrd $1, %ecx, %xmm2 -; SSE41-NEXT: pextrd $2, %xmm1, %ecx -; SSE41-NEXT: pextrd $2, %xmm0, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $2, %edx, %xmm2 -; SSE41-NEXT: pextrd $3, %xmm1, %ecx -; SSE41-NEXT: pextrd $3, %xmm0, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $3, %edx, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE41-NEXT: pxor %xmm1, %xmm2 +; SSE41-NEXT: pminud %xmm2, %xmm0 +; SSE41-NEXT: paddd %xmm1, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: v4i32: -; AVX: # %bb.0: -; AVX-NEXT: vpextrd $1, %xmm1, %eax -; AVX-NEXT: vpextrd $1, %xmm0, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movl $-1, %eax -; AVX-NEXT: cmovbl %eax, %ecx -; AVX-NEXT: vmovd %xmm1, %edx -; AVX-NEXT: vmovd %xmm0, %esi -; AVX-NEXT: addl %edx, %esi -; AVX-NEXT: cmovbl %eax, %esi -; AVX-NEXT: vmovd %esi, %xmm2 -; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 -; AVX-NEXT: vpextrd $2, %xmm1, %ecx -; AVX-NEXT: vpextrd $2, %xmm0, %edx -; AVX-NEXT: addl %ecx, %edx -; AVX-NEXT: cmovbl %eax, %edx -; AVX-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2 -; AVX-NEXT: vpextrd $3, %xmm1, %ecx -; AVX-NEXT: vpextrd $3, %xmm0, %edx -; AVX-NEXT: addl %ecx, %edx -; AVX-NEXT: cmovbl %eax, %edx -; AVX-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: v4i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2 +; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v4i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2 +; AVX2-NEXT: vpminud %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa %xmm1, %xmm2 +; AVX512-NEXT: vpternlogq $15, %xmm1, %xmm1, %xmm2 +; AVX512-NEXT: vpminud %xmm2, %xmm0, %xmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: retq %z = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y) ret <4 x i32> %z } @@ -846,323 +822,99 @@ define <8 x i32> @v8i32(<8 x i32> %x, <8 x i32> %y) nounwind { ; SSE2-LABEL: v8i32: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3] -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3] -; SSE2-NEXT: movd %xmm0, %ecx -; SSE2-NEXT: addl %eax, %ecx -; SSE2-NEXT: movl $-1, %eax -; SSE2-NEXT: cmovbl %eax, %ecx -; SSE2-NEXT: movd %ecx, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1] -; SSE2-NEXT: movd %xmm5, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,0,1] -; SSE2-NEXT: movd 
%xmm5, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm5 -; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: movd %xmm4, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,2,3] -; SSE2-NEXT: movd %xmm2, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3] -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] -; SSE2-NEXT: movd %xmm2, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,0,1] -; SSE2-NEXT: movd %xmm4, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] -; SSE2-NEXT: movd %xmm4, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm4 -; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] -; SSE2-NEXT: movd %xmm3, %ecx -; SSE2-NEXT: movd %xmm1, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3] -; SSE2-NEXT: movd %xmm3, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] -; SSE2-NEXT: movd %xmm1, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm1 -; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648] +; SSE2-NEXT: movdqa %xmm0, %xmm5 +; SSE2-NEXT: pxor %xmm4, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483647,2147483647,2147483647,2147483647] +; SSE2-NEXT: movdqa %xmm2, %xmm7 +; SSE2-NEXT: pxor %xmm6, %xmm7 +; SSE2-NEXT: pcmpgtd %xmm5, %xmm7 +; SSE2-NEXT: pand %xmm7, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm8, %xmm8 +; SSE2-NEXT: pxor %xmm8, %xmm7 +; SSE2-NEXT: movdqa %xmm2, %xmm5 +; SSE2-NEXT: pandn %xmm7, %xmm5 +; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: paddd %xmm2, %xmm0 +; SSE2-NEXT: pxor %xmm1, %xmm4 +; SSE2-NEXT: pxor %xmm3, %xmm6 +; SSE2-NEXT: pcmpgtd %xmm4, %xmm6 +; SSE2-NEXT: pand %xmm6, %xmm1 +; SSE2-NEXT: pxor %xmm8, %xmm6 +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: pandn %xmm6, %xmm2 +; SSE2-NEXT: por %xmm2, %xmm1 +; SSE2-NEXT: paddd %xmm3, %xmm1 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v8i32: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa %xmm0, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3] -; SSSE3-NEXT: movd %xmm0, %ecx -; SSSE3-NEXT: addl %eax, %ecx -; SSSE3-NEXT: movl $-1, %eax -; SSSE3-NEXT: cmovbl %eax, %ecx -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1] -; SSSE3-NEXT: movd %xmm5, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,0,1] -; SSSE3-NEXT: movd %xmm5, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm5 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: movd %xmm4, %edx -; SSSE3-NEXT: addl %ecx, 
%edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,2,3] -; SSSE3-NEXT: movd %xmm2, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm2 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0] -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3] -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] -; SSSE3-NEXT: movd %xmm2, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,0,1] -; SSSE3-NEXT: movd %xmm4, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] -; SSSE3-NEXT: movd %xmm4, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm4 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] -; SSSE3-NEXT: movd %xmm3, %ecx -; SSSE3-NEXT: movd %xmm1, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3] -; SSSE3-NEXT: movd %xmm3, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] -; SSSE3-NEXT: movd %xmm1, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm1 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648] +; SSSE3-NEXT: movdqa %xmm0, %xmm5 +; SSSE3-NEXT: pxor %xmm4, %xmm5 +; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483647,2147483647,2147483647,2147483647] +; SSSE3-NEXT: movdqa %xmm2, %xmm7 +; SSSE3-NEXT: pxor %xmm6, %xmm7 +; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7 +; SSSE3-NEXT: pand %xmm7, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm8, %xmm8 +; SSSE3-NEXT: pxor %xmm8, %xmm7 +; SSSE3-NEXT: movdqa %xmm2, %xmm5 +; SSSE3-NEXT: pandn %xmm7, %xmm5 +; SSSE3-NEXT: por %xmm5, %xmm0 +; SSSE3-NEXT: paddd %xmm2, %xmm0 +; SSSE3-NEXT: pxor %xmm1, %xmm4 +; SSSE3-NEXT: pxor %xmm3, %xmm6 +; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6 +; SSSE3-NEXT: pand %xmm6, %xmm1 +; SSSE3-NEXT: pxor %xmm8, %xmm6 +; SSSE3-NEXT: movdqa %xmm3, %xmm2 +; SSSE3-NEXT: pandn %xmm6, %xmm2 +; SSSE3-NEXT: por %xmm2, %xmm1 +; SSSE3-NEXT: paddd %xmm3, %xmm1 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v8i32: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm0, %xmm4 -; SSE41-NEXT: pextrd $1, %xmm2, %eax -; SSE41-NEXT: pextrd $1, %xmm0, %ecx -; SSE41-NEXT: addl %eax, %ecx -; SSE41-NEXT: movl $-1, %eax -; SSE41-NEXT: cmovbl %eax, %ecx -; SSE41-NEXT: movd %xmm2, %edx -; SSE41-NEXT: movd %xmm0, %esi -; SSE41-NEXT: addl %edx, %esi -; SSE41-NEXT: cmovbl %eax, %esi -; SSE41-NEXT: movd %esi, %xmm0 -; SSE41-NEXT: pinsrd $1, %ecx, %xmm0 -; SSE41-NEXT: pextrd $2, %xmm2, %ecx -; SSE41-NEXT: pextrd $2, %xmm4, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $2, %edx, %xmm0 -; SSE41-NEXT: pextrd $3, %xmm2, %ecx -; SSE41-NEXT: pextrd $3, %xmm4, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $3, %edx, %xmm0 -; SSE41-NEXT: pextrd $1, %xmm3, %ecx -; SSE41-NEXT: pextrd $1, %xmm1, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: movd %xmm3, %ecx -; SSE41-NEXT: movd %xmm1, 
%esi -; SSE41-NEXT: addl %ecx, %esi -; SSE41-NEXT: cmovbl %eax, %esi -; SSE41-NEXT: movd %esi, %xmm2 -; SSE41-NEXT: pinsrd $1, %edx, %xmm2 -; SSE41-NEXT: pextrd $2, %xmm3, %ecx -; SSE41-NEXT: pextrd $2, %xmm1, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $2, %edx, %xmm2 -; SSE41-NEXT: pextrd $3, %xmm3, %ecx -; SSE41-NEXT: pextrd $3, %xmm1, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $3, %edx, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: pcmpeqd %xmm4, %xmm4 +; SSE41-NEXT: movdqa %xmm2, %xmm5 +; SSE41-NEXT: pxor %xmm4, %xmm5 +; SSE41-NEXT: pminud %xmm5, %xmm0 +; SSE41-NEXT: paddd %xmm2, %xmm0 +; SSE41-NEXT: pxor %xmm3, %xmm4 +; SSE41-NEXT: pminud %xmm4, %xmm1 +; SSE41-NEXT: paddd %xmm3, %xmm1 ; SSE41-NEXT: retq ; ; AVX1-LABEL: v8i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpextrd $1, %xmm2, %eax -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpextrd $1, %xmm3, %ecx -; AVX1-NEXT: addl %eax, %ecx -; AVX1-NEXT: movl $-1, %eax -; AVX1-NEXT: cmovbl %eax, %ecx -; AVX1-NEXT: vmovd %xmm2, %edx -; AVX1-NEXT: vmovd %xmm3, %esi -; AVX1-NEXT: addl %edx, %esi -; AVX1-NEXT: cmovbl %eax, %esi -; AVX1-NEXT: vmovd %esi, %xmm4 -; AVX1-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4 -; AVX1-NEXT: vpextrd $2, %xmm2, %ecx -; AVX1-NEXT: vpextrd $2, %xmm3, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 -; AVX1-NEXT: vpextrd $3, %xmm2, %ecx -; AVX1-NEXT: vpextrd $3, %xmm3, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2 -; AVX1-NEXT: vpextrd $1, %xmm1, %ecx -; AVX1-NEXT: vpextrd $1, %xmm0, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: vmovd %xmm0, %esi -; AVX1-NEXT: addl %ecx, %esi -; AVX1-NEXT: cmovbl %eax, %esi -; AVX1-NEXT: vmovd %esi, %xmm3 -; AVX1-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3 -; AVX1-NEXT: vpextrd $2, %xmm1, %ecx -; AVX1-NEXT: vpextrd $2, %xmm0, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3 -; AVX1-NEXT: vpextrd $3, %xmm1, %ecx -; AVX1-NEXT: vpextrd $3, %xmm0, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vcmptrueps %ymm2, %ymm2, %ymm2 +; AVX1-NEXT: vxorps %ymm2, %ymm1, %ymm2 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX1-NEXT: vpminud %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 +; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: v8i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpextrd $1, %xmm2, %eax -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2-NEXT: vpextrd $1, %xmm3, %ecx -; AVX2-NEXT: addl %eax, %ecx -; AVX2-NEXT: movl $-1, %eax -; AVX2-NEXT: cmovbl %eax, %ecx -; AVX2-NEXT: vmovd %xmm2, %edx -; AVX2-NEXT: vmovd %xmm3, %esi -; AVX2-NEXT: addl %edx, %esi -; AVX2-NEXT: cmovbl %eax, %esi -; AVX2-NEXT: vmovd %esi, %xmm4 -; AVX2-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4 -; AVX2-NEXT: vpextrd $2, %xmm2, %ecx -; AVX2-NEXT: vpextrd $2, %xmm3, %edx -; AVX2-NEXT: addl %ecx, %edx -; 
AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 -; AVX2-NEXT: vpextrd $3, %xmm2, %ecx -; AVX2-NEXT: vpextrd $3, %xmm3, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2 -; AVX2-NEXT: vpextrd $1, %xmm1, %ecx -; AVX2-NEXT: vpextrd $1, %xmm0, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: vmovd %xmm0, %esi -; AVX2-NEXT: addl %ecx, %esi -; AVX2-NEXT: cmovbl %eax, %esi -; AVX2-NEXT: vmovd %esi, %xmm3 -; AVX2-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3 -; AVX2-NEXT: vpextrd $2, %xmm1, %ecx -; AVX2-NEXT: vpextrd $2, %xmm0, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3 -; AVX2-NEXT: vpextrd $3, %xmm1, %ecx -; AVX2-NEXT: vpextrd $3, %xmm0, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0 -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2 +; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: v8i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512-NEXT: vpextrd $1, %xmm2, %eax -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX512-NEXT: vpextrd $1, %xmm3, %ecx -; AVX512-NEXT: addl %eax, %ecx -; AVX512-NEXT: movl $-1, %eax -; AVX512-NEXT: cmovbl %eax, %ecx -; AVX512-NEXT: vmovd %xmm2, %edx -; AVX512-NEXT: vmovd %xmm3, %esi -; AVX512-NEXT: addl %edx, %esi -; AVX512-NEXT: cmovbl %eax, %esi -; AVX512-NEXT: vmovd %esi, %xmm4 -; AVX512-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4 -; AVX512-NEXT: vpextrd $2, %xmm2, %ecx -; AVX512-NEXT: vpextrd $2, %xmm3, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 -; AVX512-NEXT: vpextrd $3, %xmm2, %ecx -; AVX512-NEXT: vpextrd $3, %xmm3, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2 -; AVX512-NEXT: vpextrd $1, %xmm1, %ecx -; AVX512-NEXT: vpextrd $1, %xmm0, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vmovd %xmm1, %ecx -; AVX512-NEXT: vmovd %xmm0, %esi -; AVX512-NEXT: addl %ecx, %esi -; AVX512-NEXT: cmovbl %eax, %esi -; AVX512-NEXT: vmovd %esi, %xmm3 -; AVX512-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3 -; AVX512-NEXT: vpextrd $2, %xmm1, %ecx -; AVX512-NEXT: vpextrd $2, %xmm0, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3 -; AVX512-NEXT: vpextrd $3, %xmm1, %ecx -; AVX512-NEXT: vpextrd $3, %xmm0, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0 -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa %ymm1, %ymm2 +; AVX512-NEXT: vpternlogq $15, %ymm1, %ymm1, %ymm2 +; AVX512-NEXT: vpminud %ymm2, %ymm0, %ymm0 +; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %z = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %x, <8 x i32> %y) ret <8 x i32> %z @@ -1171,617 +923,163 @@ define <16 x i32> @v16i32(<16 x i32> %x, <16 x i32> %y) nounwind { ; SSE2-LABEL: v16i32: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm1, %xmm8 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3] -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3] -; SSE2-NEXT: movd %xmm0, 
%ecx -; SSE2-NEXT: addl %eax, %ecx -; SSE2-NEXT: movl $-1, %eax -; SSE2-NEXT: cmovbl %eax, %ecx -; SSE2-NEXT: movd %ecx, %xmm9 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm10 -; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1] -; SSE2-NEXT: movd %xmm4, %ecx -; SSE2-NEXT: movd %xmm1, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3] -; SSE2-NEXT: movd %xmm4, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] -; SSE2-NEXT: movd %xmm1, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm1 -; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,1,2,3] -; SSE2-NEXT: movd %xmm1, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,1,2,3] -; SSE2-NEXT: movd %xmm1, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1] -; SSE2-NEXT: movd %xmm4, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1] -; SSE2-NEXT: movd %xmm4, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm4 -; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] -; SSE2-NEXT: movd %xmm5, %ecx -; SSE2-NEXT: movd %xmm8, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3] -; SSE2-NEXT: movd %xmm5, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,2,3] -; SSE2-NEXT: movd %xmm5, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm5 -; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,1,2,3] -; SSE2-NEXT: movd %xmm4, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,1,2,3] -; SSE2-NEXT: movd %xmm4, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1] -; SSE2-NEXT: movd %xmm5, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1] -; SSE2-NEXT: movd %xmm5, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm5 -; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] -; SSE2-NEXT: movd %xmm6, %ecx -; SSE2-NEXT: movd %xmm2, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3] -; SSE2-NEXT: movd %xmm6, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] -; SSE2-NEXT: movd %xmm2, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,1,2,3] -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3] -; SSE2-NEXT: movd %xmm2, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = 
xmm7[2,3,0,1] -; SSE2-NEXT: movd %xmm5, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1] -; SSE2-NEXT: movd %xmm5, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm6 -; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1] -; SSE2-NEXT: movd %xmm7, %ecx -; SSE2-NEXT: movd %xmm3, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm5 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[1,1,2,3] -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,2,3] -; SSE2-NEXT: movd %xmm2, %edx -; SSE2-NEXT: addl %ecx, %edx -; SSE2-NEXT: cmovbl %eax, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1] -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0] -; SSE2-NEXT: movdqa %xmm4, %xmm2 -; SSE2-NEXT: movdqa %xmm5, %xmm3 +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648] +; SSE2-NEXT: movdqa %xmm0, %xmm10 +; SSE2-NEXT: pxor %xmm9, %xmm10 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647,2147483647,2147483647] +; SSE2-NEXT: movdqa %xmm4, %xmm11 +; SSE2-NEXT: pxor %xmm8, %xmm11 +; SSE2-NEXT: pcmpgtd %xmm10, %xmm11 +; SSE2-NEXT: pand %xmm11, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm10, %xmm10 +; SSE2-NEXT: pxor %xmm10, %xmm11 +; SSE2-NEXT: movdqa %xmm4, %xmm12 +; SSE2-NEXT: pandn %xmm11, %xmm12 +; SSE2-NEXT: por %xmm12, %xmm0 +; SSE2-NEXT: paddd %xmm4, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm11 +; SSE2-NEXT: pxor %xmm9, %xmm11 +; SSE2-NEXT: movdqa %xmm5, %xmm12 +; SSE2-NEXT: pxor %xmm8, %xmm12 +; SSE2-NEXT: pcmpgtd %xmm11, %xmm12 +; SSE2-NEXT: pand %xmm12, %xmm1 +; SSE2-NEXT: pxor %xmm10, %xmm12 +; SSE2-NEXT: movdqa %xmm5, %xmm4 +; SSE2-NEXT: pandn %xmm12, %xmm4 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: paddd %xmm5, %xmm1 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pxor %xmm9, %xmm4 +; SSE2-NEXT: movdqa %xmm6, %xmm5 +; SSE2-NEXT: pxor %xmm8, %xmm5 +; SSE2-NEXT: pcmpgtd %xmm4, %xmm5 +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: pxor %xmm10, %xmm5 +; SSE2-NEXT: movdqa %xmm6, %xmm4 +; SSE2-NEXT: pandn %xmm5, %xmm4 +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: paddd %xmm6, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm9 +; SSE2-NEXT: pxor %xmm7, %xmm8 +; SSE2-NEXT: pcmpgtd %xmm9, %xmm8 +; SSE2-NEXT: pand %xmm8, %xmm3 +; SSE2-NEXT: pxor %xmm10, %xmm8 +; SSE2-NEXT: movdqa %xmm7, %xmm4 +; SSE2-NEXT: pandn %xmm8, %xmm4 +; SSE2-NEXT: por %xmm4, %xmm3 +; SSE2-NEXT: paddd %xmm7, %xmm3 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v16i32: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa %xmm1, %xmm8 -; SSSE3-NEXT: movdqa %xmm0, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3] -; SSSE3-NEXT: movd %xmm0, %eax -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3] -; SSSE3-NEXT: movd %xmm0, %ecx -; SSSE3-NEXT: addl %eax, %ecx -; SSSE3-NEXT: movl $-1, %eax -; SSSE3-NEXT: cmovbl %eax, %ecx -; SSSE3-NEXT: movd %ecx, %xmm9 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1] -; SSSE3-NEXT: movd %xmm0, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSSE3-NEXT: movd %xmm0, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm10 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1] -; SSSE3-NEXT: movd %xmm4, %ecx -; SSSE3-NEXT: movd %xmm1, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3] -; SSSE3-NEXT: movd %xmm4, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 
= xmm1[1,1,2,3] -; SSSE3-NEXT: movd %xmm1, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm1 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0] -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,1,2,3] -; SSSE3-NEXT: movd %xmm1, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,1,2,3] -; SSSE3-NEXT: movd %xmm1, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1] -; SSSE3-NEXT: movd %xmm4, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1] -; SSSE3-NEXT: movd %xmm4, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm4 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] -; SSSE3-NEXT: movd %xmm5, %ecx -; SSSE3-NEXT: movd %xmm8, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3] -; SSSE3-NEXT: movd %xmm5, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,2,3] -; SSSE3-NEXT: movd %xmm5, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm5 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,1,2,3] -; SSSE3-NEXT: movd %xmm4, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,1,2,3] -; SSSE3-NEXT: movd %xmm4, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1] -; SSSE3-NEXT: movd %xmm5, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1] -; SSSE3-NEXT: movd %xmm5, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm5 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] -; SSSE3-NEXT: movd %xmm6, %ecx -; SSSE3-NEXT: movd %xmm2, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3] -; SSSE3-NEXT: movd %xmm6, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] -; SSSE3-NEXT: movd %xmm2, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm2 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0] -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,1,2,3] -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3] -; SSSE3-NEXT: movd %xmm2, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,3,0,1] -; SSSE3-NEXT: movd %xmm5, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1] -; SSSE3-NEXT: movd %xmm5, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm6 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1] -; SSSE3-NEXT: movd %xmm7, %ecx -; SSSE3-NEXT: movd %xmm3, %edx -; SSSE3-NEXT: addl %ecx, %edx -; SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm5 -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[1,1,2,3] -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,2,3] -; SSSE3-NEXT: movd %xmm2, %edx -; SSSE3-NEXT: addl %ecx, %edx -; 
SSSE3-NEXT: cmovbl %eax, %edx -; SSSE3-NEXT: movd %edx, %xmm2 -; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1] -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0] -; SSSE3-NEXT: movdqa %xmm4, %xmm2 -; SSSE3-NEXT: movdqa %xmm5, %xmm3 +; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648] +; SSSE3-NEXT: movdqa %xmm0, %xmm10 +; SSSE3-NEXT: pxor %xmm9, %xmm10 +; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647,2147483647,2147483647] +; SSSE3-NEXT: movdqa %xmm4, %xmm11 +; SSSE3-NEXT: pxor %xmm8, %xmm11 +; SSSE3-NEXT: pcmpgtd %xmm10, %xmm11 +; SSSE3-NEXT: pand %xmm11, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm10, %xmm10 +; SSSE3-NEXT: pxor %xmm10, %xmm11 +; SSSE3-NEXT: movdqa %xmm4, %xmm12 +; SSSE3-NEXT: pandn %xmm11, %xmm12 +; SSSE3-NEXT: por %xmm12, %xmm0 +; SSSE3-NEXT: paddd %xmm4, %xmm0 +; SSSE3-NEXT: movdqa %xmm1, %xmm11 +; SSSE3-NEXT: pxor %xmm9, %xmm11 +; SSSE3-NEXT: movdqa %xmm5, %xmm12 +; SSSE3-NEXT: pxor %xmm8, %xmm12 +; SSSE3-NEXT: pcmpgtd %xmm11, %xmm12 +; SSSE3-NEXT: pand %xmm12, %xmm1 +; SSSE3-NEXT: pxor %xmm10, %xmm12 +; SSSE3-NEXT: movdqa %xmm5, %xmm4 +; SSSE3-NEXT: pandn %xmm12, %xmm4 +; SSSE3-NEXT: por %xmm4, %xmm1 +; SSSE3-NEXT: paddd %xmm5, %xmm1 +; SSSE3-NEXT: movdqa %xmm2, %xmm4 +; SSSE3-NEXT: pxor %xmm9, %xmm4 +; SSSE3-NEXT: movdqa %xmm6, %xmm5 +; SSSE3-NEXT: pxor %xmm8, %xmm5 +; SSSE3-NEXT: pcmpgtd %xmm4, %xmm5 +; SSSE3-NEXT: pand %xmm5, %xmm2 +; SSSE3-NEXT: pxor %xmm10, %xmm5 +; SSSE3-NEXT: movdqa %xmm6, %xmm4 +; SSSE3-NEXT: pandn %xmm5, %xmm4 +; SSSE3-NEXT: por %xmm4, %xmm2 +; SSSE3-NEXT: paddd %xmm6, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm9 +; SSSE3-NEXT: pxor %xmm7, %xmm8 +; SSSE3-NEXT: pcmpgtd %xmm9, %xmm8 +; SSSE3-NEXT: pand %xmm8, %xmm3 +; SSSE3-NEXT: pxor %xmm10, %xmm8 +; SSSE3-NEXT: movdqa %xmm7, %xmm4 +; SSSE3-NEXT: pandn %xmm8, %xmm4 +; SSSE3-NEXT: por %xmm4, %xmm3 +; SSSE3-NEXT: paddd %xmm7, %xmm3 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v16i32: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm1, %xmm8 -; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: pextrd $1, %xmm4, %eax -; SSE41-NEXT: pextrd $1, %xmm0, %ecx -; SSE41-NEXT: addl %eax, %ecx -; SSE41-NEXT: movl $-1, %eax -; SSE41-NEXT: cmovbl %eax, %ecx -; SSE41-NEXT: movd %xmm4, %edx -; SSE41-NEXT: movd %xmm0, %esi -; SSE41-NEXT: addl %edx, %esi -; SSE41-NEXT: cmovbl %eax, %esi -; SSE41-NEXT: movd %esi, %xmm0 -; SSE41-NEXT: pinsrd $1, %ecx, %xmm0 -; SSE41-NEXT: pextrd $2, %xmm4, %ecx -; SSE41-NEXT: pextrd $2, %xmm1, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $2, %edx, %xmm0 -; SSE41-NEXT: pextrd $3, %xmm4, %ecx -; SSE41-NEXT: pextrd $3, %xmm1, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $3, %edx, %xmm0 -; SSE41-NEXT: pextrd $1, %xmm5, %ecx -; SSE41-NEXT: pextrd $1, %xmm8, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: movd %xmm5, %ecx -; SSE41-NEXT: movd %xmm8, %esi -; SSE41-NEXT: addl %ecx, %esi -; SSE41-NEXT: cmovbl %eax, %esi -; SSE41-NEXT: movd %esi, %xmm1 -; SSE41-NEXT: pinsrd $1, %edx, %xmm1 -; SSE41-NEXT: pextrd $2, %xmm5, %ecx -; SSE41-NEXT: pextrd $2, %xmm8, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $2, %edx, %xmm1 -; SSE41-NEXT: pextrd $3, %xmm5, %ecx -; SSE41-NEXT: pextrd $3, %xmm8, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $3, %edx, %xmm1 -; SSE41-NEXT: pextrd $1, %xmm6, %ecx -; SSE41-NEXT: pextrd $1, %xmm2, %edx -; 
SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: movd %xmm6, %ecx -; SSE41-NEXT: movd %xmm2, %esi -; SSE41-NEXT: addl %ecx, %esi -; SSE41-NEXT: cmovbl %eax, %esi -; SSE41-NEXT: movd %esi, %xmm4 -; SSE41-NEXT: pinsrd $1, %edx, %xmm4 -; SSE41-NEXT: pextrd $2, %xmm6, %ecx -; SSE41-NEXT: pextrd $2, %xmm2, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $2, %edx, %xmm4 -; SSE41-NEXT: pextrd $3, %xmm6, %ecx -; SSE41-NEXT: pextrd $3, %xmm2, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $3, %edx, %xmm4 -; SSE41-NEXT: pextrd $1, %xmm7, %ecx -; SSE41-NEXT: pextrd $1, %xmm3, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: movd %xmm7, %ecx -; SSE41-NEXT: movd %xmm3, %esi -; SSE41-NEXT: addl %ecx, %esi -; SSE41-NEXT: cmovbl %eax, %esi -; SSE41-NEXT: movd %esi, %xmm5 -; SSE41-NEXT: pinsrd $1, %edx, %xmm5 -; SSE41-NEXT: pextrd $2, %xmm7, %ecx -; SSE41-NEXT: pextrd $2, %xmm3, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $2, %edx, %xmm5 -; SSE41-NEXT: pextrd $3, %xmm7, %ecx -; SSE41-NEXT: pextrd $3, %xmm3, %edx -; SSE41-NEXT: addl %ecx, %edx -; SSE41-NEXT: cmovbl %eax, %edx -; SSE41-NEXT: pinsrd $3, %edx, %xmm5 -; SSE41-NEXT: movdqa %xmm4, %xmm2 -; SSE41-NEXT: movdqa %xmm5, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm8, %xmm8 +; SSE41-NEXT: movdqa %xmm4, %xmm9 +; SSE41-NEXT: pxor %xmm8, %xmm9 +; SSE41-NEXT: pminud %xmm9, %xmm0 +; SSE41-NEXT: paddd %xmm4, %xmm0 +; SSE41-NEXT: movdqa %xmm5, %xmm4 +; SSE41-NEXT: pxor %xmm8, %xmm4 +; SSE41-NEXT: pminud %xmm4, %xmm1 +; SSE41-NEXT: paddd %xmm5, %xmm1 +; SSE41-NEXT: movdqa %xmm6, %xmm4 +; SSE41-NEXT: pxor %xmm8, %xmm4 +; SSE41-NEXT: pminud %xmm4, %xmm2 +; SSE41-NEXT: paddd %xmm6, %xmm2 +; SSE41-NEXT: pxor %xmm7, %xmm8 +; SSE41-NEXT: pminud %xmm8, %xmm3 +; SSE41-NEXT: paddd %xmm7, %xmm3 ; SSE41-NEXT: retq ; ; AVX1-LABEL: v16i32: ; AVX1: # %bb.0: +; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vcmptrueps %ymm4, %ymm4, %ymm4 +; AVX1-NEXT: vxorps %ymm4, %ymm2, %ymm5 +; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7 +; AVX1-NEXT: vpminud %xmm6, %xmm7, %xmm6 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7 +; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6 +; AVX1-NEXT: vpminud %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0 +; AVX1-NEXT: vxorps %ymm4, %ymm3, %ymm2 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vpextrd $1, %xmm4, %eax -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 -; AVX1-NEXT: vpextrd $1, %xmm5, %ecx -; AVX1-NEXT: addl %eax, %ecx -; AVX1-NEXT: movl $-1, %eax -; AVX1-NEXT: cmovbl %eax, %ecx -; AVX1-NEXT: vmovd %xmm4, %edx -; AVX1-NEXT: vmovd %xmm5, %esi -; AVX1-NEXT: addl %edx, %esi -; AVX1-NEXT: cmovbl %eax, %esi -; AVX1-NEXT: vmovd %esi, %xmm6 -; AVX1-NEXT: vpinsrd $1, %ecx, %xmm6, %xmm6 -; AVX1-NEXT: vpextrd $2, %xmm4, %ecx -; AVX1-NEXT: vpextrd $2, %xmm5, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6 -; AVX1-NEXT: vpextrd $3, %xmm4, %ecx -; AVX1-NEXT: vpextrd $3, %xmm5, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4 -; AVX1-NEXT: vpextrd $1, %xmm2, %ecx -; AVX1-NEXT: vpextrd $1, %xmm0, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vmovd %xmm2, %ecx -; AVX1-NEXT: vmovd %xmm0, %esi -; AVX1-NEXT: addl %ecx, 
%esi -; AVX1-NEXT: cmovbl %eax, %esi -; AVX1-NEXT: vmovd %esi, %xmm5 -; AVX1-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 -; AVX1-NEXT: vpextrd $2, %xmm2, %ecx -; AVX1-NEXT: vpextrd $2, %xmm0, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 -; AVX1-NEXT: vpextrd $3, %xmm2, %ecx -; AVX1-NEXT: vpextrd $3, %xmm0, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 -; AVX1-NEXT: vpextrd $1, %xmm2, %ecx -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 -; AVX1-NEXT: vpextrd $1, %xmm4, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vmovd %xmm2, %ecx -; AVX1-NEXT: vmovd %xmm4, %esi -; AVX1-NEXT: addl %ecx, %esi -; AVX1-NEXT: cmovbl %eax, %esi -; AVX1-NEXT: vmovd %esi, %xmm5 -; AVX1-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 -; AVX1-NEXT: vpextrd $2, %xmm2, %ecx -; AVX1-NEXT: vpextrd $2, %xmm4, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 -; AVX1-NEXT: vpextrd $3, %xmm2, %ecx -; AVX1-NEXT: vpextrd $3, %xmm4, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2 -; AVX1-NEXT: vpextrd $1, %xmm3, %ecx -; AVX1-NEXT: vpextrd $1, %xmm1, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vmovd %xmm3, %ecx -; AVX1-NEXT: vmovd %xmm1, %esi -; AVX1-NEXT: addl %ecx, %esi -; AVX1-NEXT: cmovbl %eax, %esi -; AVX1-NEXT: vmovd %esi, %xmm4 -; AVX1-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4 -; AVX1-NEXT: vpextrd $2, %xmm3, %ecx -; AVX1-NEXT: vpextrd $2, %xmm1, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 -; AVX1-NEXT: vpextrd $3, %xmm3, %ecx -; AVX1-NEXT: vpextrd $3, %xmm1, %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: cmovbl %eax, %edx -; AVX1-NEXT: vpinsrd $3, %edx, %xmm4, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5 +; AVX1-NEXT: vpminud %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5 +; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpminud %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1 ; AVX1-NEXT: retq ; ; AVX2-LABEL: v16i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4 -; AVX2-NEXT: vpextrd $1, %xmm4, %eax -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5 -; AVX2-NEXT: vpextrd $1, %xmm5, %ecx -; AVX2-NEXT: addl %eax, %ecx -; AVX2-NEXT: movl $-1, %eax -; AVX2-NEXT: cmovbl %eax, %ecx -; AVX2-NEXT: vmovd %xmm4, %edx -; AVX2-NEXT: vmovd %xmm5, %esi -; AVX2-NEXT: addl %edx, %esi -; AVX2-NEXT: cmovbl %eax, %esi -; AVX2-NEXT: vmovd %esi, %xmm6 -; AVX2-NEXT: vpinsrd $1, %ecx, %xmm6, %xmm6 -; AVX2-NEXT: vpextrd $2, %xmm4, %ecx -; AVX2-NEXT: vpextrd $2, %xmm5, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6 -; AVX2-NEXT: vpextrd $3, %xmm4, %ecx -; AVX2-NEXT: vpextrd $3, %xmm5, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4 -; AVX2-NEXT: vpextrd $1, %xmm2, %ecx -; AVX2-NEXT: vpextrd $1, %xmm0, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vmovd %xmm2, %ecx -; AVX2-NEXT: vmovd %xmm0, %esi -; AVX2-NEXT: addl %ecx, %esi -; AVX2-NEXT: cmovbl %eax, %esi -; AVX2-NEXT: vmovd 
%esi, %xmm5 -; AVX2-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 -; AVX2-NEXT: vpextrd $2, %xmm2, %ecx -; AVX2-NEXT: vpextrd $2, %xmm0, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 -; AVX2-NEXT: vpextrd $3, %xmm2, %ecx -; AVX2-NEXT: vpextrd $3, %xmm0, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0 -; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2 -; AVX2-NEXT: vpextrd $1, %xmm2, %ecx -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4 -; AVX2-NEXT: vpextrd $1, %xmm4, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vmovd %xmm2, %ecx -; AVX2-NEXT: vmovd %xmm4, %esi -; AVX2-NEXT: addl %ecx, %esi -; AVX2-NEXT: cmovbl %eax, %esi -; AVX2-NEXT: vmovd %esi, %xmm5 -; AVX2-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 -; AVX2-NEXT: vpextrd $2, %xmm2, %ecx -; AVX2-NEXT: vpextrd $2, %xmm4, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 -; AVX2-NEXT: vpextrd $3, %xmm2, %ecx -; AVX2-NEXT: vpextrd $3, %xmm4, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2 -; AVX2-NEXT: vpextrd $1, %xmm3, %ecx -; AVX2-NEXT: vpextrd $1, %xmm1, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vmovd %xmm3, %ecx -; AVX2-NEXT: vmovd %xmm1, %esi -; AVX2-NEXT: addl %ecx, %esi -; AVX2-NEXT: cmovbl %eax, %esi -; AVX2-NEXT: vmovd %esi, %xmm4 -; AVX2-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4 -; AVX2-NEXT: vpextrd $2, %xmm3, %ecx -; AVX2-NEXT: vpextrd $2, %xmm1, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 -; AVX2-NEXT: vpextrd $3, %xmm3, %ecx -; AVX2-NEXT: vpextrd $3, %xmm1, %edx -; AVX2-NEXT: addl %ecx, %edx -; AVX2-NEXT: cmovbl %eax, %edx -; AVX2-NEXT: vpinsrd $3, %edx, %xmm4, %xmm1 -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4 +; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm5 +; AVX2-NEXT: vpminud %ymm5, %ymm0, %ymm0 +; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm2 +; AVX2-NEXT: vpminud %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512-LABEL: v16i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm2 -; AVX512-NEXT: vpextrd $1, %xmm2, %eax -; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm3 -; AVX512-NEXT: vpextrd $1, %xmm3, %ecx -; AVX512-NEXT: addl %eax, %ecx -; AVX512-NEXT: movl $-1, %eax -; AVX512-NEXT: cmovbl %eax, %ecx -; AVX512-NEXT: vmovd %xmm2, %edx -; AVX512-NEXT: vmovd %xmm3, %esi -; AVX512-NEXT: addl %edx, %esi -; AVX512-NEXT: cmovbl %eax, %esi -; AVX512-NEXT: vmovd %esi, %xmm4 -; AVX512-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4 -; AVX512-NEXT: vpextrd $2, %xmm2, %ecx -; AVX512-NEXT: vpextrd $2, %xmm3, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 -; AVX512-NEXT: vpextrd $3, %xmm2, %ecx -; AVX512-NEXT: vpextrd $3, %xmm3, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2 -; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3 -; AVX512-NEXT: vpextrd $1, %xmm3, %ecx -; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm4 -; AVX512-NEXT: vpextrd $1, %xmm4, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vmovd %xmm3, %ecx -; AVX512-NEXT: vmovd %xmm4, 
%esi -; AVX512-NEXT: addl %ecx, %esi -; AVX512-NEXT: cmovbl %eax, %esi -; AVX512-NEXT: vmovd %esi, %xmm5 -; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 -; AVX512-NEXT: vpextrd $2, %xmm3, %ecx -; AVX512-NEXT: vpextrd $2, %xmm4, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 -; AVX512-NEXT: vpextrd $3, %xmm3, %ecx -; AVX512-NEXT: vpextrd $3, %xmm4, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm3 -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX512-NEXT: vpextrd $1, %xmm3, %ecx -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm4 -; AVX512-NEXT: vpextrd $1, %xmm4, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vmovd %xmm3, %ecx -; AVX512-NEXT: vmovd %xmm4, %esi -; AVX512-NEXT: addl %ecx, %esi -; AVX512-NEXT: cmovbl %eax, %esi -; AVX512-NEXT: vmovd %esi, %xmm5 -; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5 -; AVX512-NEXT: vpextrd $2, %xmm3, %ecx -; AVX512-NEXT: vpextrd $2, %xmm4, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5 -; AVX512-NEXT: vpextrd $3, %xmm3, %ecx -; AVX512-NEXT: vpextrd $3, %xmm4, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm3 -; AVX512-NEXT: vpextrd $1, %xmm1, %ecx -; AVX512-NEXT: vpextrd $1, %xmm0, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vmovd %xmm1, %ecx -; AVX512-NEXT: vmovd %xmm0, %esi -; AVX512-NEXT: addl %ecx, %esi -; AVX512-NEXT: cmovbl %eax, %esi -; AVX512-NEXT: vmovd %esi, %xmm4 -; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4 -; AVX512-NEXT: vpextrd $2, %xmm1, %ecx -; AVX512-NEXT: vpextrd $2, %xmm0, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4 -; AVX512-NEXT: vpextrd $3, %xmm1, %ecx -; AVX512-NEXT: vpextrd $3, %xmm0, %edx -; AVX512-NEXT: addl %ecx, %edx -; AVX512-NEXT: cmovbl %eax, %edx -; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm0 -; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 -; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vmovdqa64 %zmm1, %zmm2 +; AVX512-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm2 +; AVX512-NEXT: vpminud %zmm2, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: retq %z = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %x, <16 x i32> %y) ret <16 x i32> %z @@ -1790,73 +1088,100 @@ define <2 x i64> @v2i64(<2 x i64> %x, <2 x i64> %y) nounwind { ; SSE2-LABEL: v2i64: ; SSE2: # %bb.0: -; SSE2-NEXT: movq %xmm1, %rax -; SSE2-NEXT: movq %xmm0, %rcx -; SSE2-NEXT: addq %rax, %rcx -; SSE2-NEXT: movq $-1, %rax -; SSE2-NEXT: cmovbq %rax, %rcx -; SSE2-NEXT: movq %rcx, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm1, %rcx -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm0 -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159] +; SSE2-NEXT: pxor %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm2, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 
= xmm4[0,0,2,2] +; SSE2-NEXT: pcmpeqd %xmm2, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] +; SSE2-NEXT: por %xmm2, %xmm3 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm2 +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: pandn %xmm2, %xmm3 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddq %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v2i64: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movq %xmm1, %rax -; SSSE3-NEXT: movq %xmm0, %rcx -; SSSE3-NEXT: addq %rax, %rcx -; SSSE3-NEXT: movq $-1, %rax -; SSSE3-NEXT: cmovbq %rax, %rcx -; SSSE3-NEXT: movq %rcx, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSSE3-NEXT: movq %xmm1, %rcx -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSSE3-NEXT: movq %xmm0, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm0 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] -; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] +; SSSE3-NEXT: pxor %xmm0, %xmm2 +; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159] +; SSSE3-NEXT: pxor %xmm1, %xmm3 +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] +; SSSE3-NEXT: pand %xmm5, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] +; SSSE3-NEXT: por %xmm2, %xmm3 +; SSSE3-NEXT: pand %xmm3, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm2 +; SSSE3-NEXT: movdqa %xmm1, %xmm3 +; SSSE3-NEXT: pandn %xmm2, %xmm3 +; SSSE3-NEXT: por %xmm3, %xmm0 +; SSSE3-NEXT: paddq %xmm1, %xmm0 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v2i64: ; SSE41: # %bb.0: -; SSE41-NEXT: pextrq $1, %xmm1, %rax -; SSE41-NEXT: pextrq $1, %xmm0, %rcx -; SSE41-NEXT: addq %rax, %rcx -; SSE41-NEXT: movq $-1, %rax -; SSE41-NEXT: cmovbq %rax, %rcx -; SSE41-NEXT: movq %rcx, %xmm2 -; SSE41-NEXT: movq %xmm1, %rcx -; SSE41-NEXT: movq %xmm0, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm0 -; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456] +; SSE41-NEXT: pxor %xmm2, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159] +; SSE41-NEXT: pxor %xmm1, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: pcmpgtd %xmm0, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] +; SSE41-NEXT: pcmpeqd %xmm0, %xmm3 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3] +; SSE41-NEXT: pand %xmm5, %xmm0 +; SSE41-NEXT: por %xmm4, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: pxor %xmm1, %xmm3 +; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3 +; SSE41-NEXT: paddq %xmm1, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: v2i64: -; AVX: # %bb.0: -; AVX-NEXT: vpextrq $1, %xmm1, %rax -; AVX-NEXT: vpextrq $1, %xmm0, %rcx -; AVX-NEXT: addq %rax, %rcx -; AVX-NEXT: movq $-1, %rax -; AVX-NEXT: cmovbq %rax, %rcx -; AVX-NEXT: vmovq %rcx, %xmm2 -; AVX-NEXT: vmovq %xmm1, %rcx -; AVX-NEXT: vmovq %xmm0, %rdx -; AVX-NEXT: addq %rcx, %rdx -; AVX-NEXT: cmovbq %rax, %rdx -; AVX-NEXT: vmovq %rdx, %xmm0 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; AVX-NEXT: retq +; AVX1-LABEL: v2i64: +; AVX1: 
# %bb.0: +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2 +; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm3 +; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm4 +; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vblendvpd %xmm3, %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v2i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2 +; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm3 +; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm4 +; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3 +; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v2i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa %xmm1, %xmm2 +; AVX512-NEXT: vpternlogq $15, %xmm1, %xmm1, %xmm2 +; AVX512-NEXT: vpminuq %xmm2, %xmm0, %xmm0 +; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: retq %z = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %x, <2 x i64> %y) ret <2 x i64> %z } @@ -1864,185 +1189,164 @@ define <4 x i64> @v4i64(<4 x i64> %x, <4 x i64> %y) nounwind { ; SSE2-LABEL: v4i64: ; SSE2: # %bb.0: -; SSE2-NEXT: movq %xmm2, %rax -; SSE2-NEXT: movq %xmm0, %rcx -; SSE2-NEXT: addq %rax, %rcx -; SSE2-NEXT: movq $-1, %rax -; SSE2-NEXT: cmovbq %rax, %rcx -; SSE2-NEXT: movq %rcx, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE2-NEXT: movq %xmm2, %rcx -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm0 -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] -; SSE2-NEXT: movq %xmm3, %rcx -; SSE2-NEXT: movq %xmm1, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rcx -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm0 -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] -; SSE2-NEXT: movdqa %xmm4, %xmm0 -; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456] +; SSE2-NEXT: movdqa %xmm0, %xmm6 +; SSE2-NEXT: pxor %xmm8, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [9223372034707292159,9223372034707292159] +; SSE2-NEXT: movdqa %xmm2, %xmm7 +; SSE2-NEXT: pxor %xmm5, %xmm7 +; SSE2-NEXT: movdqa %xmm7, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm6, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm4[0,0,2,2] +; SSE2-NEXT: pcmpeqd %xmm6, %xmm7 +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3] +; SSE2-NEXT: pand %xmm9, %xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE2-NEXT: por %xmm6, %xmm4 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm6, %xmm6 +; SSE2-NEXT: pxor %xmm6, %xmm4 +; SSE2-NEXT: movdqa %xmm2, %xmm7 +; SSE2-NEXT: pandn %xmm4, %xmm7 +; SSE2-NEXT: por %xmm7, %xmm0 +; SSE2-NEXT: paddq %xmm2, %xmm0 +; SSE2-NEXT: pxor %xmm1, %xmm8 +; SSE2-NEXT: pxor %xmm3, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm2 +; SSE2-NEXT: pcmpgtd %xmm8, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2] +; SSE2-NEXT: pcmpeqd %xmm8, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; SSE2-NEXT: pand %xmm4, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE2-NEXT: por %xmm5, %xmm2 +; SSE2-NEXT: pand %xmm2, %xmm1 +; SSE2-NEXT: pxor %xmm6, %xmm2 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn 
%xmm2, %xmm4 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: paddq %xmm3, %xmm1 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v4i64: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movq %xmm2, %rax -; SSSE3-NEXT: movq %xmm0, %rcx -; SSSE3-NEXT: addq %rax, %rcx -; SSSE3-NEXT: movq $-1, %rax -; SSSE3-NEXT: cmovbq %rax, %rcx -; SSSE3-NEXT: movq %rcx, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSSE3-NEXT: movq %xmm2, %rcx -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSSE3-NEXT: movq %xmm0, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm0 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] -; SSSE3-NEXT: movq %xmm3, %rcx -; SSSE3-NEXT: movq %xmm1, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] -; SSSE3-NEXT: movq %xmm0, %rcx -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] -; SSSE3-NEXT: movq %xmm0, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm0 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] -; SSSE3-NEXT: movdqa %xmm4, %xmm0 -; SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456] +; SSSE3-NEXT: movdqa %xmm0, %xmm6 +; SSSE3-NEXT: pxor %xmm8, %xmm6 +; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [9223372034707292159,9223372034707292159] +; SSSE3-NEXT: movdqa %xmm2, %xmm7 +; SSSE3-NEXT: pxor %xmm5, %xmm7 +; SSSE3-NEXT: movdqa %xmm7, %xmm4 +; SSSE3-NEXT: pcmpgtd %xmm6, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm4[0,0,2,2] +; SSSE3-NEXT: pcmpeqd %xmm6, %xmm7 +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3] +; SSSE3-NEXT: pand %xmm9, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSSE3-NEXT: por %xmm6, %xmm4 +; SSSE3-NEXT: pand %xmm4, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm6, %xmm6 +; SSSE3-NEXT: pxor %xmm6, %xmm4 +; SSSE3-NEXT: movdqa %xmm2, %xmm7 +; SSSE3-NEXT: pandn %xmm4, %xmm7 +; SSSE3-NEXT: por %xmm7, %xmm0 +; SSSE3-NEXT: paddq %xmm2, %xmm0 +; SSSE3-NEXT: pxor %xmm1, %xmm8 +; SSSE3-NEXT: pxor %xmm3, %xmm5 +; SSSE3-NEXT: movdqa %xmm5, %xmm2 +; SSSE3-NEXT: pcmpgtd %xmm8, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2] +; SSSE3-NEXT: pcmpeqd %xmm8, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; SSSE3-NEXT: pand %xmm4, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSSE3-NEXT: por %xmm5, %xmm2 +; SSSE3-NEXT: pand %xmm2, %xmm1 +; SSSE3-NEXT: pxor %xmm6, %xmm2 +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pandn %xmm2, %xmm4 +; SSSE3-NEXT: por %xmm4, %xmm1 +; SSSE3-NEXT: paddq %xmm3, %xmm1 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v4i64: ; SSE41: # %bb.0: -; SSE41-NEXT: pextrq $1, %xmm2, %rax -; SSE41-NEXT: pextrq $1, %xmm0, %rcx -; SSE41-NEXT: addq %rax, %rcx -; SSE41-NEXT: movq $-1, %rax -; SSE41-NEXT: cmovbq %rax, %rcx -; SSE41-NEXT: movq %rcx, %xmm4 -; SSE41-NEXT: movq %xmm2, %rcx -; SSE41-NEXT: movq %xmm0, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm0 -; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0] -; SSE41-NEXT: pextrq $1, %xmm3, %rcx -; SSE41-NEXT: pextrq $1, %xmm1, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm2 -; SSE41-NEXT: movq %xmm3, %rcx -; SSE41-NEXT: movq %xmm1, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm1 -; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE41-NEXT: movdqa 
%xmm0, %xmm8 +; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456] +; SSE41-NEXT: pxor %xmm7, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [9223372034707292159,9223372034707292159] +; SSE41-NEXT: movdqa %xmm2, %xmm5 +; SSE41-NEXT: pxor %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm5, %xmm6 +; SSE41-NEXT: pcmpgtd %xmm0, %xmm6 +; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2] +; SSE41-NEXT: pcmpeqd %xmm0, %xmm5 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3] +; SSE41-NEXT: pand %xmm9, %xmm0 +; SSE41-NEXT: por %xmm6, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm5, %xmm5 +; SSE41-NEXT: movdqa %xmm2, %xmm6 +; SSE41-NEXT: pxor %xmm5, %xmm6 +; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm6 +; SSE41-NEXT: paddq %xmm2, %xmm6 +; SSE41-NEXT: pxor %xmm1, %xmm7 +; SSE41-NEXT: pxor %xmm3, %xmm4 +; SSE41-NEXT: movdqa %xmm4, %xmm2 +; SSE41-NEXT: pcmpgtd %xmm7, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm2[0,0,2,2] +; SSE41-NEXT: pcmpeqd %xmm7, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3] +; SSE41-NEXT: pand %xmm8, %xmm0 +; SSE41-NEXT: por %xmm2, %xmm0 +; SSE41-NEXT: pxor %xmm3, %xmm5 +; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5 +; SSE41-NEXT: paddq %xmm3, %xmm5 +; SSE41-NEXT: movdqa %xmm6, %xmm0 +; SSE41-NEXT: movdqa %xmm5, %xmm1 ; SSE41-NEXT: retq ; ; AVX1-LABEL: v4i64: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpextrq $1, %xmm2, %rax -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpextrq $1, %xmm3, %rcx -; AVX1-NEXT: addq %rax, %rcx -; AVX1-NEXT: movq $-1, %rax -; AVX1-NEXT: cmovbq %rax, %rcx -; AVX1-NEXT: vmovq %rcx, %xmm4 -; AVX1-NEXT: vmovq %xmm2, %rcx -; AVX1-NEXT: vmovq %xmm3, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm2 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; AVX1-NEXT: vpextrq $1, %xmm1, %rcx -; AVX1-NEXT: vpextrq $1, %xmm0, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm3 -; AVX1-NEXT: vmovq %xmm1, %rcx -; AVX1-NEXT: vmovq %xmm0, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm0 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808] +; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vcmptrueps %ymm4, %ymm4, %ymm4 +; AVX1-NEXT: vxorps %ymm4, %ymm1, %ymm4 +; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5 +; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm5 +; AVX1-NEXT: vpcmpgtq %xmm2, %xmm5, %xmm2 +; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm5 +; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 +; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm4, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 +; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: v4i64: ; AVX2: # %bb.0: -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpextrq $1, %xmm2, %rax -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2-NEXT: vpextrq $1, %xmm3, %rcx -; AVX2-NEXT: addq %rax, %rcx -; AVX2-NEXT: movq $-1, %rax -; AVX2-NEXT: cmovbq %rax, %rcx -; AVX2-NEXT: vmovq %rcx, %xmm4 -; AVX2-NEXT: vmovq %xmm2, %rcx -; AVX2-NEXT: vmovq %xmm3, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: 
vmovq %rdx, %xmm2 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; AVX2-NEXT: vpextrq $1, %xmm1, %rcx -; AVX2-NEXT: vpextrq $1, %xmm0, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm3 -; AVX2-NEXT: vmovq %xmm1, %rcx -; AVX2-NEXT: vmovq %xmm0, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm0 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808] +; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807] +; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm3 +; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3 +; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm3 +; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm3, %ymm0 +; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512-NEXT: vpextrq $1, %xmm2, %rax -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX512-NEXT: vpextrq $1, %xmm3, %rcx -; AVX512-NEXT: addq %rax, %rcx -; AVX512-NEXT: movq $-1, %rax -; AVX512-NEXT: cmovbq %rax, %rcx -; AVX512-NEXT: vmovq %rcx, %xmm4 -; AVX512-NEXT: vmovq %xmm2, %rcx -; AVX512-NEXT: vmovq %xmm3, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm2 -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; AVX512-NEXT: vpextrq $1, %xmm1, %rcx -; AVX512-NEXT: vpextrq $1, %xmm0, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm3 -; AVX512-NEXT: vmovq %xmm1, %rcx -; AVX512-NEXT: vmovq %xmm0, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm0 -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa %ymm1, %ymm2 +; AVX512-NEXT: vpternlogq $15, %ymm1, %ymm1, %ymm2 +; AVX512-NEXT: vpminuq %ymm2, %ymm0, %ymm0 +; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %z = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %x, <4 x i64> %y) ret <4 x i64> %z @@ -2051,341 +1355,290 @@ define <8 x i64> @v8i64(<8 x i64> %x, <8 x i64> %y) nounwind { ; SSE2-LABEL: v8i64: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm1, %xmm8 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: movq %xmm4, %rax -; SSE2-NEXT: movq %xmm0, %rcx -; SSE2-NEXT: addq %rax, %rcx -; SSE2-NEXT: movq $-1, %rax -; SSE2-NEXT: cmovbq %rax, %rcx -; SSE2-NEXT: movq %rcx, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1] -; SSE2-NEXT: movq %xmm4, %rcx -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm1, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm1 -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE2-NEXT: movq %xmm5, %rcx -; SSE2-NEXT: movq %xmm8, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1] -; SSE2-NEXT: movq %xmm4, %rcx -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1] -; SSE2-NEXT: movq %xmm4, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm4 -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = 
xmm1[0],xmm4[0] -; SSE2-NEXT: movq %xmm6, %rcx -; SSE2-NEXT: movq %xmm2, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1] -; SSE2-NEXT: movq %xmm5, %rcx -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE2-NEXT: movq %xmm2, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm2 -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0] -; SSE2-NEXT: movq %xmm7, %rcx -; SSE2-NEXT: movq %xmm3, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm5 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1] -; SSE2-NEXT: movq %xmm2, %rcx -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] -; SSE2-NEXT: movq %xmm2, %rdx -; SSE2-NEXT: addq %rcx, %rdx -; SSE2-NEXT: cmovbq %rax, %rdx -; SSE2-NEXT: movq %rdx, %xmm2 -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0] -; SSE2-NEXT: movdqa %xmm4, %xmm2 -; SSE2-NEXT: movdqa %xmm5, %xmm3 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456] +; SSE2-NEXT: movdqa %xmm0, %xmm10 +; SSE2-NEXT: pxor %xmm8, %xmm10 +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [9223372034707292159,9223372034707292159] +; SSE2-NEXT: movdqa %xmm4, %xmm11 +; SSE2-NEXT: pxor %xmm9, %xmm11 +; SSE2-NEXT: movdqa %xmm11, %xmm12 +; SSE2-NEXT: pcmpgtd %xmm10, %xmm12 +; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2] +; SSE2-NEXT: pcmpeqd %xmm10, %xmm11 +; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3] +; SSE2-NEXT: pand %xmm13, %xmm10 +; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm12[1,1,3,3] +; SSE2-NEXT: por %xmm10, %xmm11 +; SSE2-NEXT: pand %xmm11, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm10, %xmm10 +; SSE2-NEXT: pxor %xmm10, %xmm11 +; SSE2-NEXT: movdqa %xmm4, %xmm12 +; SSE2-NEXT: pandn %xmm11, %xmm12 +; SSE2-NEXT: por %xmm12, %xmm0 +; SSE2-NEXT: paddq %xmm4, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm11 +; SSE2-NEXT: pxor %xmm8, %xmm11 +; SSE2-NEXT: movdqa %xmm5, %xmm4 +; SSE2-NEXT: pxor %xmm9, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm12 +; SSE2-NEXT: pcmpgtd %xmm11, %xmm12 +; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2] +; SSE2-NEXT: pcmpeqd %xmm11, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm4[1,1,3,3] +; SSE2-NEXT: pand %xmm13, %xmm11 +; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3] +; SSE2-NEXT: por %xmm11, %xmm12 +; SSE2-NEXT: pand %xmm12, %xmm1 +; SSE2-NEXT: pxor %xmm10, %xmm12 +; SSE2-NEXT: movdqa %xmm5, %xmm4 +; SSE2-NEXT: pandn %xmm12, %xmm4 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: paddq %xmm5, %xmm1 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pxor %xmm8, %xmm4 +; SSE2-NEXT: movdqa %xmm6, %xmm5 +; SSE2-NEXT: pxor %xmm9, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm11 +; SSE2-NEXT: pcmpgtd %xmm4, %xmm11 +; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2] +; SSE2-NEXT: pcmpeqd %xmm4, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] +; SSE2-NEXT: pand %xmm12, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm11[1,1,3,3] +; SSE2-NEXT: por %xmm4, %xmm5 +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: pxor %xmm10, %xmm5 +; SSE2-NEXT: movdqa %xmm6, %xmm4 +; SSE2-NEXT: pandn %xmm5, %xmm4 +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: paddq %xmm6, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm8 +; SSE2-NEXT: pxor %xmm7, %xmm9 +; SSE2-NEXT: movdqa %xmm9, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm8, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] +; SSE2-NEXT: pcmpeqd %xmm8, %xmm9 +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm9[1,1,3,3] +; SSE2-NEXT: pand %xmm5, %xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = 
xmm4[1,1,3,3] +; SSE2-NEXT: por %xmm6, %xmm4 +; SSE2-NEXT: pand %xmm4, %xmm3 +; SSE2-NEXT: pxor %xmm10, %xmm4 +; SSE2-NEXT: movdqa %xmm7, %xmm5 +; SSE2-NEXT: pandn %xmm4, %xmm5 +; SSE2-NEXT: por %xmm5, %xmm3 +; SSE2-NEXT: paddq %xmm7, %xmm3 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: v8i64: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa %xmm1, %xmm8 -; SSSE3-NEXT: movdqa %xmm0, %xmm1 -; SSSE3-NEXT: movq %xmm4, %rax -; SSSE3-NEXT: movq %xmm0, %rcx -; SSSE3-NEXT: addq %rax, %rcx -; SSSE3-NEXT: movq $-1, %rax -; SSSE3-NEXT: cmovbq %rax, %rcx -; SSSE3-NEXT: movq %rcx, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1] -; SSSE3-NEXT: movq %xmm4, %rcx -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSSE3-NEXT: movq %xmm1, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm1 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSSE3-NEXT: movq %xmm5, %rcx -; SSSE3-NEXT: movq %xmm8, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1] -; SSSE3-NEXT: movq %xmm4, %rcx -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1] -; SSSE3-NEXT: movq %xmm4, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm4 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] -; SSSE3-NEXT: movq %xmm6, %rcx -; SSSE3-NEXT: movq %xmm2, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1] -; SSSE3-NEXT: movq %xmm5, %rcx -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSSE3-NEXT: movq %xmm2, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm2 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0] -; SSSE3-NEXT: movq %xmm7, %rcx -; SSSE3-NEXT: movq %xmm3, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm5 -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1] -; SSSE3-NEXT: movq %xmm2, %rcx -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] -; SSSE3-NEXT: movq %xmm2, %rdx -; SSSE3-NEXT: addq %rcx, %rdx -; SSSE3-NEXT: cmovbq %rax, %rdx -; SSSE3-NEXT: movq %rdx, %xmm2 -; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0] -; SSSE3-NEXT: movdqa %xmm4, %xmm2 -; SSSE3-NEXT: movdqa %xmm5, %xmm3 +; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456] +; SSSE3-NEXT: movdqa %xmm0, %xmm10 +; SSSE3-NEXT: pxor %xmm8, %xmm10 +; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [9223372034707292159,9223372034707292159] +; SSSE3-NEXT: movdqa %xmm4, %xmm11 +; SSSE3-NEXT: pxor %xmm9, %xmm11 +; SSSE3-NEXT: movdqa %xmm11, %xmm12 +; SSSE3-NEXT: pcmpgtd %xmm10, %xmm12 +; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2] +; SSSE3-NEXT: pcmpeqd %xmm10, %xmm11 +; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3] +; SSSE3-NEXT: pand %xmm13, %xmm10 +; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm12[1,1,3,3] +; SSSE3-NEXT: por %xmm10, %xmm11 +; SSSE3-NEXT: pand %xmm11, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm10, %xmm10 +; SSSE3-NEXT: pxor %xmm10, %xmm11 +; SSSE3-NEXT: movdqa %xmm4, %xmm12 +; SSSE3-NEXT: pandn %xmm11, %xmm12 +; SSSE3-NEXT: por %xmm12, %xmm0 +; SSSE3-NEXT: paddq %xmm4, %xmm0 +; SSSE3-NEXT: movdqa %xmm1, %xmm11 +; SSSE3-NEXT: pxor %xmm8, %xmm11 +; SSSE3-NEXT: movdqa %xmm5, %xmm4 +; SSSE3-NEXT: pxor %xmm9, %xmm4 +; SSSE3-NEXT: movdqa %xmm4, %xmm12 +; SSSE3-NEXT: pcmpgtd %xmm11, %xmm12 +; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2] 
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm4[1,1,3,3] +; SSSE3-NEXT: pand %xmm13, %xmm11 +; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3] +; SSSE3-NEXT: por %xmm11, %xmm12 +; SSSE3-NEXT: pand %xmm12, %xmm1 +; SSSE3-NEXT: pxor %xmm10, %xmm12 +; SSSE3-NEXT: movdqa %xmm5, %xmm4 +; SSSE3-NEXT: pandn %xmm12, %xmm4 +; SSSE3-NEXT: por %xmm4, %xmm1 +; SSSE3-NEXT: paddq %xmm5, %xmm1 +; SSSE3-NEXT: movdqa %xmm2, %xmm4 +; SSSE3-NEXT: pxor %xmm8, %xmm4 +; SSSE3-NEXT: movdqa %xmm6, %xmm5 +; SSSE3-NEXT: pxor %xmm9, %xmm5 +; SSSE3-NEXT: movdqa %xmm5, %xmm11 +; SSSE3-NEXT: pcmpgtd %xmm4, %xmm11 +; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2] +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] +; SSSE3-NEXT: pand %xmm12, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm11[1,1,3,3] +; SSSE3-NEXT: por %xmm4, %xmm5 +; SSSE3-NEXT: pand %xmm5, %xmm2 +; SSSE3-NEXT: pxor %xmm10, %xmm5 +; SSSE3-NEXT: movdqa %xmm6, %xmm4 +; SSSE3-NEXT: pandn %xmm5, %xmm4 +; SSSE3-NEXT: por %xmm4, %xmm2 +; SSSE3-NEXT: paddq %xmm6, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm8 +; SSSE3-NEXT: pxor %xmm7, %xmm9 +; SSSE3-NEXT: movdqa %xmm9, %xmm4 +; SSSE3-NEXT: pcmpgtd %xmm8, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2] +; SSSE3-NEXT: pcmpeqd %xmm8, %xmm9 +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm9[1,1,3,3] +; SSSE3-NEXT: pand %xmm5, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSSE3-NEXT: por %xmm6, %xmm4 +; SSSE3-NEXT: pand %xmm4, %xmm3 +; SSSE3-NEXT: pxor %xmm10, %xmm4 +; SSSE3-NEXT: movdqa %xmm7, %xmm5 +; SSSE3-NEXT: pandn %xmm4, %xmm5 +; SSSE3-NEXT: por %xmm5, %xmm3 +; SSSE3-NEXT: paddq %xmm7, %xmm3 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: v8i64: ; SSE41: # %bb.0: -; SSE41-NEXT: pextrq $1, %xmm4, %rax -; SSE41-NEXT: pextrq $1, %xmm0, %rcx -; SSE41-NEXT: addq %rax, %rcx -; SSE41-NEXT: movq $-1, %rax -; SSE41-NEXT: cmovbq %rax, %rcx -; SSE41-NEXT: movq %rcx, %xmm8 -; SSE41-NEXT: movq %xmm4, %rcx -; SSE41-NEXT: movq %xmm0, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm0 -; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm8[0] -; SSE41-NEXT: pextrq $1, %xmm5, %rcx -; SSE41-NEXT: pextrq $1, %xmm1, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm4 -; SSE41-NEXT: movq %xmm5, %rcx -; SSE41-NEXT: movq %xmm1, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm1 -; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] -; SSE41-NEXT: pextrq $1, %xmm6, %rcx -; SSE41-NEXT: pextrq $1, %xmm2, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm4 -; SSE41-NEXT: movq %xmm6, %rcx -; SSE41-NEXT: movq %xmm2, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm2 -; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; SSE41-NEXT: pextrq $1, %xmm7, %rcx -; SSE41-NEXT: pextrq $1, %xmm3, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm4 -; SSE41-NEXT: movq %xmm7, %rcx -; SSE41-NEXT: movq %xmm3, %rdx -; SSE41-NEXT: addq %rcx, %rdx -; SSE41-NEXT: cmovbq %rax, %rdx -; SSE41-NEXT: movq %rdx, %xmm3 -; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0] +; SSE41-NEXT: movdqa %xmm1, %xmm9 +; SSE41-NEXT: movdqa %xmm0, %xmm13 +; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [9223372039002259456,9223372039002259456] +; SSE41-NEXT: pxor %xmm11, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} 
xmm12 = [9223372034707292159,9223372034707292159] +; SSE41-NEXT: movdqa %xmm4, %xmm1 +; SSE41-NEXT: pxor %xmm12, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm8 +; SSE41-NEXT: pcmpgtd %xmm0, %xmm8 +; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2] +; SSE41-NEXT: pcmpeqd %xmm0, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] +; SSE41-NEXT: pand %xmm10, %xmm0 +; SSE41-NEXT: por %xmm8, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm10, %xmm10 +; SSE41-NEXT: movdqa %xmm4, %xmm8 +; SSE41-NEXT: pxor %xmm10, %xmm8 +; SSE41-NEXT: blendvpd %xmm0, %xmm13, %xmm8 +; SSE41-NEXT: paddq %xmm4, %xmm8 +; SSE41-NEXT: movdqa %xmm9, %xmm0 +; SSE41-NEXT: pxor %xmm11, %xmm0 +; SSE41-NEXT: movdqa %xmm5, %xmm1 +; SSE41-NEXT: pxor %xmm12, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: pcmpgtd %xmm0, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm4[0,0,2,2] +; SSE41-NEXT: pcmpeqd %xmm0, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] +; SSE41-NEXT: pand %xmm13, %xmm0 +; SSE41-NEXT: por %xmm4, %xmm0 +; SSE41-NEXT: movdqa %xmm5, %xmm1 +; SSE41-NEXT: pxor %xmm10, %xmm1 +; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm1 +; SSE41-NEXT: paddq %xmm5, %xmm1 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: pxor %xmm11, %xmm0 +; SSE41-NEXT: movdqa %xmm6, %xmm4 +; SSE41-NEXT: pxor %xmm12, %xmm4 +; SSE41-NEXT: movdqa %xmm4, %xmm5 +; SSE41-NEXT: pcmpgtd %xmm0, %xmm5 +; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm5[0,0,2,2] +; SSE41-NEXT: pcmpeqd %xmm0, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3] +; SSE41-NEXT: pand %xmm9, %xmm0 +; SSE41-NEXT: por %xmm5, %xmm0 +; SSE41-NEXT: movdqa %xmm6, %xmm4 +; SSE41-NEXT: pxor %xmm10, %xmm4 +; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4 +; SSE41-NEXT: paddq %xmm6, %xmm4 +; SSE41-NEXT: pxor %xmm3, %xmm11 +; SSE41-NEXT: pxor %xmm7, %xmm12 +; SSE41-NEXT: movdqa %xmm12, %xmm2 +; SSE41-NEXT: pcmpgtd %xmm11, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2] +; SSE41-NEXT: pcmpeqd %xmm11, %xmm12 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,3,3] +; SSE41-NEXT: pand %xmm5, %xmm0 +; SSE41-NEXT: por %xmm2, %xmm0 +; SSE41-NEXT: pxor %xmm7, %xmm10 +; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm10 +; SSE41-NEXT: paddq %xmm7, %xmm10 +; SSE41-NEXT: movdqa %xmm8, %xmm0 +; SSE41-NEXT: movdqa %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm10, %xmm3 ; SSE41-NEXT: retq ; ; AVX1-LABEL: v8i64: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vpextrq $1, %xmm4, %rax ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 -; AVX1-NEXT: vpextrq $1, %xmm5, %rcx -; AVX1-NEXT: addq %rax, %rcx -; AVX1-NEXT: movq $-1, %rax -; AVX1-NEXT: cmovbq %rax, %rcx -; AVX1-NEXT: vmovq %rcx, %xmm6 -; AVX1-NEXT: vmovq %xmm4, %rcx -; AVX1-NEXT: vmovq %xmm5, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm4 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0] -; AVX1-NEXT: vpextrq $1, %xmm2, %rcx -; AVX1-NEXT: vpextrq $1, %xmm0, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm5 -; AVX1-NEXT: vmovq %xmm2, %rcx -; AVX1-NEXT: vmovq %xmm0, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm0 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 -; AVX1-NEXT: vpextrq $1, %xmm2, %rcx -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 -; AVX1-NEXT: vpextrq $1, %xmm4, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm5 -; AVX1-NEXT: vmovq 
%xmm2, %rcx -; AVX1-NEXT: vmovq %xmm4, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm2 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0] -; AVX1-NEXT: vpextrq $1, %xmm3, %rcx -; AVX1-NEXT: vpextrq $1, %xmm1, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm4 -; AVX1-NEXT: vmovq %xmm3, %rcx -; AVX1-NEXT: vmovq %xmm1, %rdx -; AVX1-NEXT: addq %rcx, %rdx -; AVX1-NEXT: cmovbq %rax, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm1 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808] +; AVX1-NEXT: vpxor %xmm4, %xmm5, %xmm5 +; AVX1-NEXT: vxorps %xmm6, %xmm6, %xmm6 +; AVX1-NEXT: vcmptrueps %ymm6, %ymm6, %ymm8 +; AVX1-NEXT: vxorps %ymm8, %ymm2, %ymm7 +; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm6 +; AVX1-NEXT: vpxor %xmm4, %xmm6, %xmm6 +; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm9 +; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6 +; AVX1-NEXT: vxorps %xmm4, %xmm7, %xmm5 +; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm5 +; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm5, %ymm5 +; AVX1-NEXT: vblendvpd %ymm5, %ymm0, %ymm7, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6 +; AVX1-NEXT: vpaddq %xmm6, %xmm5, %xmm5 +; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vxorps %ymm8, %ymm3, %ymm5 +; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6 +; AVX1-NEXT: vpxor %xmm4, %xmm6, %xmm6 +; AVX1-NEXT: vpcmpgtq %xmm2, %xmm6, %xmm2 +; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm6 +; AVX1-NEXT: vxorps %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm4 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm5, %ymm1 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 +; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 ; AVX1-NEXT: retq ; ; AVX2-LABEL: v8i64: ; AVX2: # %bb.0: -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4 -; AVX2-NEXT: vpextrq $1, %xmm4, %rax -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5 -; AVX2-NEXT: vpextrq $1, %xmm5, %rcx -; AVX2-NEXT: addq %rax, %rcx -; AVX2-NEXT: movq $-1, %rax -; AVX2-NEXT: cmovbq %rax, %rcx -; AVX2-NEXT: vmovq %rcx, %xmm6 -; AVX2-NEXT: vmovq %xmm4, %rcx -; AVX2-NEXT: vmovq %xmm5, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm4 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0] -; AVX2-NEXT: vpextrq $1, %xmm2, %rcx -; AVX2-NEXT: vpextrq $1, %xmm0, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm5 -; AVX2-NEXT: vmovq %xmm2, %rcx -; AVX2-NEXT: vmovq %xmm0, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm0 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0] -; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2 -; AVX2-NEXT: vpextrq $1, %xmm2, %rcx -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4 -; AVX2-NEXT: vpextrq $1, %xmm4, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm5 -; AVX2-NEXT: vmovq %xmm2, %rcx -; AVX2-NEXT: vmovq %xmm4, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm2 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = 
xmm2[0],xmm5[0] -; AVX2-NEXT: vpextrq $1, %xmm3, %rcx -; AVX2-NEXT: vpextrq $1, %xmm1, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm4 -; AVX2-NEXT: vmovq %xmm3, %rcx -; AVX2-NEXT: vmovq %xmm1, %rdx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: cmovbq %rax, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm1 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808] +; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm5 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm6 = [9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807] +; AVX2-NEXT: vpxor %ymm6, %ymm2, %ymm7 +; AVX2-NEXT: vpcmpgtq %ymm5, %ymm7, %ymm5 +; AVX2-NEXT: vpcmpeqd %ymm7, %ymm7, %ymm7 +; AVX2-NEXT: vpxor %ymm7, %ymm2, %ymm8 +; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm8, %ymm0 +; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm2 +; AVX2-NEXT: vpxor %ymm6, %ymm3, %ymm4 +; AVX2-NEXT: vpcmpgtq %ymm2, %ymm4, %ymm2 +; AVX2-NEXT: vpxor %ymm7, %ymm3, %ymm4 +; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vpaddq %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512-LABEL: v8i64: ; AVX512: # %bb.0: -; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm2 -; AVX512-NEXT: vpextrq $1, %xmm2, %rax -; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm3 -; AVX512-NEXT: vpextrq $1, %xmm3, %rcx -; AVX512-NEXT: addq %rax, %rcx -; AVX512-NEXT: movq $-1, %rax -; AVX512-NEXT: cmovbq %rax, %rcx -; AVX512-NEXT: vmovq %rcx, %xmm4 -; AVX512-NEXT: vmovq %xmm2, %rcx -; AVX512-NEXT: vmovq %xmm3, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm2 -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3 -; AVX512-NEXT: vpextrq $1, %xmm3, %rcx -; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm4 -; AVX512-NEXT: vpextrq $1, %xmm4, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm5 -; AVX512-NEXT: vmovq %xmm3, %rcx -; AVX512-NEXT: vmovq %xmm4, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm3 -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0] -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX512-NEXT: vpextrq $1, %xmm3, %rcx -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm4 -; AVX512-NEXT: vpextrq $1, %xmm4, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm5 -; AVX512-NEXT: vmovq %xmm3, %rcx -; AVX512-NEXT: vmovq %xmm4, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm3 -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0] -; AVX512-NEXT: vpextrq $1, %xmm1, %rcx -; AVX512-NEXT: vpextrq $1, %xmm0, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm4 -; AVX512-NEXT: vmovq %xmm1, %rcx -; AVX512-NEXT: vmovq %xmm0, %rdx -; AVX512-NEXT: addq %rcx, %rdx -; AVX512-NEXT: cmovbq %rax, %rdx -; AVX512-NEXT: vmovq %rdx, %xmm0 -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0] -; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 -; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vmovdqa64 %zmm1, %zmm2 +; AVX512-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm2 +; AVX512-NEXT: vpminuq %zmm2, %zmm0, %zmm0 +; 
AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%z = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %x, <8 x i64> %y)
ret <8 x i64> %z
Index: test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
+++ test/Transforms/SLPVectorizer/X86/arith-add-usat.ll
@@ -26,40 +26,163 @@
declare i8 @llvm.uadd.sat.i8 (i8 , i8 )
define void @add_v8i64() {
-; CHECK-LABEL: @add_v8i64(
-; CHECK-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
-; CHECK-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
-; CHECK-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
-; CHECK-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
-; CHECK-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
-; CHECK-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
-; CHECK-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
-; CHECK-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
-; CHECK-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
-; CHECK-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
-; CHECK-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
-; CHECK-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
-; CHECK-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
-; CHECK-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
-; CHECK-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
-; CHECK-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
-; CHECK-NEXT: [[R0:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A0]], i64 [[B0]])
-; CHECK-NEXT: [[R1:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A1]], i64 [[B1]])
-; CHECK-NEXT: [[R2:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A2]], i64 [[B2]])
-; CHECK-NEXT: [[R3:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A3]], i64 [[B3]])
-; CHECK-NEXT: [[R4:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A4]], i64 [[B4]])
-; CHECK-NEXT: [[R5:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A5]], i64 [[B5]])
-; CHECK-NEXT: [[R6:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A6]], i64 [[B6]])
-; CHECK-NEXT: [[R7:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A7]], i64 [[B7]])
-; CHECK-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
-; CHECK-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
-; CHECK-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
-; CHECK-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
-; CHECK-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64],
[8 x i64]* @c64, i32 0, i64 4), align 8 -; CHECK-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8 -; CHECK-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8 -; CHECK-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8 -; CHECK-NEXT: ret void +; SSE-LABEL: @add_v8i64( +; SSE-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8 +; SSE-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8 +; SSE-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8 +; SSE-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8 +; SSE-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8 +; SSE-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8 +; SSE-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8 +; SSE-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8 +; SSE-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8 +; SSE-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8 +; SSE-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8 +; SSE-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8 +; SSE-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8 +; SSE-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8 +; SSE-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8 +; SSE-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8 +; SSE-NEXT: [[R0:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A0]], i64 [[B0]]) +; SSE-NEXT: [[R1:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A1]], i64 [[B1]]) +; SSE-NEXT: [[R2:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A2]], i64 [[B2]]) +; SSE-NEXT: [[R3:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A3]], i64 [[B3]]) +; SSE-NEXT: [[R4:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A4]], i64 [[B4]]) +; SSE-NEXT: [[R5:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A5]], i64 [[B5]]) +; SSE-NEXT: [[R6:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A6]], i64 [[B6]]) +; SSE-NEXT: [[R7:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A7]], i64 [[B7]]) +; SSE-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8 +; SSE-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8 +; SSE-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8 +; SSE-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8 +; SSE-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8 +; SSE-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, 
i32 0, i64 5), align 8 +; SSE-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8 +; SSE-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8 +; SSE-NEXT: ret void +; +; SLM-LABEL: @add_v8i64( +; SLM-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8 +; SLM-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8 +; SLM-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8 +; SLM-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8 +; SLM-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8 +; SLM-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8 +; SLM-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8 +; SLM-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8 +; SLM-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8 +; SLM-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8 +; SLM-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8 +; SLM-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8 +; SLM-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8 +; SLM-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8 +; SLM-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8 +; SLM-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8 +; SLM-NEXT: [[R0:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A0]], i64 [[B0]]) +; SLM-NEXT: [[R1:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A1]], i64 [[B1]]) +; SLM-NEXT: [[R2:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A2]], i64 [[B2]]) +; SLM-NEXT: [[R3:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A3]], i64 [[B3]]) +; SLM-NEXT: [[R4:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A4]], i64 [[B4]]) +; SLM-NEXT: [[R5:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A5]], i64 [[B5]]) +; SLM-NEXT: [[R6:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A6]], i64 [[B6]]) +; SLM-NEXT: [[R7:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A7]], i64 [[B7]]) +; SLM-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8 +; SLM-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8 +; SLM-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8 +; SLM-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8 +; SLM-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8 +; SLM-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8 +; SLM-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8 
+; SLM-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8 +; SLM-NEXT: ret void +; +; AVX1-LABEL: @add_v8i64( +; AVX1-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8 +; AVX1-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8 +; AVX1-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8 +; AVX1-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8 +; AVX1-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8 +; AVX1-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8 +; AVX1-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8 +; AVX1-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8 +; AVX1-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8 +; AVX1-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8 +; AVX1-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8 +; AVX1-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8 +; AVX1-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8 +; AVX1-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8 +; AVX1-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8 +; AVX1-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8 +; AVX1-NEXT: [[R0:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A0]], i64 [[B0]]) +; AVX1-NEXT: [[R1:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A1]], i64 [[B1]]) +; AVX1-NEXT: [[R2:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A2]], i64 [[B2]]) +; AVX1-NEXT: [[R3:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A3]], i64 [[B3]]) +; AVX1-NEXT: [[R4:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A4]], i64 [[B4]]) +; AVX1-NEXT: [[R5:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A5]], i64 [[B5]]) +; AVX1-NEXT: [[R6:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A6]], i64 [[B6]]) +; AVX1-NEXT: [[R7:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A7]], i64 [[B7]]) +; AVX1-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8 +; AVX1-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8 +; AVX1-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8 +; AVX1-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8 +; AVX1-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8 +; AVX1-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8 +; AVX1-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8 +; AVX1-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), 
align 8 +; AVX1-NEXT: ret void +; +; AVX2-LABEL: @add_v8i64( +; AVX2-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8 +; AVX2-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8 +; AVX2-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8 +; AVX2-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8 +; AVX2-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8 +; AVX2-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8 +; AVX2-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8 +; AVX2-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8 +; AVX2-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8 +; AVX2-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8 +; AVX2-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8 +; AVX2-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8 +; AVX2-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8 +; AVX2-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8 +; AVX2-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8 +; AVX2-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8 +; AVX2-NEXT: [[R0:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A0]], i64 [[B0]]) +; AVX2-NEXT: [[R1:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A1]], i64 [[B1]]) +; AVX2-NEXT: [[R2:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A2]], i64 [[B2]]) +; AVX2-NEXT: [[R3:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A3]], i64 [[B3]]) +; AVX2-NEXT: [[R4:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A4]], i64 [[B4]]) +; AVX2-NEXT: [[R5:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A5]], i64 [[B5]]) +; AVX2-NEXT: [[R6:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A6]], i64 [[B6]]) +; AVX2-NEXT: [[R7:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A7]], i64 [[B7]]) +; AVX2-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8 +; AVX2-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8 +; AVX2-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8 +; AVX2-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8 +; AVX2-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8 +; AVX2-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8 +; AVX2-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8 +; AVX2-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8 +; AVX2-NEXT: ret void +; +; AVX512-LABEL: @add_v8i64( +; AVX512-NEXT: [[TMP1:%.*]] = load <8 
x i64>, <8 x i64>* bitcast ([8 x i64]* @a64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* bitcast ([8 x i64]* @b64 to <8 x i64>*), align 8
+; AVX512-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP2]])
+; AVX512-NEXT: store <8 x i64> [[TMP3]], <8 x i64>* bitcast ([8 x i64]* @c64 to <8 x i64>*), align 8
+; AVX512-NEXT: ret void
+;
+; AVX256BW-LABEL: @add_v8i64(
+; AVX256BW-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @a64 to <4 x i64>*), align 8
+; AVX256BW-NEXT: [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX256BW-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([8 x i64]* @b64 to <4 x i64>*), align 8
+; AVX256BW-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX256BW-NEXT: [[TMP5:%.*]] = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP3]])
+; AVX256BW-NEXT: [[TMP6:%.*]] = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> [[TMP2]], <4 x i64> [[TMP4]])
+; AVX256BW-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
+; AVX256BW-NEXT: store <4 x i64> [[TMP6]], <4 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <4 x i64>*), align 8
+; AVX256BW-NEXT: ret void
;
%a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
%a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
@@ -97,72 +220,109 @@
}
define void @add_v16i32() {
-; CHECK-LABEL: @add_v16i32(
-; CHECK-NEXT: [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4
-; CHECK-NEXT: [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4
-; CHECK-NEXT: [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4
-; CHECK-NEXT: [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4
-; CHECK-NEXT: [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4
-; CHECK-NEXT: [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4
-; CHECK-NEXT: [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4
-; CHECK-NEXT: [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4
-; CHECK-NEXT: [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4
-; CHECK-NEXT: [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4
-; CHECK-NEXT: [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
-; CHECK-NEXT: [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
-; CHECK-NEXT: [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
-; CHECK-NEXT: [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
-; CHECK-NEXT: [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]*
@a32, i32 0, i64 14), align 4 -; CHECK-NEXT: [[A15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4 -; CHECK-NEXT: [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4 -; CHECK-NEXT: [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4 -; CHECK-NEXT: [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4 -; CHECK-NEXT: [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4 -; CHECK-NEXT: [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4 -; CHECK-NEXT: [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4 -; CHECK-NEXT: [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4 -; CHECK-NEXT: [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4 -; CHECK-NEXT: [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4 -; CHECK-NEXT: [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4 -; CHECK-NEXT: [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4 -; CHECK-NEXT: [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4 -; CHECK-NEXT: [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4 -; CHECK-NEXT: [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4 -; CHECK-NEXT: [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4 -; CHECK-NEXT: [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4 -; CHECK-NEXT: [[R0:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A0]], i32 [[B0]]) -; CHECK-NEXT: [[R1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A1]], i32 [[B1]]) -; CHECK-NEXT: [[R2:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A2]], i32 [[B2]]) -; CHECK-NEXT: [[R3:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A3]], i32 [[B3]]) -; CHECK-NEXT: [[R4:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A4]], i32 [[B4]]) -; CHECK-NEXT: [[R5:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A5]], i32 [[B5]]) -; CHECK-NEXT: [[R6:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A6]], i32 [[B6]]) -; CHECK-NEXT: [[R7:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A7]], i32 [[B7]]) -; CHECK-NEXT: [[R8:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A8]], i32 [[B8]]) -; CHECK-NEXT: [[R9:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A9]], i32 [[B9]]) -; CHECK-NEXT: [[R10:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A10]], i32 [[B10]]) -; CHECK-NEXT: [[R11:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A11]], i32 [[B11]]) -; CHECK-NEXT: [[R12:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A12]], i32 [[B12]]) -; CHECK-NEXT: [[R13:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A13]], i32 [[B13]]) -; CHECK-NEXT: [[R14:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A14]], i32 [[B14]]) -; CHECK-NEXT: [[R15:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A15]], i32 [[B15]]) -; CHECK-NEXT: store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4 -; CHECK-NEXT: store i32 [[R1]], i32* getelementptr 
inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4 -; CHECK-NEXT: store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2), align 4 -; CHECK-NEXT: store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4 -; CHECK-NEXT: store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4 -; CHECK-NEXT: store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4 -; CHECK-NEXT: store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4 -; CHECK-NEXT: store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4 -; CHECK-NEXT: store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4 -; CHECK-NEXT: store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4 -; CHECK-NEXT: store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4 -; CHECK-NEXT: store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4 -; CHECK-NEXT: store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4 -; CHECK-NEXT: store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4 -; CHECK-NEXT: store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4 -; CHECK-NEXT: store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4 -; CHECK-NEXT: ret void +; SSE-LABEL: @add_v16i32( +; SSE-NEXT: [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4 +; SSE-NEXT: [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4 +; SSE-NEXT: [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4 +; SSE-NEXT: [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4 +; SSE-NEXT: [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4 +; SSE-NEXT: [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4 +; SSE-NEXT: [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4 +; SSE-NEXT: [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4 +; SSE-NEXT: [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4 +; SSE-NEXT: [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4 +; SSE-NEXT: [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4 +; SSE-NEXT: [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4 +; SSE-NEXT: [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4 +; SSE-NEXT: [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4 +; SSE-NEXT: [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4 +; SSE-NEXT: [[A15:%.*]] = load i32, 
i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4 +; SSE-NEXT: [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4 +; SSE-NEXT: [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4 +; SSE-NEXT: [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4 +; SSE-NEXT: [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4 +; SSE-NEXT: [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4 +; SSE-NEXT: [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4 +; SSE-NEXT: [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4 +; SSE-NEXT: [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4 +; SSE-NEXT: [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4 +; SSE-NEXT: [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4 +; SSE-NEXT: [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4 +; SSE-NEXT: [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4 +; SSE-NEXT: [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4 +; SSE-NEXT: [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4 +; SSE-NEXT: [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4 +; SSE-NEXT: [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4 +; SSE-NEXT: [[R0:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A0]], i32 [[B0]]) +; SSE-NEXT: [[R1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A1]], i32 [[B1]]) +; SSE-NEXT: [[R2:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A2]], i32 [[B2]]) +; SSE-NEXT: [[R3:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A3]], i32 [[B3]]) +; SSE-NEXT: [[R4:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A4]], i32 [[B4]]) +; SSE-NEXT: [[R5:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A5]], i32 [[B5]]) +; SSE-NEXT: [[R6:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A6]], i32 [[B6]]) +; SSE-NEXT: [[R7:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A7]], i32 [[B7]]) +; SSE-NEXT: [[R8:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A8]], i32 [[B8]]) +; SSE-NEXT: [[R9:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A9]], i32 [[B9]]) +; SSE-NEXT: [[R10:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A10]], i32 [[B10]]) +; SSE-NEXT: [[R11:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A11]], i32 [[B11]]) +; SSE-NEXT: [[R12:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A12]], i32 [[B12]]) +; SSE-NEXT: [[R13:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A13]], i32 [[B13]]) +; SSE-NEXT: [[R14:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A14]], i32 [[B14]]) +; SSE-NEXT: [[R15:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[A15]], i32 [[B15]]) +; SSE-NEXT: store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4 +; SSE-NEXT: store i32 [[R1]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4 +; SSE-NEXT: store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x 
i32]* @c32, i32 0, i64 2), align 4 +; SSE-NEXT: store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4 +; SSE-NEXT: store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4 +; SSE-NEXT: store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4 +; SSE-NEXT: store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4 +; SSE-NEXT: store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4 +; SSE-NEXT: store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4 +; SSE-NEXT: store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4 +; SSE-NEXT: store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4 +; SSE-NEXT: store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4 +; SSE-NEXT: store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4 +; SSE-NEXT: store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4 +; SSE-NEXT: store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4 +; SSE-NEXT: store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4 +; SSE-NEXT: ret void +; +; SLM-LABEL: @add_v16i32( +; SLM-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @a32 to <4 x i32>*), align 4 +; SLM-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4) to <4 x i32>*), align 4 +; SLM-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <4 x i32>*), align 4 +; SLM-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12) to <4 x i32>*), align 4 +; SLM-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @b32 to <4 x i32>*), align 4 +; SLM-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4) to <4 x i32>*), align 4 +; SLM-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <4 x i32>*), align 4 +; SLM-NEXT: [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12) to <4 x i32>*), align 4 +; SLM-NEXT: [[TMP9:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[TMP1]], <4 x i32> [[TMP5]]) +; SLM-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[TMP2]], <4 x i32> [[TMP6]]) +; SLM-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[TMP3]], <4 x i32> [[TMP7]]) +; SLM-NEXT: [[TMP12:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[TMP4]], <4 x i32> [[TMP8]]) +; SLM-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([16 x i32]* @c32 to <4 x i32>*), align 4 +; SLM-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4) to <4 x i32>*), align 4 +; SLM-NEXT: store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) 
to <4 x i32>*), align 4
+; SLM-NEXT: store <4 x i32> [[TMP12]], <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12) to <4 x i32>*), align 4
+; SLM-NEXT: ret void
+;
+; AVX-LABEL: @add_v16i32(
+; AVX-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @a32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([16 x i32]* @b32 to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: [[TMP5:%.*]] = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> [[TMP1]], <8 x i32> [[TMP3]])
+; AVX-NEXT: [[TMP6:%.*]] = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> [[TMP2]], <8 x i32> [[TMP4]])
+; AVX-NEXT: store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([16 x i32]* @c32 to <8 x i32>*), align 4
+; AVX-NEXT: store <8 x i32> [[TMP6]], <8 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8) to <8 x i32>*), align 4
+; AVX-NEXT: ret void
+;
+; AVX512-LABEL: @add_v16i32(
+; AVX512-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @a32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([16 x i32]* @b32 to <16 x i32>*), align 4
+; AVX512-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP2]])
+; AVX512-NEXT: store <16 x i32> [[TMP3]], <16 x i32>* bitcast ([16 x i32]* @c32 to <16 x i32>*), align 4
+; AVX512-NEXT: ret void
;
%a0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
%a1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4