Index: lib/Target/X86/X86ScheduleZnver1.td
===================================================================
--- lib/Target/X86/X86ScheduleZnver1.td
+++ lib/Target/X86/X86ScheduleZnver1.td
@@ -92,7 +92,7 @@
 def : ReadAdvance;
 // (a folded load is an instruction that loads and does some operation)
-// Ex: ADDPD xmm,[mem]-> This instruction has two micro-ops 
+// Ex: ADDPD xmm,[mem]-> This instruction has two micro-ops
 // Instructions with folded loads are usually micro-fused, so they only appear
 // as two micro-ops.
 // a. load and
@@ -104,7 +104,7 @@
   // Register variant takes 1-cycle on Execution Port.
   def : WriteRes { let Latency = Lat; }
-  // Memory variant also uses a cycle on ZnAGU 
+  // Memory variant also uses a cycle on ZnAGU
   // adds 4 cycles to the latency.
   def : WriteRes {
     let Latency = !add(Lat, 4);
@@ -125,7 +125,7 @@
   }
 }
-// WriteRMW is set for instructions with Memory write 
+// WriteRMW is set for instructions with Memory write
 // operation in codegen
 def : WriteRes;
@@ -220,4 +220,471 @@
 def : WriteRes;
 def : WriteRes;
 }
+
+//=== Regex based itineraries ===//
+// Notation:
+// - r = register.
+// - m = memory.
+// - i = immediate.
+// - mm = 64 bit mmx register.
+// - x = 128 bit xmm register.
+// - (x)mm = mmx or xmm register.
+// - y = 256 bit ymm register.
+// - v = any vector register.
+
+//=== Integer Instructions ===//
+//-- Move instructions --//
+// MOV.
+// r16,m.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "MOV16rm")>;
+
+// MOVSX, MOVZX.
+// r,m.
+def : InstRW<[WriteLoad], (instregex "MOV(S|Z)X32rm(8|16)")>;
+
+// CMOVcc.
+// r,r.
+def : InstRW<[WriteALU],
+             (instregex "CMOV(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)(16|32|64)rr")>;
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd],
+             (instregex "CMOV(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)(16|32|64)rm")>;
+
+// XCHG.
+// r,r.
+def ZnWriteXCHG : SchedWriteRes<[ZnALU]> {
+  let NumMicroOps = 2;
+  let ResourceCycles = [2];
+}
+
+def : InstRW<[ZnWriteXCHG], (instregex "XCHG(8|16|32|64)rr", "XCHG(16|32|64)ar")>;
+
+// r,m.
+def ZnWriteXCHGrm : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 5;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteXCHGrm, ReadAfterLd], (instregex "XCHG(8|16|32|64)rm")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "XLAT")>;
+
+// POP16.
+// r.
+def ZnWritePop16r : SchedWriteRes<[ZnAGU]> {
+  let Latency = 5;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWritePop16r], (instregex "POP16rmm")>;
+def : InstRW<[WriteMicrocoded], (instregex "POPF(16|32)")>;
+def : InstRW<[WriteMicrocoded], (instregex "POPA(16|32)")>;
+
+
+// PUSH.
+// r. Has default values.
+// m.
+def ZnWritePUSH : SchedWriteRes<[ZnAGU]> {
+  let Latency = 4;
+}
+def : InstRW<[ZnWritePUSH], (instregex "PUSH(16|32)rmm")>;
+
+// PUSHF.
+def : InstRW<[WriteMicrocoded], (instregex "PUSHF(16|32)")>;
+
+// PUSHA.
+def ZnWritePushA : SchedWriteRes<[ZnAGU]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWritePushA], (instregex "PUSHA(16|32)")>;
+
+// LAHF.
+def : InstRW<[WriteMicrocoded], (instregex "LAHF")>;
+
+// SAHF.
+def ZnWriteSAHF : SchedWriteRes<[ZnALU]> {
+  let Latency = 2;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteSAHF], (instregex "SAHF")>;
+
+// BSWAP.
+def ZnWriteBSwap : SchedWriteRes<[ZnALU]> {
+  let ResourceCycles = [4];
+}
+def : InstRW<[ZnWriteBSwap], (instregex "BSWAP")>;
+
+// MOVBE.
+// r,m.
+def ZnWriteMOVBE : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 5;
+  let ResourceCycles = [2];
+}
+def : InstRW<[ZnWriteMOVBE, ReadAfterLd], (instregex "MOVBE(16|32|64)rm")>;
+
+// m16,r16.
+def : InstRW<[ZnWriteMOVBE], (instregex "MOVBE(16|32|64)mr")>;
+
+//-- Arithmetic instructions --//
+
+// ADD SUB.
+// m,r/i.
+def : InstRW<[WriteALULd], (instregex "(ADD|SUB)(8|16|32|64)m(r|i)",
+                                      "(ADD|SUB)(8|16|32|64)mi8",
+                                      "(ADD|SUB)64mi32")>;
+
+// ADC SBB.
+// r,r/i.
+def : InstRW<[WriteALU], (instregex "(ADC|SBB)(8|16|32|64)r(r|i)",
+                                    "(ADC|SBB)(16|32|64)ri8",
+                                    "(ADC|SBB)64ri32",
+                                    "(ADC|SBB)(8|16|32|64)rr_REV")>;
+
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd],
+             (instregex "(ADC|SBB)(8|16|32|64)rm")>;
+
+// m,r/i.
+def : InstRW<[WriteALULd],
+             (instregex "(ADC|SBB)(8|16|32|64)m(r|i)",
+                        "(ADC|SBB)(16|32|64)mi8",
+                        "(ADC|SBB)64mi32")>;
+
+// INC DEC NOT NEG.
+// m.
+def : InstRW<[WriteALULd],
+             (instregex "(INC|DEC|NOT|NEG)(8|16|32|64)m",
+                        "(INC|DEC)64(16|32)m")>;
+
+// MUL IMUL.
+// r16.
+def ZnWriteMul16 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteMul16], (instregex "IMUL16r", "MUL16r")>;
+
+// m16.
+def ZnWriteMul16Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMul16Ld, ReadAfterLd], (instregex "IMUL16m", "MUL16m")>;
+
+// r32.
+def ZnWriteMul32 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteMul32], (instregex "IMUL32r", "MUL32r")>;
+
+// m32.
+def ZnWriteMul32Ld : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMul32Ld, ReadAfterLd], (instregex "IMUL32m", "MUL32m")>;
+
+// r64.
+def ZnWriteMul64 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 4;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMul64], (instregex "IMUL64r", "MUL64r")>;
+
+// m64.
+def ZnWriteMul64Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 9;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMul64Ld, ReadAfterLd], (instregex "IMUL64m", "MUL64m")>;
+
+// r16,r16.
+def ZnWriteMul16rri : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteMul16rri], (instregex "IMUL16rri", "IMUL16rri8")>;
+
+// r16,m16.
+def ZnWriteMul16rmi : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMul16rmi, ReadAfterLd], (instregex "IMUL16rmi", "IMUL16rmi8")>;
+
+// MULX.
+// r32,r32,r32.
+def ZnWriteMulX32 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 3;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteMulX32], (instregex "MULX32rr")>;
+
+// r32,r32,m32.
+def ZnWriteMulX32Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+  let ResourceCycles = [1, 2, 2];
+}
+def : InstRW<[ZnWriteMulX32Ld, ReadAfterLd], (instregex "MULX32rm")>;
+
+// r64,r64,r64.
+def ZnWriteMulX64 : SchedWriteRes<[ZnALU1]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteMulX64], (instregex "MULX64rr")>;
+
+// r64,r64,m64.
+def ZnWriteMulX64Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMulX64Ld, ReadAfterLd], (instregex "MULX64rm")>;
+
+// DIV, IDIV.
+// r8.
+def ZnWriteDiv8 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+  let Latency = 15;
+}
+def : InstRW<[ZnWriteDiv8], (instregex "DIV8r", "IDIV8r")>;
+
+// r16.
+def ZnWriteDiv16 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+  let Latency = 17;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv16], (instregex "DIV16r", "IDIV16r")>;
+
+// r32.
+def ZnWriteDiv32 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+  let Latency = 25;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv32], (instregex "DIV32r", "IDIV32r")>;
+
+// r64.
+def ZnWriteDiv64 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+  let Latency = 41;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv64], (instregex "DIV64r", "IDIV64r")>;
+
+//-- Control transfer instructions --//
+
+// J(E|R)CXZ.
+def ZnWriteJCXZ : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteJCXZ], (instregex "JCXZ", "JECXZ_(32|64)", "JRCXZ")>;
+
+// INTO.
+def : InstRW<[WriteMicrocoded], (instregex "INTO")>;
+
+// LOOP.
+def ZnWriteLOOP : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteLOOP], (instregex "LOOP")>;
+
+// LOOP(N)E, LOOP(N)Z.
+def ZnWriteLOOPE : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteLOOPE], (instregex "LOOPE", "LOOPNE",
+                                        "LOOPZ", "LOOPNZ")>;
+
+// CALL.
+// r.
+def ZnWriteCALLr : SchedWriteRes<[ZnAGU, ZnALU03]>;
+def : InstRW<[ZnWriteCALLr], (instregex "CALL(16|32)r")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "CALL(16|32)m")>;
+
+// RET.
+def ZnWriteRET : SchedWriteRes<[ZnALU03]> {
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteRET], (instregex "RET(L|Q|W)", "LRET(L|Q|W)",
+                                      "IRET(D|Q)", "RETF")>;
+
+//-- Logic instructions --//
+
+// AND OR XOR.
+// m,r/i.
+def : InstRW<[WriteALULd],
+             (instregex "(AND|OR|XOR)(8|16|32|64)m(r|i)",
+                        "(AND|OR|XOR)(8|16|32|64)mi8", "(AND|OR|XOR)64mi32")>;
+
+// ANDN.
+// r,r.
+def : InstRW<[WriteALU], (instregex "ANDN(32|64)rr")>;
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "ANDN(32|64)rm")>;
+
+// Define ALU latency variants.
+def ZnWriteALULat2 : SchedWriteRes<[ZnALU]> {
+  let Latency = 2;
+}
+def ZnWriteALULat2Ld : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 6;
+}
+
+def ZnWriteALULat3 : SchedWriteRes<[ZnALU]> {
+  let Latency = 3;
+}
+def ZnWriteALULat3Ld : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 7;
+}
+
+// BSF BSR.
+// r,r.
+def : InstRW<[ZnWriteALULat3], (instregex "BS(R|F)(16|32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat3Ld, ReadAfterLd], (instregex "BS(R|F)(16|32|64)rm")>;
+
+// BT.
+// r,r/i.
+def : InstRW<[WriteShift], (instregex "BT(16|32|64)r(r|i8)")>;
+
+def : InstRW<[WriteShiftLd], (instregex "BT(16|32|64)mr")>;
+def : InstRW<[WriteShiftLd], (instregex "BT(16|32|64)mi8")>;
+
+// BTR BTS BTC.
+// r,r,i.
+def ZnWriteBTRSC : SchedWriteRes<[ZnALU]> {
+  let Latency = 2;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteBTRSC], (instregex "BT(R|S|C)(16|32|64)r(r|i8)")>;
+
+
+// m,r,i.
+def ZnWriteBTRSCm : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 6;
+  let NumMicroOps = 2;
+}
+// m,r,i.
+def : InstRW<[ZnWriteBTRSCm], (instregex "BT(R|S|C)(16|32|64)m(r|i8)")>;
+
+// BLSI BLSMSK BLSR.
+// r,r.
+def : InstRW<[ZnWriteALULat2], (instregex "BLS(I|MSK|R)(32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat2Ld, ReadAfterLd], (instregex "BLS(I|MSK|R)(32|64)rm")>;
+
+// BEXTR.
+// r,r,r.
+def : InstRW<[WriteALU], (instregex "BEXTR(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "BEXTR(32|64)rm")>;
+
+// BZHI.
+// r,r,r.
+def : InstRW<[WriteALU], (instregex "BZHI(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "BZHI(32|64)rm")>;
+
+// CLD STD.
+def : InstRW<[WriteALU], (instregex "STD", "CLD")>;
+
+// PDEP PEXT.
+// r,r,r.
+def : InstRW<[WriteMicrocoded], (instregex "PDEP(32|64)rr", "PEXT(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteMicrocoded], (instregex "PDEP(32|64)rm", "PEXT(32|64)rm")>;
+
+// ROR ROL.
+def : InstRW<[WriteShift], (instregex "RO(R|L)(8|16|32|64)r1")>;
+
+// RCR RCL.
+// r,1.
+def : InstRW<[WriteShift], (instregex "RC(R|L)(8|16|32|64)r1")>;
+
+// m,1.
+def : InstRW<[WriteMicrocoded], (instregex "RC(R|L)(8|16|32|64)m1")>;
+
+// r,i / r,CL.
+def : InstRW<[WriteShift], (instregex "RC(R|L)(8|16|32|64)r(i|CL)")>;
+
+// m,i / m,CL.
+def : InstRW<[WriteMicrocoded], (instregex "RC(R|L)(8|16|32|64)m(i|CL)")>;
+
+// SHR SHL SAR.
+// m,i.
+def : InstRW<[WriteShiftLd], (instregex "S(A|H)(R|L)(8|16|32|64)m(i|1)")>;
+
+// SHRD SHLD.
+// r,r.
+def : InstRW<[WriteShift], (instregex "SH(R|L)D(16|32|64)rri8")>;
+
+// m,r.
+def : InstRW<[WriteShiftLd], (instregex "SH(R|L)D(16|32|64)mri8")>;
+
+// r,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SHLD(16|32|64)rrCL")>;
+
+// r,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SHRD(16|32|64)rrCL")>;
+
+// m,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SH(R|L)D(16|32|64)mrCL")>;
+
+// SETcc.
+// r.
+def : InstRW<[WriteShift],
+             (instregex "SET(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)r")>;
+// m.
+def : InstRW<[WriteShift],
+             (instregex "SET(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)m")>;
+
+// LZCNT TZCNT.
+// r,r.
+def : InstRW<[ZnWriteALULat2], (instregex "(L|T)ZCNT(16|32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat2Ld, ReadAfterLd], (instregex "(L|T)ZCNT(16|32|64)rm")>;
+
+//-- Misc instructions --//
+// CMPXCHG.
+def ZnWriteCMPXCHG : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 8;
+  let NumMicroOps = 5;
+}
+def : InstRW<[ZnWriteCMPXCHG], (instregex "CMPXCHG(8|16|32|64)rm")>;
+
+// CMPXCHG8B.
+def ZnWriteCMPXCHG8B : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let NumMicroOps = 18;
+}
+def : InstRW<[ZnWriteCMPXCHG8B], (instregex "CMPXCHG8B")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "CMPXCHG16B")>;
+
+// LEAVE.
+def ZnWriteLEAVE : SchedWriteRes<[ZnALU, ZnAGU]> {
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteLEAVE], (instregex "LEAVE")>;
+
+// PAUSE.
+def : InstRW<[WriteMicrocoded], (instregex "PAUSE")>;
+
+// RDTSC.
+def : InstRW<[WriteMicrocoded], (instregex "RDTSC")>;
+
+// RDPMC.
+def : InstRW<[WriteMicrocoded], (instregex "RDPMC")>;
+
+// RDRAND.
+def : InstRW<[WriteMicrocoded], (instregex "RDRAND(16|32|64)r")>;
+
+// XGETBV.
+def : InstRW<[WriteMicrocoded], (instregex "XGETBV")>;
+
+//-- String instructions --//
+// CMPS.
+def : InstRW<[WriteMicrocoded], (instregex "CMPS(B|L|Q|W)")>;
+
+// LODSB/W.
+def : InstRW<[WriteMicrocoded], (instregex "LODS(B|W)")>;
+
+// LODSD/Q.
+def : InstRW<[WriteMicrocoded], (instregex "LODS(L|Q)")>;
+
+// MOVS.
+def : InstRW<[WriteMicrocoded], (instregex "MOVS(B|L|Q|W)")>;
+
+// SCAS.
+def : InstRW<[WriteMicrocoded], (instregex "SCAS(B|W|L|Q)")>;
+
+// STOS.
+def : InstRW<[WriteMicrocoded], (instregex "STOS(B|L|Q|W)")>;
+
+// XADD.
+def : InstRW<[WriteMicrocoded], (instregex "XADD(8|16|32|64)rm")>; + +} // SchedModel Index: test/CodeGen/X86/avx-schedule.ll =================================================================== --- test/CodeGen/X86/avx-schedule.ll +++ test/CodeGen/X86/avx-schedule.ll @@ -36,7 +36,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fadd <4 x double> %1, %2 @@ -72,7 +72,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fadd <8 x float> %1, %2 @@ -108,7 +108,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %1, <4 x double> %2) @@ -145,7 +145,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %1, <8 x float> %2) @@ -187,7 +187,7 @@ ; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> %3 = xor <4 x i64> %1, @@ -235,7 +235,7 @@ ; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> %3 = xor <4 x i64> %1, @@ -283,7 +283,7 @@ ; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> %3 = and <4 x i64> %1, %2 @@ -329,7 +329,7 @@ ; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> %3 = and <4 x i64> %1, %2 @@ -375,7 +375,7 @@ ; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50] ; 
ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fadd <4 x double> %a1, %1 @@ -412,7 +412,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50] ; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> @@ -448,7 +448,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) %2 = load <4 x double>, <4 x double> *%a3, align 32 %3 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %1, <4 x double> %2, <4 x double> %a2) @@ -485,7 +485,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) %2 = load <8 x float>, <8 x float> *%a3, align 32 %3 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %1, <8 x float> %2, <8 x float> %a2) @@ -517,7 +517,7 @@ ; ZNVER1-LABEL: test_broadcastf128: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x float>, <4 x float> *%a0, align 32 %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> ret <8 x float> %2 @@ -547,7 +547,7 @@ ; ZNVER1-LABEL: test_broadcastsd_ymm: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load double, double *%a0, align 8 %2 = insertelement <4 x double> undef, double %1, i32 0 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> zeroinitializer @@ -578,7 +578,7 @@ ; ZNVER1-LABEL: test_broadcastss: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load float, float *%a0, align 4 %2 = insertelement <4 x float> undef, float %1, i32 0 %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer @@ -609,7 +609,7 @@ ; ZNVER1-LABEL: test_broadcastss_ymm: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load float, float *%a0, align 4 %2 = insertelement <8 x float> undef, float %1, i32 0 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> zeroinitializer @@ -650,7 +650,7 @@ ; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00] ; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] ; ZNVER1-NEXT: vorpd 
%ymm0, %ymm1, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fcmp oeq <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fcmp oeq <4 x double> %a0, %2 @@ -695,7 +695,7 @@ ; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00] ; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] ; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fcmp oeq <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fcmp oeq <8 x float> %a0, %2 @@ -740,7 +740,7 @@ ; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [5:1.00] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp <4 x i32> %a0 to <4 x double> %2 = load <4 x i32>, <4 x i32> *%a1, align 16 %3 = sitofp <4 x i32> %2 to <4 x double> @@ -784,7 +784,7 @@ ; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [5:1.00] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp <8 x i32> %a0 to <8 x float> %2 = load <8 x i32>, <8 x i32> *%a1, align 16 %3 = sitofp <8 x i32> %2 to <8 x float> @@ -826,7 +826,7 @@ ; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi <4 x double> %a0 to <4 x i32> %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = fptosi <4 x double> %2 to <4 x i32> @@ -868,7 +868,7 @@ ; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptrunc <4 x double> %a0 to <4 x float> %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = fptrunc <4 x double> %2 to <4 x float> @@ -910,7 +910,7 @@ ; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [5:1.00] ; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi <8 x float> %a0 to <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = fptosi <8 x float> %2 to <8 x i32> @@ -947,7 +947,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:1.00] ; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fdiv <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fdiv <4 x double> %1, %2 @@ -983,7 +983,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [15:1.00] ; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [22:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fdiv <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fdiv <8 x float> %1, %2 @@ -1019,7 +1019,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: 
[10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %1, <8 x float> %2, i8 7) @@ -1060,7 +1060,7 @@ ; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:0.50] ; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> %2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32> store <4 x float> %2, <4 x float> *%a2 @@ -1096,7 +1096,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %1, <4 x double> %2) @@ -1133,7 +1133,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %1, <8 x float> %2) @@ -1170,7 +1170,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %1, <4 x double> %2) @@ -1207,7 +1207,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %1, <8 x float> %2) @@ -1249,7 +1249,7 @@ ; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50] ; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> %2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32> %3 = load <4 x float>, <4 x float> *%a2, align 16 @@ -1283,7 +1283,7 @@ ; ZNVER1-LABEL: test_lddqu: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vlddqu (%rdi), %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0) ret <32 x i8> %1 } @@ -1323,7 +1323,7 @@ ; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 ; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) ; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: 
[1:0.50] %1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1) call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) ret <2 x double> %1 @@ -1365,7 +1365,7 @@ ; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 ; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) ; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1) call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %a1, <4 x double> %a2) ret <4 x double> %1 @@ -1407,7 +1407,7 @@ ; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 ; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) ; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1) call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) ret <4 x float> %1 @@ -1449,7 +1449,7 @@ ; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 ; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) ; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1) call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %a1, <8 x float> %a2) ret <8 x float> %1 @@ -1486,7 +1486,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %1, <4 x double> %2) @@ -1523,7 +1523,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %1, <8 x float> %2) @@ -1560,7 +1560,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %1, <4 x double> %2) @@ -1597,7 +1597,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %1, <8 x float> %2) @@ -1639,7 +1639,7 @@ ; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x 
double>, <4 x double> *%a0, align 32 %2 = fadd <4 x double> %1, %1 store <4 x double> %2, <4 x double> *%a1, align 32 @@ -1680,7 +1680,7 @@ ; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <8 x float>, <8 x float> *%a0, align 32 %2 = fadd <8 x float> %1, %1 store <8 x float> %2, <8 x float> *%a1, align 32 @@ -1721,7 +1721,7 @@ ; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [8:0.50] ; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> @@ -1757,7 +1757,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.25] ; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0) ret i32 %1 } @@ -1791,7 +1791,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.25] ; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0) ret i32 %1 } @@ -1826,7 +1826,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <4 x double> %a0, %a0 store <4 x double> %1, <4 x double> *%a1, align 32, !nontemporal !0 ret <4 x double> %1 @@ -1861,7 +1861,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <8 x float> %a0, %a0 store <8 x float> %1, <8 x float> *%a1, align 32, !nontemporal !0 ret <8 x float> %1 @@ -1901,7 +1901,7 @@ ; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [8:0.50] ; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> @@ -1943,7 +1943,7 @@ ; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [8:0.50] ; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> @@ -1987,7 +1987,7 @@ ; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x double>, <4 x double> *%a0, 
align 1 %2 = fadd <4 x double> %1, %1 store <4 x double> %2, <4 x double> *%a1, align 1 @@ -2030,7 +2030,7 @@ ; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <8 x float>, <8 x float> *%a0, align 1 %2 = fadd <8 x float> %1, %1 store <8 x float> %2, <8 x float> *%a1, align 1 @@ -2066,7 +2066,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00] ; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fmul <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fmul <4 x double> %1, %2 @@ -2102,7 +2102,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00] ; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fmul <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fmul <8 x float> %1, %2 @@ -2143,7 +2143,7 @@ ; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> %3 = or <4 x i64> %1, %2 @@ -2189,7 +2189,7 @@ ; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> %3 = or <4 x i64> %1, %2 @@ -2235,7 +2235,7 @@ ; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [8:0.50] ; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> %2 = load <2 x double>, <2 x double> *%a1, align 16 %3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> @@ -2277,7 +2277,7 @@ ; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:0.50] ; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> @@ -2319,7 +2319,7 @@ ; ZNVER1-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50] ; ZNVER1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> @@ -2361,7 +2361,7 @@ ; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:0.50] ; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = 
ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> @@ -2398,7 +2398,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1) %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> %2) @@ -2435,7 +2435,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1) %2 = load <4 x i64>, <4 x i64> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> %2) @@ -2472,7 +2472,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> %2) @@ -2509,7 +2509,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1) %2 = load <8 x i32>, <8 x i32> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> %2) @@ -2551,7 +2551,7 @@ ; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [12:0.50] ; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %2) @@ -2594,7 +2594,7 @@ ; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00] ; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7) %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %2, i32 7) @@ -2637,7 +2637,7 @@ ; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [10:1.00] ; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7) %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = call <8 x float> 
@llvm.x86.avx.round.ps.256(<8 x float> %2, i32 7) @@ -2680,7 +2680,7 @@ ; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [12:0.50] ; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [5:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %2) @@ -2723,7 +2723,7 @@ ; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50] ; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> @@ -2760,7 +2760,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50] ; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> @@ -2801,7 +2801,7 @@ ; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [27:1.00] ; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [20:1.00] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0) %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %2) @@ -2844,7 +2844,7 @@ ; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [27:1.00] ; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [20:1.00] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %2) @@ -2882,7 +2882,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fsub <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fsub <4 x double> %1, %2 @@ -2918,7 +2918,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fsub <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fsub <8 x float> %1, %2 @@ -2969,7 +2969,7 @@ ; ZNVER1-NEXT: setb %al # sched: [1:0.25] ; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %2) @@ 
-3026,7 +3026,7 @@ ; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] ; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %2) @@ -3079,7 +3079,7 @@ ; ZNVER1-NEXT: setb %al # sched: [1:0.25] ; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %2) @@ -3136,7 +3136,7 @@ ; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] ; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %2) @@ -3179,7 +3179,7 @@ ; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50] ; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> @@ -3216,7 +3216,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50] ; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> @@ -3257,7 +3257,7 @@ ; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50] ; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> @@ -3294,7 +3294,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50] ; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> @@ -3335,7 +3335,7 @@ ; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: 
vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> %3 = xor <4 x i64> %1, %2 @@ -3381,7 +3381,7 @@ ; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> %3 = xor <4 x i64> %1, %2 @@ -3417,7 +3417,7 @@ ; ZNVER1-LABEL: test_zeroall: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vzeroall -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.avx.vzeroall() ret void } @@ -3447,7 +3447,7 @@ ; ZNVER1-LABEL: test_zeroupper: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.avx.vzeroupper() ret void } Index: test/CodeGen/X86/avx2-schedule.ll =================================================================== --- test/CodeGen/X86/avx2-schedule.ll +++ test/CodeGen/X86/avx2-schedule.ll @@ -24,7 +24,7 @@ ; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0) %2 = load <32 x i8>, <32 x i8> *%a1, align 32 %3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2) @@ -53,7 +53,7 @@ ; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0) %2 = load <8 x i32>, <8 x i32> *%a1, align 32 %3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2) @@ -82,7 +82,7 @@ ; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0) %2 = load <16 x i16>, <16 x i16> *%a1, align 32 %3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2) @@ -108,7 +108,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <32 x i8> %a0, %a1 %2 = load <32 x i8>, <32 x i8> *%a2, align 32 %3 = add <32 x i8> %1, %2 @@ -132,7 +132,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <8 x i32> %a0, %a1 %2 = load <8 x i32>, <8 x i32> *%a2, align 32 %3 = add <8 x i32> %1, %2 @@ -156,7 +156,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <4 x i64> %a0, %a1 %2 = load <4 x i64>, <4 x i64> *%a2, align 32 %3 = add <4 x i64> %1, %2 @@ -180,7 +180,7 @@ ; 
ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <16 x i16> %a0, %a1 %2 = load <16 x i16>, <16 x i16> *%a2, align 32 %3 = add <16 x i16> %1, %2 @@ -207,7 +207,7 @@ ; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = and <4 x i64> %a0, %a1 %2 = load <4 x i64>, <4 x i64> *%a2, align 32 %3 = and <4 x i64> %1, %2 @@ -235,7 +235,7 @@ ; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = xor <4 x i64> %a0, %2 = and <4 x i64> %a1, %1 %3 = load <4 x i64>, <4 x i64> *%a2, align 32 @@ -262,7 +262,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = mul <8 x i32> %a0, %a1 %2 = load <8 x i32>, <8 x i32> *%a2, align 32 %3 = mul <8 x i32> %1, %2 @@ -286,7 +286,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = mul <16 x i16> %a0, %a1 %2 = load <16 x i16>, <16 x i16> *%a2, align 32 %3 = mul <16 x i16> %1, %2 @@ -313,7 +313,7 @@ ; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = or <4 x i64> %a0, %a1 %2 = load <4 x i64>, <4 x i64> *%a2, align 32 %3 = or <4 x i64> %1, %2 @@ -338,7 +338,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sub <32 x i8> %a0, %a1 %2 = load <32 x i8>, <32 x i8> *%a2, align 32 %3 = sub <32 x i8> %1, %2 @@ -362,7 +362,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sub <8 x i32> %a0, %a1 %2 = load <8 x i32>, <8 x i32> *%a2, align 32 %3 = sub <8 x i32> %1, %2 @@ -386,7 +386,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sub <4 x i64> %a0, %a1 %2 = load <4 x i64>, <4 x i64> *%a2, align 32 %3 = sub <4 x i64> %1, %2 @@ -410,7 +410,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sub <16 x i16> %a0, %a1 %2 = load <16 x i16>, <16 x i16> *%a2, align 32 %3 = sub <16 x i16> %1, %2 @@ -437,7 +437,7 @@ ; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: 
[1:0.25]
; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = xor <4 x i64> %a0, %a1
 %2 = load <4 x i64>, <4 x i64> *%a2, align 32
 %3 = xor <4 x i64> %1, %2
Index: test/CodeGen/X86/bmi-schedule.ll
===================================================================
--- test/CodeGen/X86/bmi-schedule.ll
+++ test/CodeGen/X86/bmi-schedule.ll
@@ -41,7 +41,7 @@
; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
; ZNVER1-NEXT: addl %edi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX %AX %EAX
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i16, i16 *%a2
 %2 = xor i16 %a0, -1
 %3 = and i16 %2, %a1
@@ -77,7 +77,7 @@
; ZNVER1-NEXT: andnl (%rdx), %edi, %eax # sched: [5:0.50]
; ZNVER1-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a2
 %2 = xor i32 %a0, -1
 %3 = and i32 %2, %a1
@@ -113,7 +113,7 @@
; ZNVER1-NEXT: andnq (%rdx), %rdi, %rax # sched: [5:0.50]
; ZNVER1-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a2
 %2 = xor i64 %a0, -1
 %3 = and i64 %2, %a1
@@ -149,7 +149,7 @@
; ZNVER1-NEXT: bextrl %edi, (%rdx), %ecx
; ZNVER1-NEXT: bextrl %edi, %esi, %eax
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a2
 %2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %1, i32 %a0)
 %3 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a1, i32 %a0)
@@ -185,7 +185,7 @@
; ZNVER1-NEXT: bextrq %rdi, (%rdx), %rcx
; ZNVER1-NEXT: bextrq %rdi, %rsi, %rax
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a2
 %2 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %1, i64 %a0)
 %3 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a1, i64 %a0)
@@ -221,7 +221,7 @@
; ZNVER1-NEXT: blsil (%rsi), %ecx
; ZNVER1-NEXT: blsil %edi, %eax
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a1
 %2 = sub i32 0, %1
 %3 = sub i32 0, %a0
@@ -258,7 +258,7 @@
; ZNVER1-NEXT: blsiq (%rsi), %rcx
; ZNVER1-NEXT: blsiq %rdi, %rax
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a1
 %2 = sub i64 0, %1
 %3 = sub i64 0, %a0
@@ -295,7 +295,7 @@
; ZNVER1-NEXT: blsmskl (%rsi), %ecx
; ZNVER1-NEXT: blsmskl %edi, %eax
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a1
 %2 = sub i32 %1, 1
 %3 = sub i32 %a0, 1
@@ -332,7 +332,7 @@
; ZNVER1-NEXT: blsmskq (%rsi), %rcx
; ZNVER1-NEXT: blsmskq %rdi, %rax
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a1
 %2 = sub i64 %1, 1
 %3 = sub i64 %a0, 1
@@ -369,7 +369,7 @@
; ZNVER1-NEXT: blsrl (%rsi), %ecx
; ZNVER1-NEXT: blsrl %edi, %eax
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a1
 %2 = sub i32 %1, 1
 %3 = sub i32 %a0, 1
@@ -406,7 +406,7 @@
; ZNVER1-NEXT: blsrq (%rsi), %rcx
; ZNVER1-NEXT: blsrq %rdi, %rax
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a1
 %2 = sub i64 %1, 1
 %3 = sub i64 %a0, 1
@@ -447,7 +447,7 @@
; ZNVER1-NEXT: tzcntw %di, %ax
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX %AX %EAX
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i16, i16 *%a1
 %2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
 %3 = tail call i16 @llvm.cttz.i16( i16 %a0, i1 false )
@@ -483,7 +483,7 @@
; ZNVER1-NEXT: tzcntl (%rsi), %ecx
; ZNVER1-NEXT: tzcntl %edi, %eax
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a1
 %2 = tail call i32 @llvm.cttz.i32( i32 %1, i1 false )
 %3 = tail call i32 @llvm.cttz.i32( i32 %a0, i1 false )
@@ -519,7 +519,7 @@
; ZNVER1-NEXT: tzcntq (%rsi), %rcx
; ZNVER1-NEXT: tzcntq %rdi, %rax
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a1
 %2 = tail call i64 @llvm.cttz.i64( i64 %1, i1 false )
 %3 = tail call i64 @llvm.cttz.i64( i64 %a0, i1 false )
Index: test/CodeGen/X86/bmi2-schedule.ll
===================================================================
--- test/CodeGen/X86/bmi2-schedule.ll
+++ test/CodeGen/X86/bmi2-schedule.ll
@@ -25,7 +25,7 @@
; ZNVER1-NEXT: bzhil %edi, (%rdx), %ecx
; ZNVER1-NEXT: bzhil %edi, %esi, %eax
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a2
 %2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %1, i32 %a0)
 %3 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a1, i32 %a0)
@@ -54,7 +54,7 @@
; ZNVER1-NEXT: bzhiq %rdi, (%rdx), %rcx
; ZNVER1-NEXT: bzhiq %rdi, %rsi, %rax
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a2
 %2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %1, i64 %a0)
 %3 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a1, i64 %a0)
@@ -83,7 +83,7 @@
; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx
; ZNVER1-NEXT: pdepl %esi, %edi, %eax
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a2
 %2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %1)
 %3 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
@@ -112,7 +112,7 @@
; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx
; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a2
 %2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %1)
 %3 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
@@ -141,7 +141,7 @@
; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx
; ZNVER1-NEXT: pextl %esi, %edi, %eax
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i32, i32 *%a2
 %2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %1)
 %3 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
@@ -170,7 +170,7 @@
; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx
; ZNVER1-NEXT: pextq %rsi, %rdi, %rax
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load i64, i64 *%a2
 %2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %1)
 %3 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
Index: test/CodeGen/X86/f16c-schedule.ll
===================================================================
--- test/CodeGen/X86/f16c-schedule.ll
+++ test/CodeGen/X86/f16c-schedule.ll
@@ -40,7 +40,7 @@
; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <8 x i16>, <8 x i16> *%a1
 %2 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %1)
 %3 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0)
@@ -83,7 +83,7 @@
; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <8 x i16>, <8 x i16> *%a1
 %2 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %1)
 %3 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0)
@@ -121,7 +121,7 @@
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
 %2 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a1, i32 0)
 %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <4 x i32>
@@ -163,7 +163,7 @@
; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [12:1.00]
; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
 %2 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a1, i32 0)
 store <8 x i16> %2, <8 x i16> *%a2
Index: test/CodeGen/X86/lea32-schedule.ll
===================================================================
--- test/CodeGen/X86/lea32-schedule.ll
+++ test/CodeGen/X86/lea32-schedule.ll
@@ -57,7 +57,7 @@
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI %EDI %RDI
; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %2 = add nsw i32 %0, -24
 ret i32 %2
}
@@ -109,7 +109,7 @@
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI %EDI %RDI
; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %2 = add nsw i32 %0, 1024
 ret i32 %2
}
@@ -169,7 +169,7 @@
; ZNVER1-NEXT: # kill: %ESI %ESI %RSI
; ZNVER1-NEXT: # kill: %EDI %EDI %RDI
; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %3 = add nsw i32 %1, %0
 ret i32 %3
}
@@ -230,7 +230,7 @@
; ZNVER1-NEXT: # kill: %ESI %ESI %RSI
; ZNVER1-NEXT: # kill: %EDI %EDI %RDI
; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %3 = add i32 %0, 16
 %4 = add i32 %3, %1
 ret i32 %4
@@ -294,7 +294,7 @@
; ZNVER1-NEXT: # kill: %ESI %ESI %RSI
; ZNVER1-NEXT: # kill: %EDI %EDI %RDI
; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %3 = add i32 %0, -4096
 %4 = add i32 %3, %1
 ret
i32 %4 @@ -347,7 +347,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: # kill: %EDI %EDI %RDI ; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i32 %0, 3 ret i32 %2 } @@ -401,7 +401,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: # kill: %EDI %EDI %RDI ; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i32 %0, 3 %3 = add nsw i32 %2, -32 ret i32 %3 @@ -458,7 +458,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: # kill: %EDI %EDI %RDI ; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i32 %0, 9 %3 = add nsw i32 %2, 10000 ret i32 %3 @@ -518,7 +518,7 @@ ; ZNVER1-NEXT: # kill: %ESI %ESI %RSI ; ZNVER1-NEXT: # kill: %EDI %EDI %RDI ; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i32 %1, 1 %4 = add nsw i32 %3, %0 ret i32 %4 @@ -580,7 +580,7 @@ ; ZNVER1-NEXT: # kill: %ESI %ESI %RSI ; ZNVER1-NEXT: # kill: %EDI %EDI %RDI ; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i32 %1, 2 %4 = add i32 %0, 96 %5 = add i32 %4, %3 @@ -645,7 +645,7 @@ ; ZNVER1-NEXT: # kill: %ESI %ESI %RSI ; ZNVER1-NEXT: # kill: %EDI %EDI %RDI ; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i32 %1, 3 %4 = add i32 %0, -1200 %5 = add i32 %4, %3 Index: test/CodeGen/X86/lea64-schedule.ll =================================================================== --- test/CodeGen/X86/lea64-schedule.ll +++ test/CodeGen/X86/lea64-schedule.ll @@ -50,7 +50,7 @@ ; ZNVER1-LABEL: test_lea_offset: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq -24(%rdi), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = add nsw i64 %0, -24 ret i64 %2 } @@ -95,7 +95,7 @@ ; ZNVER1-LABEL: test_lea_offset_big: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = add nsw i64 %0, 1024 ret i64 %2 } @@ -141,7 +141,7 @@ ; ZNVER1-LABEL: test_lea_add: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = add nsw i64 %1, %0 ret i64 %3 } @@ -188,7 +188,7 @@ ; ZNVER1-LABEL: test_lea_add_offset: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = add i64 %0, 16 %4 = add i64 %3, %1 ret i64 %4 @@ -238,7 +238,7 @@ ; ZNVER1-LABEL: test_lea_add_offset_big: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = add i64 %0, -4096 %4 = add i64 %3, %1 ret i64 %4 @@ -284,7 +284,7 @@ ; ZNVER1-LABEL: test_lea_mul: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i64 %0, 3 ret i64 %2 } @@ -331,7 +331,7 @@ ; ZNVER1-LABEL: test_lea_mul_offset: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; 
ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i64 %0, 3 %3 = add nsw i64 %2, -32 ret i64 %3 @@ -381,7 +381,7 @@ ; ZNVER1-LABEL: test_lea_mul_offset_big: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %2 = mul nsw i64 %0, 9 %3 = add nsw i64 %2, 10000 ret i64 %3 @@ -427,7 +427,7 @@ ; ZNVER1-LABEL: test_lea_add_scale: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i64 %1, 1 %4 = add nsw i64 %3, %0 ret i64 %4 @@ -475,7 +475,7 @@ ; ZNVER1-LABEL: test_lea_add_scale_offset: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i64 %1, 2 %4 = add i64 %0, 96 %5 = add i64 %4, %3 @@ -526,7 +526,7 @@ ; ZNVER1-LABEL: test_lea_add_scale_offset_big: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %3 = shl i64 %1, 3 %4 = add i64 %0, -1200 %5 = add i64 %4, %3 Index: test/CodeGen/X86/lzcnt-schedule.ll =================================================================== --- test/CodeGen/X86/lzcnt-schedule.ll +++ test/CodeGen/X86/lzcnt-schedule.ll @@ -37,7 +37,7 @@ ; ZNVER1-NEXT: lzcntw %di, %ax ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25] ; ZNVER1-NEXT: # kill: %AX %AX %EAX -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i16, i16 *%a1 %2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false ) %3 = tail call i16 @llvm.ctlz.i16( i16 %a0, i1 false ) @@ -73,7 +73,7 @@ ; ZNVER1-NEXT: lzcntl (%rsi), %ecx ; ZNVER1-NEXT: lzcntl %edi, %eax ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i32, i32 *%a1 %2 = tail call i32 @llvm.ctlz.i32( i32 %1, i1 false ) %3 = tail call i32 @llvm.ctlz.i32( i32 %a0, i1 false ) @@ -109,7 +109,7 @@ ; ZNVER1-NEXT: lzcntq (%rsi), %rcx ; ZNVER1-NEXT: lzcntq %rdi, %rax ; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i64, i64 *%a1 %2 = tail call i64 @llvm.ctlz.i64( i64 %1, i1 false ) %3 = tail call i64 @llvm.ctlz.i64( i64 %a0, i1 false ) Index: test/CodeGen/X86/popcnt-schedule.ll =================================================================== --- test/CodeGen/X86/popcnt-schedule.ll +++ test/CodeGen/X86/popcnt-schedule.ll @@ -57,7 +57,7 @@ ; ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00] ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25] ; ZNVER1-NEXT: # kill: %AX %AX %EAX -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i16, i16 *%a1 %2 = tail call i16 @llvm.ctpop.i16( i16 %1 ) %3 = tail call i16 @llvm.ctpop.i16( i16 %a0 ) @@ -107,7 +107,7 @@ ; ZNVER1-NEXT: popcntl (%rsi), %ecx # sched: [10:1.00] ; ZNVER1-NEXT: popcntl %edi, %eax # sched: [3:1.00] ; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i32, i32 *%a1 %2 = tail call i32 @llvm.ctpop.i32( i32 %1 ) %3 = tail call i32 @llvm.ctpop.i32( i32 %a0 ) @@ -157,7 +157,7 @@ ; ZNVER1-NEXT: popcntq (%rsi), %rcx # sched: [10:1.00] ; ZNVER1-NEXT: popcntq %rdi, %rax # sched: [3:1.00] ; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; 
ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i64, i64 *%a1 %2 = tail call i64 @llvm.ctpop.i64( i64 %1 ) %3 = tail call i64 @llvm.ctpop.i64( i64 %a0 ) Index: test/CodeGen/X86/sse-schedule.ll =================================================================== --- test/CodeGen/X86/sse-schedule.ll +++ test/CodeGen/X86/sse-schedule.ll @@ -50,7 +50,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <4 x float> %a0, %a1 %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = fadd <4 x float> %1, %2 @@ -98,7 +98,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd float %a0, %a1 %2 = load float, float *%a2, align 4 %3 = fadd float %1, %2 @@ -150,7 +150,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x float> %a0 to <4 x i32> %2 = bitcast <4 x float> %a1 to <4 x i32> %3 = and <4 x i32> %1, %2 @@ -206,7 +206,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x float> %a0 to <4 x i32> %2 = bitcast <4 x float> %a1 to <4 x i32> %3 = xor <4 x i32> %1, @@ -267,7 +267,7 @@ ; ZNVER1-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00] ; ZNVER1-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:1.00] ; ZNVER1-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fcmp oeq <4 x float> %a0, %a1 %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = fcmp oeq <4 x float> %a0, %2 @@ -318,7 +318,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <4 x float> undef, float %a0, i32 0 %2 = insertelement <4 x float> undef, float %a1, i32 0 %3 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %1, <4 x float> %2, i8 0) @@ -427,7 +427,7 @@ ; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25] ; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25] ; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 4 %3 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %2) @@ -484,7 +484,7 @@ ; ZNVER1-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp i32 %a0 to float %2 = load i32, i32 *%a1, align 4 %3 = sitofp i32 %2 to float @@ -540,7 +540,7 @@ ; ZNVER1-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; 
ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp i64 %a0 to float %2 = load i64, i64 *%a1, align 8 %3 = sitofp i64 %2 to float @@ -596,7 +596,7 @@ ; ZNVER1-NEXT: vcvtss2si (%rdi), %eax # sched: [12:1.00] ; ZNVER1-NEXT: vcvtss2si %xmm0, %ecx # sched: [5:1.00] ; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <4 x float> undef, float %a0, i32 0 %2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %1) %3 = load float, float *%a1, align 4 @@ -655,7 +655,7 @@ ; ZNVER1-NEXT: vcvtss2si (%rdi), %rax # sched: [12:1.00] ; ZNVER1-NEXT: vcvtss2si %xmm0, %rcx # sched: [5:1.00] ; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <4 x float> undef, float %a0, i32 0 %2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %1) %3 = load float, float *%a1, align 4 @@ -714,7 +714,7 @@ ; ZNVER1-NEXT: vcvttss2si (%rdi), %eax # sched: [12:1.00] ; ZNVER1-NEXT: vcvttss2si %xmm0, %ecx # sched: [5:1.00] ; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi float %a0 to i32 %2 = load float, float *%a1, align 4 %3 = fptosi float %2 to i32 @@ -770,7 +770,7 @@ ; ZNVER1-NEXT: vcvttss2si (%rdi), %rax # sched: [12:1.00] ; ZNVER1-NEXT: vcvttss2si %xmm0, %rcx # sched: [5:1.00] ; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi float %a0 to i64 %2 = load float, float *%a1, align 4 %3 = fptosi float %2 to i64 @@ -819,7 +819,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [15:1.00] ; ZNVER1-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [22:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fdiv <4 x float> %a0, %a1 %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = fdiv <4 x float> %1, %2 @@ -867,7 +867,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [15:1.00] ; ZNVER1-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [22:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fdiv float %a0, %a1 %2 = load float, float *%a2, align 4 %3 = fdiv float %1, %2 @@ -915,7 +915,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50] ; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = alloca i32, align 4 %2 = bitcast i32* %1 to i8* store i32 %a0, i32* %1 @@ -965,7 +965,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %2) @@ -1014,7 +1014,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %1, <4 x float> %2) 
@@ -1063,7 +1063,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %1, <4 x float> %2) @@ -1112,7 +1112,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %2) @@ -1168,7 +1168,7 @@ ; ZNVER1-NEXT: vmovaps (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x float>, <4 x float> *%a0, align 16 %2 = fadd <4 x float> %1, %1 store <4 x float> %2, <4 x float> *%a1, align 16 @@ -1217,7 +1217,7 @@ ; ZNVER1-LABEL: test_movhlps: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> ret <4 x float> %1 } @@ -1274,7 +1274,7 @@ ; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast x86_mmx* %a2 to <2 x float>* %2 = load <2 x float>, <2 x float> *%1, align 8 %3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> @@ -1328,7 +1328,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> %2 = fadd <4 x float> %a1, %1 ret <4 x float> %2 @@ -1382,7 +1382,7 @@ ; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast x86_mmx* %a2 to <2 x float>* %2 = load <2 x float>, <2 x float> *%1, align 8 %3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> @@ -1429,7 +1429,7 @@ ; ZNVER1-LABEL: test_movmskps: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0) ret i32 %1 } @@ -1475,7 +1475,7 @@ ; ZNVER1-LABEL: test_movntps: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0 ret void } @@ -1528,7 +1528,7 @@ ; ZNVER1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50] ; ZNVER1-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; 
ZNVER1-NEXT: vmovss %xmm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load float, float* %a0, align 1 %2 = fadd float %1, %1 store float %2, float *%a1, align 1 @@ -1575,7 +1575,7 @@ ; ZNVER1-LABEL: test_movss_reg: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> ret <4 x float> %1 } @@ -1628,7 +1628,7 @@ ; ZNVER1-NEXT: vmovups (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovups %xmm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x float>, <4 x float> *%a0, align 1 %2 = fadd <4 x float> %1, %1 store <4 x float> %2, <4 x float> *%a1, align 1 @@ -1676,7 +1676,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [12:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fmul <4 x float> %a0, %a1 %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = fmul <4 x float> %1, %2 @@ -1724,7 +1724,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [12:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fmul float %a0, %a1 %2 = load float, float *%a2, align 4 %3 = fmul float %1, %2 @@ -1776,7 +1776,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x float> %a0 to <4 x i32> %2 = bitcast <4 x float> %a1 to <4 x i32> %3 = or <4 x i32> %1, %2 @@ -1827,7 +1827,7 @@ ; ZNVER1-LABEL: test_prefetchnta: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: prefetchnta (%rdi) # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1) ret void } @@ -1883,7 +1883,7 @@ ; ZNVER1-NEXT: vrcpps (%rdi), %xmm1 # sched: [12:0.50] ; ZNVER1-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0) %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %2) @@ -1949,7 +1949,7 @@ ; ZNVER1-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [12:0.50] ; ZNVER1-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [12:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <4 x float> undef, float %a0, i32 0 %2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %1) %3 = load float, float *%a1, align 4 @@ -2010,7 +2010,7 @@ ; ZNVER1-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [12:0.50] ; ZNVER1-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0) %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %2) @@ -2076,7 
+2076,7 @@ ; ZNVER1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:0.50] ; ZNVER1-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <4 x float> undef, float %a0, i32 0 %2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %1) %3 = load float, float *%a1, align 4 @@ -2127,7 +2127,7 @@ ; ZNVER1-LABEL: test_sfence: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: sfence # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.sse.sfence() ret void } @@ -2178,7 +2178,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50] ; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> @@ -2234,7 +2234,7 @@ ; ZNVER1-NEXT: vsqrtps (%rdi), %xmm1 # sched: [27:1.00] ; ZNVER1-NEXT: vsqrtps %xmm0, %xmm0 # sched: [20:1.00] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0) %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %2) @@ -2300,7 +2300,7 @@ ; ZNVER1-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [27:1.00] ; ZNVER1-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [27:1.00] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0) %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %2) @@ -2350,7 +2350,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:0.50] ; ZNVER1-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = alloca i32, align 4 %2 = bitcast i32* %1 to i8* call void @llvm.x86.sse.stmxcsr(i8* %2) @@ -2400,7 +2400,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fsub <4 x float> %a0, %a1 %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = fsub <4 x float> %1, %2 @@ -2448,7 +2448,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fsub float %a0, %a1 %2 = load float, float *%a2, align 4 %3 = fsub float %1, %2 @@ -2552,7 +2552,7 @@ ; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25] ; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25] ; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 4 %3 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %2) @@ -2606,7 +2606,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] 
sched: [1:0.50] ; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> @@ -2658,7 +2658,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50] ; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> @@ -2710,7 +2710,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x float> %a0 to <4 x i32> %2 = bitcast <4 x float> %a1 to <4 x i32> %3 = xor <4 x i32> %1, %2 Index: test/CodeGen/X86/sse2-schedule.ll =================================================================== --- test/CodeGen/X86/sse2-schedule.ll +++ test/CodeGen/X86/sse2-schedule.ll @@ -50,7 +50,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <2 x double> %a0, %a1 %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = fadd <2 x double> %1, %2 @@ -98,7 +98,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd double %a0, %a1 %2 = load double, double *%a2, align 8 %3 = fadd double %1, %2 @@ -153,7 +153,7 @@ ; ZNVER1-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <2 x double> %a0 to <4 x i32> %2 = bitcast <2 x double> %a1 to <4 x i32> %3 = and <4 x i32> %1, %2 @@ -213,7 +213,7 @@ ; ZNVER1-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <2 x double> %a0 to <4 x i32> %2 = bitcast <2 x double> %a1 to <4 x i32> %3 = xor <4 x i32> %1, @@ -275,7 +275,7 @@ ; ZNVER1-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00] ; ZNVER1-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] ; ZNVER1-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fcmp oeq <2 x double> %a0, %a1 %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = fcmp oeq <2 x double> %a0, %2 @@ -326,7 +326,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <2 x double> undef, double %a0, i32 0 %2 = insertelement <2 x double> 
undef, double %a1, i32 0 %3 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %1, <2 x double> %2, i8 0) @@ -435,7 +435,7 @@ ; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25] ; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25] ; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 8 %3 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %2) @@ -492,7 +492,7 @@ ; ZNVER1-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> %2 = sitofp <2 x i32> %1 to <2 x double> %3 = load <4 x i32>, <4 x i32>*%a1, align 16 @@ -551,7 +551,7 @@ ; ZNVER1-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp <4 x i32> %a0 to <4 x float> %2 = load <4 x i32>, <4 x i32>*%a1, align 16 %3 = sitofp <4 x i32> %2 to <4 x float> @@ -608,7 +608,7 @@ ; ZNVER1-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0) %2 = load <2 x double>, <2 x double> *%a1, align 16 %3 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %2) @@ -666,7 +666,7 @@ ; ZNVER1-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0) %2 = load <2 x double>, <2 x double> *%a1, align 16 %3 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %2) @@ -724,7 +724,7 @@ ; ZNVER1-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0) %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %2) @@ -782,7 +782,7 @@ ; ZNVER1-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> undef, <2 x i32> %2 = fpext <2 x float> %1 to <2 x double> %3 = load <4 x float>, <4 x float> *%a1, align 16 @@ -840,7 +840,7 @@ ; ZNVER1-NEXT: vcvtsd2si (%rdi), %eax # sched: [12:1.00] ; ZNVER1-NEXT: vcvtsd2si %xmm0, %ecx # sched: [5:1.00] ; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <2 x double> undef, double %a0, i32 0 %2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %1) %3 = load double, double *%a1, align 8 @@ 
-899,7 +899,7 @@ ; ZNVER1-NEXT: vcvtsd2si (%rdi), %rax # sched: [12:1.00] ; ZNVER1-NEXT: vcvtsd2si %xmm0, %rcx # sched: [5:1.00] ; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <2 x double> undef, double %a0, i32 0 %2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %1) %3 = load double, double *%a1, align 8 @@ -966,7 +966,7 @@ ; ZNVER1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [5:1.00] ; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptrunc double %a0 to float %2 = load double, double *%a1, align 8 %3 = fptrunc double %2 to float @@ -1022,7 +1022,7 @@ ; ZNVER1-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp i32 %a0 to double %2 = load i32, i32 *%a1, align 8 %3 = sitofp i32 %2 to double @@ -1078,7 +1078,7 @@ ; ZNVER1-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp i64 %a0 to double %2 = load i64, i64 *%a1, align 8 %3 = sitofp i64 %2 to double @@ -1144,7 +1144,7 @@ ; ZNVER1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [5:1.00] ; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fpext float %a0 to double %2 = load float, float *%a1, align 4 %3 = fpext float %2 to double @@ -1201,7 +1201,7 @@ ; ZNVER1-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi <2 x double> %a0 to <2 x i32> %2 = shufflevector <2 x i32> %1, <2 x i32> undef, <4 x i32> %3 = load <2 x double>, <2 x double> *%a1, align 16 @@ -1260,7 +1260,7 @@ ; ZNVER1-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi <4 x float> %a0 to <4 x i32> %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = fptosi <4 x float> %2 to <4 x i32> @@ -1316,7 +1316,7 @@ ; ZNVER1-NEXT: vcvttsd2si (%rdi), %eax # sched: [12:1.00] ; ZNVER1-NEXT: vcvttsd2si %xmm0, %ecx # sched: [5:1.00] ; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi double %a0 to i32 %2 = load double, double *%a1, align 8 %3 = fptosi double %2 to i32 @@ -1372,7 +1372,7 @@ ; ZNVER1-NEXT: vcvttsd2si (%rdi), %rax # sched: [12:1.00] ; ZNVER1-NEXT: vcvttsd2si %xmm0, %rcx # sched: [5:1.00] ; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi double %a0 to i64 %2 = load double, double *%a1, align 8 %3 = fptosi double %2 to i64 @@ -1421,7 +1421,7 @@ ; ZNVER1: # BB#0: ; 
ZNVER1-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [15:1.00] ; ZNVER1-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [22:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fdiv <2 x double> %a0, %a1 %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = fdiv <2 x double> %1, %2 @@ -1469,7 +1469,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [15:1.00] ; ZNVER1-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [22:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fdiv double %a0, %a1 %2 = load double, double *%a2, align 8 %3 = fdiv double %1, %2 @@ -1516,7 +1516,7 @@ ; ZNVER1-LABEL: test_lfence: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: lfence # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.sse2.lfence() ret void } @@ -1562,7 +1562,7 @@ ; ZNVER1-LABEL: test_mfence: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: mfence # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.sse2.mfence() ret void } @@ -1606,7 +1606,7 @@ ; ZNVER1-LABEL: test_maskmovdqu: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2) ret void } @@ -1653,7 +1653,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %1, <2 x double> %2) @@ -1702,7 +1702,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %1, <2 x double> %2) @@ -1751,7 +1751,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %1, <2 x double> %2) @@ -1800,7 +1800,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %1, <2 x double> %2) @@ -1856,7 +1856,7 @@ ; ZNVER1-NEXT: vmovapd (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <2 x double>, <2 x double> *%a0, align 16 %2 = fadd <2 x 
double> %1, %1 store <2 x double> %2, <2 x double> *%a1, align 16 @@ -1911,7 +1911,7 @@ ; ZNVER1-NEXT: vmovdqa (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <2 x i64>, <2 x i64> *%a0, align 16 %2 = add <2 x i64> %1, %1 store <2 x i64> %2, <2 x i64> *%a1, align 16 @@ -1966,7 +1966,7 @@ ; ZNVER1-NEXT: vmovdqu (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <2 x i64>, <2 x i64> *%a0, align 1 %2 = add <2 x i64> %1, %1 store <2 x i64> %2, <2 x i64> *%a1, align 1 @@ -2042,7 +2042,7 @@ ; ZNVER1-NEXT: vmovd %xmm1, (%rsi) # sched: [1:0.50] ; ZNVER1-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vmovd %xmm0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <4 x i32> undef, i32 %a1, i32 0 %2 = load i32, i32 *%a2 %3 = insertelement <4 x i32> undef, i32 %2, i32 0 @@ -2123,7 +2123,7 @@ ; ZNVER1-NEXT: vmovq %xmm1, (%rsi) # sched: [1:0.50] ; ZNVER1-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vmovq %xmm0, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <2 x i64> undef, i64 %a1, i64 0 %2 = load i64, i64 *%a2 %3 = insertelement <2 x i64> undef, i64 %2, i64 0 @@ -2183,7 +2183,7 @@ ; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast x86_mmx* %a2 to double* %2 = load double, double *%1, align 8 %3 = insertelement <2 x double> %a1, double %2, i32 1 @@ -2241,7 +2241,7 @@ ; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast x86_mmx* %a2 to double* %2 = load double, double *%1, align 8 %3 = insertelement <2 x double> %a1, double %2, i32 0 @@ -2287,7 +2287,7 @@ ; ZNVER1-LABEL: test_movmskpd: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovmskpd %xmm0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0) ret i32 %1 } @@ -2336,7 +2336,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <2 x i64> %a0, %a0 store <2 x i64> %1, <2 x i64> *%a1, align 16, !nontemporal !0 ret void @@ -2383,7 +2383,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <2 x double> %a0, %a0 store <2 x double> %1, <2 x double> *%a1, align 16, !nontemporal !0 ret void @@ -2437,7 +2437,7 @@ ; ZNVER1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; 
ZNVER1-NEXT: vmovq %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i64, i64* %a1, align 1 %2 = insertelement <2 x i64> zeroinitializer, i64 %1, i32 0 %3 = add <2 x i64> %a0, %2 @@ -2489,7 +2489,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.25] ; ZNVER1-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> %2 = add <2 x i64> %a1, %1 ret <2 x i64> %2 @@ -2543,7 +2543,7 @@ ; ZNVER1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [8:0.50] ; ZNVER1-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load double, double* %a0, align 1 %2 = fadd double %1, %1 store double %2, double *%a1, align 1 @@ -2591,7 +2591,7 @@ ; ZNVER1-LABEL: test_movsd_reg: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> ret <2 x double> %1 } @@ -2644,7 +2644,7 @@ ; ZNVER1-NEXT: vmovupd (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <2 x double>, <2 x double> *%a0, align 1 %2 = fadd <2 x double> %1, %1 store <2 x double> %2, <2 x double> *%a1, align 1 @@ -2692,7 +2692,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [12:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fmul <2 x double> %a0, %a1 %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = fmul <2 x double> %1, %2 @@ -2740,7 +2740,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00] ; ZNVER1-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [12:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fmul double %a0, %a1 %2 = load double, double *%a2, align 8 %3 = fmul double %1, %2 @@ -2795,7 +2795,7 @@ ; ZNVER1-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <2 x double> %a0 to <4 x i32> %2 = bitcast <2 x double> %a1 to <4 x i32> %3 = or <4 x i32> %1, %2 @@ -2852,7 +2852,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1) %2 = bitcast <8 x i16> %1 to <4 x i32> %3 = load <4 x i32>, <4 x i32> *%a2, align 16 @@ -2906,7 +2906,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1) %2 = bitcast <16 x i8> %1 to <8 x i16> %3 = load 
<8 x i16>, <8 x i16> *%a2, align 16 @@ -2960,7 +2960,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1) %2 = bitcast <16 x i8> %1 to <8 x i16> %3 = load <8 x i16>, <8 x i16> *%a2, align 16 @@ -3014,7 +3014,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <16 x i8> %a0, %a1 %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = add <16 x i8> %1, %2 @@ -3066,7 +3066,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <4 x i32> %a0, %a1 %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = add <4 x i32> %1, %2 @@ -3114,7 +3114,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <2 x i64> %a0, %a1 %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = add <2 x i64> %1, %2 @@ -3166,7 +3166,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %1, <16 x i8> %2) @@ -3219,7 +3219,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %1, <8 x i16> %2) @@ -3272,7 +3272,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %1, <16 x i8> %2) @@ -3325,7 +3325,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %1, <8 x i16> %2) @@ -3378,7 +3378,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <8 x i16> %a0, %a1 %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = add <8 x i16> %1, %2 @@ -3433,7 +3433,7 @@ 
; ZNVER1-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = and <2 x i64> %a0, %a1 %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = and <2 x i64> %1, %2 @@ -3495,7 +3495,7 @@ ; ZNVER1-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = xor <2 x i64> %a0, %2 = and <2 x i64> %a1, %1 %3 = load <2 x i64>, <2 x i64> *%a2, align 16 @@ -3550,7 +3550,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %1, <16 x i8> %2) @@ -3603,7 +3603,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %1, <8 x i16> %2) @@ -3661,7 +3661,7 @@ ; ZNVER1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = icmp eq <16 x i8> %a0, %a1 %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = icmp eq <16 x i8> %a0, %2 @@ -3720,7 +3720,7 @@ ; ZNVER1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = icmp eq <4 x i32> %a0, %a1 %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = icmp eq <4 x i32> %a0, %2 @@ -3779,7 +3779,7 @@ ; ZNVER1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = icmp eq <8 x i16> %a0, %a1 %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = icmp eq <8 x i16> %a0, %2 @@ -3839,7 +3839,7 @@ ; ZNVER1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = icmp sgt <16 x i8> %a0, %a1 %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = icmp sgt <16 x i8> %a0, %2 @@ -3899,7 +3899,7 @@ ; ZNVER1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = icmp sgt <4 x i32> %a0, %a1 %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = icmp eq <4 x i32> %a0, %2 @@ 
-3959,7 +3959,7 @@ ; ZNVER1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = icmp sgt <8 x i16> %a0, %a1 %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = icmp sgt <8 x i16> %a0, %2 @@ -4009,7 +4009,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.25] ; ZNVER1-NEXT: # kill: %AX %AX %EAX -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = extractelement <8 x i16> %a0, i32 6 ret i16 %1 } @@ -4059,7 +4059,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <8 x i16> %a0, i16 %a1, i32 1 %2 = load i16, i16 *%a2 %3 = insertelement <8 x i16> %1, i16 %2, i32 3 @@ -4115,7 +4115,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1) %2 = bitcast <4 x i32> %1 to <8 x i16> %3 = load <8 x i16>, <8 x i16> *%a2, align 16 @@ -4169,7 +4169,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %1, <8 x i16> %2) @@ -4222,7 +4222,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %1, <16 x i8> %2) @@ -4275,7 +4275,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %1, <8 x i16> %2) @@ -4328,7 +4328,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %1, <16 x i8> %2) @@ -4372,7 +4372,7 @@ ; ZNVER1-LABEL: test_pmovmskb: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmovmskb %xmm0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0) ret i32 %1 } @@ -4419,7 +4419,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmulhuw 
(%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %1, <8 x i16> %2) @@ -4468,7 +4468,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %1, <8 x i16> %2) @@ -4517,7 +4517,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = mul <8 x i16> %a0, %a1 %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = mul <8 x i16> %1, %2 @@ -4573,7 +4573,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1) %2 = bitcast <2 x i64> %1 to <4 x i32> %3 = load <4 x i32>, <4 x i32> *%a2, align 16 @@ -4630,7 +4630,7 @@ ; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = or <2 x i64> %a0, %a1 %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = or <2 x i64> %1, %2 @@ -4687,7 +4687,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1) %2 = bitcast <2 x i64> %1 to <16 x i8> %3 = load <16 x i8>, <16 x i8> *%a2, align 16 @@ -4746,7 +4746,7 @@ ; ZNVER1-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50] ; ZNVER1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.25] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> %2 = load <4 x i32>, <4 x i32> *%a1, align 16 %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> @@ -4804,7 +4804,7 @@ ; ZNVER1-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [8:0.50] ; ZNVER1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.25] ; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> %2 = load <8 x i16>, <8 x i16> *%a1, align 16 %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> @@ -4862,7 +4862,7 @@ ; ZNVER1-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [8:0.50] ; ZNVER1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.25] ; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> 
%a0, <8 x i16> undef, <8 x i32> %2 = load <8 x i16>, <8 x i16> *%a1, align 16 %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> @@ -4918,7 +4918,7 @@ ; ZNVER1-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %1, <4 x i32> %2) @@ -4968,7 +4968,7 @@ ; ZNVER1-LABEL: test_pslldq: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> ret <4 x i32> %1 } @@ -5021,7 +5021,7 @@ ; ZNVER1-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1) %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %1, <2 x i64> %2) @@ -5079,7 +5079,7 @@ ; ZNVER1-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %1, <8 x i16> %2) @@ -5137,7 +5137,7 @@ ; ZNVER1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> %2) @@ -5195,7 +5195,7 @@ ; ZNVER1-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> %2) @@ -5253,7 +5253,7 @@ ; ZNVER1-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %1, <4 x i32> %2) @@ -5303,7 +5303,7 @@ ; ZNVER1-LABEL: test_psrldq: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector 
<4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> ret <4 x i32> %1 } @@ -5356,7 +5356,7 @@ ; ZNVER1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1) %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %1, <2 x i64> %2) @@ -5414,7 +5414,7 @@ ; ZNVER1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %1, <8 x i16> %2) @@ -5469,7 +5469,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sub <16 x i8> %a0, %a1 %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = sub <16 x i8> %1, %2 @@ -5521,7 +5521,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sub <4 x i32> %a0, %a1 %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = sub <4 x i32> %1, %2 @@ -5569,7 +5569,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sub <2 x i64> %a0, %a1 %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = sub <2 x i64> %1, %2 @@ -5621,7 +5621,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %1, <16 x i8> %2) @@ -5674,7 +5674,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %1, <8 x i16> %2) @@ -5727,7 +5727,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %1, <16 x i8> %2) @@ -5780,7 +5780,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: 
[1:0.50] %1 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %1, <8 x i16> %2) @@ -5833,7 +5833,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sub <8 x i16> %a0, %a1 %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = sub <8 x i16> %1, %2 @@ -5885,7 +5885,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.25] ; ZNVER1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> @@ -5942,7 +5942,7 @@ ; ZNVER1-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25] ; ZNVER1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [8:0.50] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> @@ -5998,7 +5998,7 @@ ; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.25] ; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = shufflevector <2 x i64> %a1, <2 x i64> %2, <2x i32> @@ -6051,7 +6051,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25] ; ZNVER1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> @@ -6103,7 +6103,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25] ; ZNVER1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> @@ -6160,7 +6160,7 @@ ; ZNVER1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.25] ; ZNVER1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [8:0.50] ; 
ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> @@ -6216,7 +6216,7 @@ ; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.25] ; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = shufflevector <2 x i64> %a1, <2 x i64> %2, <2x i32> @@ -6269,7 +6269,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25] ; ZNVER1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> @@ -6324,7 +6324,7 @@ ; ZNVER1-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = xor <2 x i64> %a0, %a1 %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = xor <2 x i64> %1, %2 @@ -6380,7 +6380,7 @@ ; ZNVER1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:0.50] ; ZNVER1-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> @@ -6437,7 +6437,7 @@ ; ZNVER1-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [27:1.00] ; ZNVER1-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [20:1.00] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0) %2 = load <2 x double>, <2 x double> *%a1, align 16 %3 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %2) @@ -6503,7 +6503,7 @@ ; ZNVER1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [27:1.00] ; ZNVER1-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [27:1.00] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0) %2 = load <2 x double>, <2 x double> *%a1, align 16 %3 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %2) @@ -6553,7 +6553,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fsub <2 x double> %a0, %a1 %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = fsub <2 x double> %1, %2 @@ -6601,7 +6601,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vsubsd (%rdi), %xmm0, 
%xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fsub double %a0, %a1 %2 = load double, double *%a2, align 8 %3 = fsub double %1, %2 @@ -6705,7 +6705,7 @@ ; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25] ; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25] ; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 8 %3 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %2) @@ -6762,7 +6762,7 @@ ; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50] ; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> @@ -6824,7 +6824,7 @@ ; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50] ; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = shufflevector <2 x double> %1, <2 x double> %2, <2 x i32> @@ -6880,7 +6880,7 @@ ; ZNVER1-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <2 x double> %a0 to <4 x i32> %2 = bitcast <2 x double> %a1 to <4 x i32> %3 = xor <4 x i32> %1, %2 Index: test/CodeGen/X86/sse3-schedule.ll =================================================================== --- test/CodeGen/X86/sse3-schedule.ll +++ test/CodeGen/X86/sse3-schedule.ll @@ -50,7 +50,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %1, <2 x double> %2) @@ -99,7 +99,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %1, <4 x float> %2) @@ -148,7 +148,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %1, <2 x double> %2) @@ 
-197,7 +197,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %2) @@ -246,7 +246,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %1, <2 x double> %2) @@ -295,7 +295,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %1, <4 x float> %2) @@ -339,7 +339,7 @@ ; ZNVER1-LABEL: test_lddqu: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vlddqu (%rdi), %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %a0) ret <16 x i8> %1 } @@ -393,7 +393,7 @@ ; ZNVER1-NEXT: leaq (%rdi), %rax # sched: [1:0.25] ; ZNVER1-NEXT: movl %esi, %ecx # sched: [1:0.25] ; ZNVER1-NEXT: monitor # sched: [100:?] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.sse3.monitor(i8* %a0, i32 %a1, i32 %a2) ret void } @@ -448,7 +448,7 @@ ; ZNVER1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [8:0.50] ; ZNVER1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer %2 = load <2 x double>, <2 x double> *%a1, align 16 %3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer @@ -505,7 +505,7 @@ ; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [8:0.50] ; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> @@ -562,7 +562,7 @@ ; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [8:0.50] ; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> @@ -618,7 +618,7 @@ ; ZNVER1-NEXT: movl %edi, %ecx # sched: [1:0.25] ; ZNVER1-NEXT: movl %esi, %eax # sched: [1:0.25] ; ZNVER1-NEXT: mwait # sched: [100:?] 
-; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.sse3.mwait(i32 %a0, i32 %a1) ret void } Index: test/CodeGen/X86/sse41-schedule.ll =================================================================== --- test/CodeGen/X86/sse41-schedule.ll +++ test/CodeGen/X86/sse41-schedule.ll @@ -49,7 +49,7 @@ ; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = fadd <2 x double> %a1, %1 @@ -92,7 +92,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50] ; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> @@ -140,7 +140,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) %2 = load <2 x double>, <2 x double> *%a3, align 16 %3 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %1, <2 x double> %2, <2 x double> %a2) @@ -189,7 +189,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) %2 = load <4 x float>, <4 x float> *%a3 %3 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %1, <4 x float> %2, <4 x float> %a2) @@ -232,7 +232,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %1, <2 x double> %2, i8 7) @@ -275,7 +275,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %1, <4 x float> %2, i8 7) @@ -318,7 +318,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:0.50] ; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17) %2 = load float, float *%a2 %3 = 
insertelement <4 x float> %1, float %2, i32 3 @@ -355,7 +355,7 @@ ; ZNVER1-LABEL: test_movntdqa: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %a0) ret <2 x i64> %1 } @@ -396,7 +396,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:?] ; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) %2 = bitcast <8 x i16> %1 to <16 x i8> %3 = load <16 x i8>, <16 x i8> *%a2, align 16 @@ -440,7 +440,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) %2 = bitcast <8 x i16> %1 to <4 x i32> %3 = load <4 x i32>, <4 x i32> *%a2, align 16 @@ -490,7 +490,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:1.00] ; ZNVER1-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) %2 = load <16 x i8>, <16 x i8> *%a3, align 16 %3 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %1, <16 x i8> %2, <16 x i8> %a2) @@ -533,7 +533,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50] ; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> @@ -575,7 +575,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = icmp eq <2 x i64> %a0, %a1 %2 = sext <2 x i1> %1 to <2 x i64> %3 = load <2 x i64>, <2 x i64>*%a2, align 16 @@ -619,7 +619,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpextrb $3, %xmm0, %eax # sched: [1:0.25] ; ZNVER1-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [8:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = extractelement <16 x i8> %a0, i32 3 %2 = extractelement <16 x i8> %a0, i32 1 store i8 %2, i8 *%a1 @@ -662,7 +662,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpextrd $3, %xmm0, %eax # sched: [1:0.25] ; ZNVER1-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [8:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = extractelement <4 x i32> %a0, i32 3 %2 = extractelement <4 x i32> %a0, i32 1 store i32 %2, i32 *%a1 @@ -704,7 +704,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpextrq $1, %xmm0, %rax # sched: [1:0.25] ; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = extractelement <2 x i64> %a0, i32 1 %2 = extractelement <2 x i64> %a0, i32 1 store i64 %2, i64 *%a2 @@ -746,7 +746,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpextrw $3, %xmm0, %eax # 
sched: [1:0.25] ; ZNVER1-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [8:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = extractelement <8 x i16> %a0, i32 3 %2 = extractelement <8 x i16> %a0, i32 1 store i16 %2, i16 *%a1 @@ -789,7 +789,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vphminposuw (%rdi), %xmm0 # sched: [11:1.00] ; ZNVER1-NEXT: vphminposuw %xmm0, %xmm0 # sched: [4:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <8 x i16>, <8 x i16> *%a0, align 16 %2 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %1) %3 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %2) @@ -832,7 +832,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <16 x i8> %a0, i8 %a1, i32 1 %2 = load i8, i8 *%a2 %3 = insertelement <16 x i8> %1, i8 %2, i32 3 @@ -874,7 +874,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <4 x i32> %a0, i32 %a1, i32 1 %2 = load i32, i32 *%a2 %3 = insertelement <4 x i32> %1, i32 %2, i32 3 @@ -922,7 +922,7 @@ ; ZNVER1-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = insertelement <2 x i64> %a0, i64 %a2, i32 1 %2 = load i64, i64 *%a3 %3 = insertelement <2 x i64> %a1, i64 %2, i32 1 @@ -965,7 +965,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %1, <16 x i8> %2) @@ -1008,7 +1008,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %1, <4 x i32> %2) @@ -1051,7 +1051,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %1, <4 x i32> %2) @@ -1094,7 +1094,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %1, <8 x i16> %2) @@ -1137,7 +1137,7 @@ ; ZNVER1: # 
BB#0: ; ZNVER1-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %1, <16 x i8> %2) @@ -1180,7 +1180,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %1, <4 x i32> %2) @@ -1223,7 +1223,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %1, <4 x i32> %2) @@ -1266,7 +1266,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %1, <8 x i16> %2) @@ -1316,7 +1316,7 @@ ; ZNVER1-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> %2 = sext <8 x i8> %1 to <8 x i16> %3 = load <8 x i8>, <8 x i8>* %a1, align 1 @@ -1367,7 +1367,7 @@ ; ZNVER1-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> %2 = sext <4 x i8> %1 to <4 x i32> %3 = load <4 x i8>, <4 x i8>* %a1, align 1 @@ -1418,7 +1418,7 @@ ; ZNVER1-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> %2 = sext <2 x i8> %1 to <2 x i64> %3 = load <2 x i8>, <2 x i8>* %a1, align 1 @@ -1469,7 +1469,7 @@ ; ZNVER1-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> %2 = sext <2 x i32> %1 to <2 x i64> %3 = load <2 x i32>, <2 x i32>* %a1, align 1 @@ -1520,7 +1520,7 @@ ; ZNVER1-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: 
retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> %2 = sext <4 x i16> %1 to <4 x i32> %3 = load <4 x i16>, <4 x i16>* %a1, align 1 @@ -1571,7 +1571,7 @@ ; ZNVER1-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> %2 = sext <2 x i16> %1 to <2 x i64> %3 = load <2 x i16>, <2 x i16>* %a1, align 1 @@ -1622,7 +1622,7 @@ ; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50] ; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25] ; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> %2 = zext <8 x i8> %1 to <8 x i16> %3 = load <8 x i8>, <8 x i8>* %a1, align 1 @@ -1673,7 +1673,7 @@ ; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50] ; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> %2 = zext <4 x i8> %1 to <4 x i32> %3 = load <4 x i8>, <4 x i8>* %a1, align 1 @@ -1724,7 +1724,7 @@ ; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50] ; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.25] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> %2 = zext <2 x i8> %1 to <2 x i64> %3 = load <2 x i8>, <2 x i8>* %a1, align 1 @@ -1775,7 +1775,7 @@ ; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [8:0.50] ; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.25] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> %2 = zext <2 x i32> %1 to <2 x i64> %3 = load <2 x i32>, <2 x i32>* %a1, align 1 @@ -1826,7 +1826,7 @@ ; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50] ; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.25] ; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> %2 = zext <4 x i16> %1 to <4 x i32> %3 = load <4 x i16>, <4 x i16>* %a1, align 1 @@ -1877,7 +1877,7 @@ ; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [8:0.50] ; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm0 = 
xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.25] ; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> %2 = zext <2 x i16> %1 to <2 x i64> %3 = load <2 x i16>, <2 x i16>* %a1, align 1 @@ -1921,7 +1921,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1) %2 = bitcast <2 x i64> %1 to <4 x i32> %3 = load <4 x i32>, <4 x i32> *%a2, align 16 @@ -1965,7 +1965,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = mul <4 x i32> %a0, %a1 %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = mul <4 x i32> %1, %2 @@ -2031,7 +2031,7 @@ ; ZNVER1-NEXT: setb %cl # sched: [1:0.25] ; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25] ; ZNVER1-NEXT: movzbl %cl, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1) %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %2) @@ -2082,7 +2082,7 @@ ; ZNVER1-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [10:1.00] ; ZNVER1-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7) %2 = load <2 x double>, <2 x double> *%a1, align 16 %3 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %2, i32 7) @@ -2133,7 +2133,7 @@ ; ZNVER1-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [10:1.00] ; ZNVER1-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7) %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %2, i32 7) @@ -2185,7 +2185,7 @@ ; ZNVER1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00] ; ZNVER1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00] ; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7) %2 = load <2 x double>, <2 x double>* %a2, align 16 %3 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %2, i32 7) @@ -2237,7 +2237,7 @@ ; ZNVER1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00] ; ZNVER1-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00] ; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %2, i32 7) Index: test/CodeGen/X86/sse42-schedule.ll 
=================================================================== --- test/CodeGen/X86/sse42-schedule.ll +++ test/CodeGen/X86/sse42-schedule.ll @@ -49,7 +49,7 @@ ; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00] ; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00] ; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a0, i8 %a1) %2 = load i8, i8 *%a2 %3 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %1, i8 %2) @@ -98,7 +98,7 @@ ; ZNVER1-NEXT: crc32w %si, %edi # sched: [3:1.00] ; ZNVER1-NEXT: crc32w (%rdx), %edi # sched: [10:1.00] ; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a0, i16 %a1) %2 = load i16, i16 *%a2 %3 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %1, i16 %2) @@ -147,7 +147,7 @@ ; ZNVER1-NEXT: crc32l %esi, %edi # sched: [3:1.00] ; ZNVER1-NEXT: crc32l (%rdx), %edi # sched: [10:1.00] ; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a0, i32 %a1) %2 = load i32, i32 *%a2 %3 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %1, i32 %2) @@ -196,7 +196,7 @@ ; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00] ; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00] ; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a0, i8 %a1) %2 = load i8, i8 *%a2 %3 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %1, i8 %2) @@ -245,7 +245,7 @@ ; ZNVER1-NEXT: crc32q %rsi, %rdi # sched: [3:1.00] ; ZNVER1-NEXT: crc32q (%rdx), %rdi # sched: [10:1.00] ; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a0, i64 %a1) %2 = load i64, i64 *%a2 %3 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %1, i64 %2) @@ -330,7 +330,7 @@ ; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?] ; ZNVER1-NEXT: # kill: %ECX %ECX %RCX ; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %2, i32 7, i8 7) @@ -398,7 +398,7 @@ ; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:?] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) @@ -459,7 +459,7 @@ ; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?] 
; ZNVER1-NEXT: # kill: %ECX %ECX %RCX ; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %2, i8 7) @@ -503,7 +503,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:?] ; ZNVER1-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:?] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %1, <16 x i8> %2, i8 7) @@ -546,7 +546,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = icmp sgt <2 x i64> %a0, %a1 %2 = sext <2 x i1> %1 to <2 x i64> %3 = load <2 x i64>, <2 x i64>*%a2, align 16 Index: test/CodeGen/X86/sse4a-schedule.ll =================================================================== --- test/CodeGen/X86/sse4a-schedule.ll +++ test/CodeGen/X86/sse4a-schedule.ll @@ -17,7 +17,7 @@ ; ZNVER1-LABEL: test_extrq: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: extrq %xmm1, %xmm0 -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %a0, <16 x i8> %a1) ret <2 x i64> %1 } @@ -37,7 +37,7 @@ ; ZNVER1-LABEL: test_extrqi: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: extrq $2, $3, %xmm0 -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a0, i8 3, i8 2) ret <2 x i64> %1 } @@ -57,7 +57,7 @@ ; ZNVER1-LABEL: test_insertq: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: insertq %xmm1, %xmm0 -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %a0, <2 x i64> %a1) ret <2 x i64> %1 } @@ -77,7 +77,7 @@ ; ZNVER1-LABEL: test_insertqi: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: insertq $6, $5, %xmm1, %xmm0 -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 6) ret <2 x i64> %1 } @@ -97,7 +97,7 @@ ; ZNVER1-LABEL: test_movntsd: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: movntsd %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a) ret void } @@ -117,7 +117,7 @@ ; ZNVER1-LABEL: test_movntss: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: movntss %xmm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a) ret void } Index: test/CodeGen/X86/ssse3-schedule.ll =================================================================== --- test/CodeGen/X86/ssse3-schedule.ll +++ test/CodeGen/X86/ssse3-schedule.ll @@ -58,7 +58,7 @@ ; ZNVER1-NEXT: vpabsb (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0) %2 
= load <16 x i8>, <16 x i8> *%a1, align 16 %3 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %2) @@ -116,7 +116,7 @@ ; ZNVER1-NEXT: vpabsd (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0) %2 = load <4 x i32>, <4 x i32> *%a1, align 16 %3 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %2) @@ -174,7 +174,7 @@ ; ZNVER1-NEXT: vpabsw (%rdi), %xmm1 # sched: [8:0.50] ; ZNVER1-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0) %2 = load <8 x i16>, <8 x i16> *%a1, align 16 %3 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %2) @@ -229,7 +229,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.25] ; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = shufflevector <8 x i16> %2, <8 x i16> %1, <8 x i32> @@ -277,7 +277,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %1, <4 x i32> %2) @@ -326,7 +326,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %1, <8 x i16> %2) @@ -375,7 +375,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %1, <8 x i16> %2) @@ -424,7 +424,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %1, <4 x i32> %2) @@ -473,7 +473,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x 
i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %1, <8 x i16> %2) @@ -522,7 +522,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %1, <8 x i16> %2) @@ -571,7 +571,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = bitcast <8 x i16> %1 to <16 x i8> @@ -621,7 +621,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00] ; ZNVER1-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %1, <8 x i16> %2) @@ -670,7 +670,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> %2) @@ -723,7 +723,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 %3 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %1, <16 x i8> %2) @@ -776,7 +776,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %1, <4 x i32> %2) @@ -829,7 +829,7 @@ ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 %3 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %1, <8 x i16> %2)