Index: lib/Target/X86/X86ScheduleBtVer2.td
===================================================================
--- lib/Target/X86/X86ScheduleBtVer2.td
+++ lib/Target/X86/X86ScheduleBtVer2.td
@@ -17,6 +17,8 @@
   // All x86 instructions are modeled as a single micro-op, and btver2 can
   // decode 2 instructions per cycle.
   let IssueWidth = 2;
+  // FIXME: At most 44 macro-ops with floating-point micro-op components can be
+  // in flight within the 64-macro-op window that the integer retire control unit provides.
   let MicroOpBufferSize = 64; // Retire Control Unit
   let LoadLatency = 5; // FPU latency (worse case cf Integer 3 cycle latency)
   let HighLatency = 25;
@@ -337,5 +339,281 @@
 def : WriteRes<WriteMicrocoded, [JAny]> { let Latency = 100; }
 def : WriteRes<WriteFence, [JSAGU]>;
 def : WriteRes<WriteNop, []>;
+
+////////////////////////////////////////////////////////////////////////////////
+// AVX instructions.
+////////////////////////////////////////////////////////////////////////////////
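+
+// Notes on the entries below: Jaguar executes a 256-bit AVX op as two 128-bit
+// halves, which is why most Y(mm) classes occupy their FP pipe for two cycles
+// (ResourceCycles = [2]). JFPU01 is the two-unit group JFPU0+JFPU1, so a value
+// of 2 on it still sustains one such op per cycle. Folded-load forms add JLAGU
+// for one cycle and LoadLatency (5) cycles on top of the register-form
+// latency, e.g. 3 + 5 = 8 for VADDP(S|D)Yrm.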
+
+def WriteFAddY: SchedWriteRes<[JFPU0]> {
+  let Latency = 3;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteFAddY], (instregex "VADD(SUB)?P(S|D)Yrr", "VSUBP(S|D)Yrr")>;
+
+def WriteFAddYLd: SchedWriteRes<[JLAGU, JFPU0]> {
+  let Latency = 8;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteFAddYLd, ReadAfterLd], (instregex "VADD(SUB)?P(S|D)Yrm", "VSUBP(S|D)Yrm")>;
+
+def WriteFDivY: SchedWriteRes<[JFPU1]> {
+  let Latency = 38;
+  let ResourceCycles = [38];
+}
+def : InstRW<[WriteFDivY], (instregex "VDIVP(D|S)Yrr")>;
+
+def WriteFDivYLd: SchedWriteRes<[JLAGU, JFPU1]> {
+  let Latency = 43;
+  let ResourceCycles = [1, 38];
+}
+def : InstRW<[WriteFDivYLd, ReadAfterLd], (instregex "VDIVP(S|D)Yrm")>;
+
+def WriteVMULYPD: SchedWriteRes<[JFPU1]> {
+  let Latency = 4;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVMULYPD], (instregex "VMULPDYrr")>;
+
+def WriteVMULYPDLd: SchedWriteRes<[JLAGU, JFPU1]> {
+  let Latency = 9;
+  let ResourceCycles = [1, 4];
+}
+def : InstRW<[WriteVMULYPDLd, ReadAfterLd], (instregex "VMULPDYrm")>;
+
+def WriteVMULYPS: SchedWriteRes<[JFPU1]> {
+  let Latency = 2;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVMULYPS], (instregex "VMULPSYrr", "VRCPPSYr", "VRSQRTPSYr")>;
+
+def WriteVMULYPSLd: SchedWriteRes<[JLAGU, JFPU1]> {
+  let Latency = 7;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteVMULYPSLd, ReadAfterLd], (instregex "VMULPSYrm", "VRCPPSYm", "VRSQRTPSYm")>;
+
+def WriteVDPYPS: SchedWriteRes<[JFPU1, JFPU0]> {
+  let Latency = 12;
+  let ResourceCycles = [6, 6];
+}
+def : InstRW<[WriteVDPYPS], (instregex "VDPPSYrr")>;
+
+def WriteVDPYPSLd: SchedWriteRes<[JLAGU, JFPU1, JFPU0]> {
+  let Latency = 17;
+  let ResourceCycles = [1, 6, 6];
+}
+def : InstRW<[WriteVDPYPSLd, ReadAfterLd], (instregex "VDPPSYrm")>;
+
+def WriteVCVT: SchedWriteRes<[JSTC]> {
+  let Latency = 3;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVCVT], (instregex "VCVTDQ2P(S|D)Yrr", "VMOVNTP(S|D)Ymr", "VROUNDYP(S|D)r")>;
+
+def WriteVCVTLd: SchedWriteRes<[JLAGU, JSTC]> {
+  let Latency = 8;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteVCVTLd, ReadAfterLd], (instregex "VCVTDQ2P(S|D)Yrm")>;
+
+def WriteVCVTYPD: SchedWriteRes<[JSTC, JFPU01]> {
+  let Latency = 6;
+  let ResourceCycles = [2, 4];
+}
+def : InstRW<[WriteVCVTYPD], (instregex "VCVTPD2(DQ|PS)Yrr")>;
+
+def WriteVCVTYPDLd: SchedWriteRes<[JLAGU, JSTC, JFPU01]> {
+  let Latency = 11;
+  let ResourceCycles = [1, 2, 4];
+}
+def : InstRW<[WriteVCVTYPDLd, ReadAfterLd], (instregex "VCVTPD2(DQ|PS)Yrm")>;
+
+def WriteVCVTYPS: SchedWriteRes<[JSTC]> {
+  let Latency = 3;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVCVTYPS], (instregex "VCVTPS2DQYrr")>;
+
+def WriteVCVTYPSLd: SchedWriteRes<[JLAGU, JSTC]> {
+  let Latency = 8;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteVCVTYPSLd, ReadAfterLd], (instregex "VCVTPS2DQYrm")>;
+
+def WriteAVX105: SchedWriteRes<[JFPU01]> {
+  let ResourceCycles = [1];
+}
+def : InstRW<[WriteAVX105], (instregex "VMOVAP(D|S)rr", "VMOVDDUPrr", "VMOVHLPSrr",
+                             "VMOVHPDrm", "VMOVLHPSrr", "VMOVLP(D|S)rm")>;
+
+def WriteAVX105Ld: SchedWriteRes<[JLAGU, JFPU01]> {
+  let Latency = 6;
+  let ResourceCycles = [1, 1];
+}
+def : InstRW<[WriteAVX105Ld, ReadAfterLd], (instregex "VMOVAP(D|S)mr",
+                             "VMOVHPDmr", "VMOVLP(D|S)mr")>;
+
+def WriteAVX11: SchedWriteRes<[JFPU01]> {
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteAVX11], (instregex "VAND(N)?P(S|D)Yrr", "VBLENDP(S|D)Yrri",
+                            "VBROADCASTF128", "VBROADCASTSSrr", "VINSERTF128rr",
+                            "VMOVDDUPYrr", "VMOVS(H|L)DUPYrr", "VMOVUP(D|S)Yrr",
+                            "VORP(S|D)Yrr", "VPERMILP(D|S)Yri", "VSHUFP(D|S)Yrri",
+                            "VUNPCK(H|L)P(D|S)rr", "VXORP(S|D)Yrr")>;
+
+def WriteAVX11Ld: SchedWriteRes<[JLAGU, JFPU01]> {
+  let Latency = 6;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteAVX11Ld, ReadAfterLd], (instregex "VAND(N)?P(S|D)Yrm",
+                            "VBLENDP(S|D)Yrmi", "VBROADCASTSSrm", "VINSERTF128rm",
+                            "VMOVAP(D|S)rm", "VMOVDDUPYrm", "VMOVUP(D|S)Ymr",
+                            "VORP(S|D)Yrm", "VPERMILP(D|S)Yrm", "VSHUFP(D|S)Yrmi",
+                            "VUNPCK(H|L)P(D|S)rm", "VXORP(S|D)Yrm")>;
+
+def WriteVBlendVPY: SchedWriteRes<[JFPU01]> {
+  let Latency = 3;
+  let ResourceCycles = [6];
+}
+def : InstRW<[WriteVBlendVPY], (instregex "VBLENDVP(S|D)Yrr", "VPERMILP(D|S)Yrr")>;
+
+def WriteVBlendVPYLd: SchedWriteRes<[JLAGU, JFPU01]> {
+  let Latency = 8;
+  let ResourceCycles = [1, 6];
+}
+def : InstRW<[WriteVBlendVPYLd, ReadAfterLd], (instregex "VBLENDVP(S|D)Yrm")>;
+
+def WriteVBROADCASTY: SchedWriteRes<[JFPU01]> {
+  let Latency = 1;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVBROADCASTY], (instregex "VBROADCASTS(S|D)Yrr")>;
+
+def WriteVBROADCASTYLd: SchedWriteRes<[JLAGU, JFPU01]> {
+  let Latency = 6;
+  let ResourceCycles = [1, 4];
+}
+def : InstRW<[WriteVBROADCASTYLd, ReadAfterLd], (instregex "VBROADCASTS(S|D)Yrm")>;
+
+def WriteFPAY22: SchedWriteRes<[JFPU0]> {
+  let Latency = 2;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteFPAY22], (instregex "VCMPP(S|D)Yrri", "VM(AX|IN)P(D|S)Yrr")>;
+
+def WriteFPAY22Ld: SchedWriteRes<[JLAGU, JFPU0]> {
+  let Latency = 7;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteFPAY22Ld, ReadAfterLd], (instregex "VCMPP(S|D)Yrmi", "VM(AX|IN)P(D|S)Yrm")>;
+
+def WriteExtr128: SchedWriteRes<[JALU01]> {
+  let ResourceCycles = [1];
+}
+def : InstRW<[WriteExtr128], (instregex "VEXTRACTF128rr")>;
+
+def WriteExtr128Ld: SchedWriteRes<[JLAGU, JALU01]> {
+  let Latency = 6;
+}
+def : InstRW<[WriteExtr128Ld], (instregex "VEXTRACTF128mr")>;
+
+def WriteVHAddSubY: SchedWriteRes<[JFPU0]> {
+  let Latency = 3;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVHAddSubY], (instregex "VH(ADD|SUB)P(D|S)Yrr")>;
+
+def WriteVHAddSubYLd: SchedWriteRes<[JLAGU, JFPU0]> {
+  let Latency = 8;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteVHAddSubYLd, ReadAfterLd], (instregex "VH(ADD|SUB)P(D|S)Yrm")>;
+
+def WriteVMaskMovY: SchedWriteRes<[JFPU01]> {
+  let Latency = 6;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVMaskMovY], (instregex "VMASKMOVP(D|S)Yrm")>;
+
+def WriteVMaskMovYLd: SchedWriteRes<[JLAGU, JFPU01]> {
+  let Latency = 11;
+  let ResourceCycles = [1, 4];
+}
+def : InstRW<[WriteVMaskMovYLd], (instregex "VMASKMOVP(D|S)Ymr")>;
+
+def WriteVMaskMov: SchedWriteRes<[JFPU01]> {
+  let Latency = 6;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVMaskMov], (instregex "VMASKMOVP(D|S)rm")>;
+
+def WriteVMaskMovLd: SchedWriteRes<[JLAGU, JFPU01]> {
+  let Latency = 11;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[WriteVMaskMovLd], (instregex "VMASKMOVP(D|S)mr")>;
+
+// TODO: In fact we have latency '2+i'. The '+i' represents an additional
+// 1-cycle transfer operation that moves the floating-point result to the
+// integer unit. During this additional cycle the floating-point unit
+// execution resources are not occupied; instead, ALU0 in the integer unit
+// is occupied.
+def WriteVMOVMSK: SchedWriteRes<[JFPU0]> {
+  let Latency = 3;
+}
+def : InstRW<[WriteVMOVMSK], (instregex "VMOVMSKP(D|S)Yrr", "VTESTP(S|D)rr")>;
+
+def WriteVTESTLd: SchedWriteRes<[JLAGU, JFPU0]> {
+  let Latency = 8;
+}
+def : InstRW<[WriteVTESTLd], (instregex "VTESTP(S|D)rm")>;
+
+// TODO: In fact we have latency '3+i', with the same additional 1-cycle
+// transfer to ALU0 in the integer unit as described above.
+def WriteVTESTY: SchedWriteRes<[JFPU01, JFPU0]> {
+  let Latency = 4;
+  let ResourceCycles = [2, 2];
+}
+def : InstRW<[WriteVTESTY], (instregex "VTESTP(S|D)Yrr")>;
+
+def WriteVTESTYLd: SchedWriteRes<[JLAGU, JFPU01, JFPU0]> {
+  let Latency = 9;
+  let ResourceCycles = [1, 4, 2];
+}
+def : InstRW<[WriteVTESTYLd], (instregex "VTESTP(S|D)Yrm")>;
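+
+// TODO: One possible way to model the extra '+i' transfer cycle described in
+// the two comments above (a sketch only, not enabled here): add JALU0 to the
+// resource list so the transfer cycle is booked against the integer ALU
+// rather than the FP pipes, e.g.:
+//   def WriteVTESTY: SchedWriteRes<[JFPU01, JFPU0, JALU0]> {
+//     let Latency = 4;
+//     let ResourceCycles = [2, 2, 1];
+//   }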
+
+def WriteVPermilP: SchedWriteRes<[JFPU01]> {
+  let Latency = 1;
+  let ResourceCycles = [1];
+}
+def : InstRW<[WriteVPermilP], (instregex "VPERMILP(D|S)ri")>;
+
+def WriteVSQRTYPD: SchedWriteRes<[JFPU1]> {
+  let Latency = 54;
+  let ResourceCycles = [54];
+}
+def : InstRW<[WriteVSQRTYPD], (instregex "VSQRTPDYr")>;
+
+def WriteVSQRTYPDLd: SchedWriteRes<[JLAGU, JFPU1]> {
+  let Latency = 59;
+  let ResourceCycles = [1, 54];
+}
+def : InstRW<[WriteVSQRTYPDLd], (instregex "VSQRTPDYm")>;
+
+def WriteVSQRTYPS: SchedWriteRes<[JFPU1]> {
+  let Latency = 42;
+  let ResourceCycles = [42];
+}
+def : InstRW<[WriteVSQRTYPS], (instregex "VSQRTPSYr")>;
+
+def WriteVSQRTYPSLd: SchedWriteRes<[JLAGU, JFPU1]> {
+  let Latency = 47;
+  let ResourceCycles = [1, 42];
+}
+def : InstRW<[WriteVSQRTYPSLd], (instregex "VSQRTPSYm")>;
+
 } // SchedModel
Index: test/CodeGen/X86/avx-schedule.ll
===================================================================
--- test/CodeGen/X86/avx-schedule.ll
+++ test/CodeGen/X86/avx-schedule.ll
@@ -21,14 +21,14 @@
 ;
 ; BTVER2-LABEL: test_addpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = fadd <4 x double> %a0, %a1
  %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -51,14 +51,14 @@
 ;
 ; BTVER2-LABEL: test_addps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: 
[3:1.00] -; BTVER2-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_addps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fadd <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -81,14 +81,14 @@ ; ; BTVER2-LABEL: test_addsubpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_addsubpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -112,14 +112,14 @@ ; ; BTVER2-LABEL: test_addsubps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_addsubps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -145,16 +145,16 @@ ; ; BTVER2-LABEL: test_andnotpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_andnotpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> @@ -186,16 +186,16 @@ ; ; BTVER2-LABEL: test_andnotps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # 
sched: [3:1.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_andnotps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> @@ -227,16 +227,16 @@ ; ; BTVER2-LABEL: test_andpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_andpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> @@ -266,16 +266,16 @@ ; ; BTVER2-LABEL: test_andps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_andps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> @@ -305,15 +305,15 @@ ; ; BTVER2-LABEL: test_blendpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50] -; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:1.00] +; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_blendpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50] -; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:1.00] +; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> @@ -338,13 +338,13 @@ ; ; BTVER2-LABEL: test_blendps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50] +; BTVER2-NEXT: 
vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:1.00] ; BTVER2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_blendps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50] +; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:1.00] ; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [6:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> @@ -368,14 +368,14 @@ ; ; BTVER2-LABEL: test_blendvpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; BTVER2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00] +; BTVER2-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_blendvpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00] +; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) %2 = load <4 x double>, <4 x double> *%a3, align 32 @@ -399,14 +399,14 @@ ; ; BTVER2-LABEL: test_blendvps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; BTVER2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00] +; BTVER2-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_blendvps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00] +; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) %2 = load <8 x float>, <8 x float> *%a3, align 32 @@ -428,12 +428,12 @@ ; ; BTVER2-LABEL: test_broadcastf128: ; BTVER2: # BB#0: -; BTVER2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [6:1.00] +; BTVER2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_broadcastf128: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [6:1.00] +; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [1:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = load <4 x float>, <4 x float> *%a0, align 32 %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> @@ -453,12 +453,12 @@ ; ; BTVER2-LABEL: test_broadcastsd_ymm: ; BTVER2: # BB#0: -; BTVER2-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:1.00] +; BTVER2-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_broadcastsd_ymm: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:2.00] ; 
ZNVER1-NEXT: retq # sched: [4:1.00] %1 = load double, double *%a0, align 8 %2 = insertelement <4 x double> undef, double %1, i32 0 @@ -479,12 +479,12 @@ ; ; BTVER2-LABEL: test_broadcastss: ; BTVER2: # BB#0: -; BTVER2-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [5:1.00] +; BTVER2-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_broadcastss: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [5:1.00] +; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = load float, float *%a0, align 4 %2 = insertelement <4 x float> undef, float %1, i32 0 @@ -505,12 +505,12 @@ ; ; BTVER2-LABEL: test_broadcastss_ymm: ; BTVER2: # BB#0: -; BTVER2-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:1.00] +; BTVER2-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_broadcastss_ymm: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = load float, float *%a0, align 4 %2 = insertelement <8 x float> undef, float %1, i32 0 @@ -535,16 +535,16 @@ ; ; BTVER2-LABEL: test_cmppd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00] -; BTVER2-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] -; BTVER2-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00] +; BTVER2-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_cmppd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00] -; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] -; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [2:2.00] +; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00] +; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fcmp oeq <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -573,16 +573,16 @@ ; ; BTVER2-LABEL: test_cmpps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00] -; BTVER2-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] -; BTVER2-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [7:2.00] +; BTVER2-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_cmpps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00] -; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] -; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [2:2.00] +; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [7:2.00] +; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fcmp oeq <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -611,16 +611,16 @@ ; ; BTVER2-LABEL: test_cvtdq2pd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:1.00] -; BTVER2-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vcvtdq2pd 
(%rdi), %ymm1 # sched: [8:2.00] +; BTVER2-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_cvtdq2pd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:1.00] -; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:2.00] +; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = sitofp <4 x i32> %a0 to <4 x double> %2 = load <4 x i32>, <4 x i32> *%a1, align 16 @@ -648,16 +648,16 @@ ; ; BTVER2-LABEL: test_cvtdq2ps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:1.00] -; BTVER2-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:2.00] +; BTVER2-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_cvtdq2ps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:1.00] -; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:2.00] +; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = sitofp <8 x i32> %a0 to <8 x float> %2 = load <8 x i32>, <8 x i32> *%a1, align 16 @@ -685,14 +685,14 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00] ; BTVER2-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [3:1.00] -; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_cvtpd2dq: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00] ; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fptosi <4 x double> %a0 to <4 x i32> %2 = load <4 x double>, <4 x double> *%a1, align 32 @@ -718,16 +718,16 @@ ; ; BTVER2-LABEL: test_cvtpd2ps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00] -; BTVER2-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [3:1.00] -; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:2.00] +; BTVER2-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [6:2.00] +; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_cvtpd2ps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00] -; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:2.00] +; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [6:2.00] +; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fptrunc <4 x double> %a0 to <4 x float> %2 = load <4 x double>, <4 x double> *%a1, align 32 @@ -755,14 +755,14 @@ ; 
BTVER2: # BB#0: ; BTVER2-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [8:1.00] ; BTVER2-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_cvtps2dq: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [8:1.00] ; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fptosi <8 x float> %a0 to <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 @@ -786,14 +786,14 @@ ; ; BTVER2-LABEL: test_divpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [19:19.00] -; BTVER2-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [24:19.00] +; BTVER2-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00] +; BTVER2-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_divpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [19:19.00] -; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [24:19.00] +; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00] +; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fdiv <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -816,14 +816,14 @@ ; ; BTVER2-LABEL: test_divps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [19:19.00] -; BTVER2-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [24:19.00] +; BTVER2-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00] +; BTVER2-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_divps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [19:19.00] -; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [24:19.00] +; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00] +; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fdiv <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -846,14 +846,14 @@ ; ; BTVER2-LABEL: test_dpps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:6.00] +; BTVER2-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [17:6.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_dpps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:6.00] +; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [17:6.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -880,13 +880,13 @@ ; BTVER2-LABEL: test_extractf128: ; BTVER2: # BB#0: ; BTVER2-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50] -; BTVER2-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_extractf128: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50] -; 
ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00] +; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [6:1.00] ; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> @@ -910,14 +910,14 @@ ; ; BTVER2-LABEL: test_haddpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_haddpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -941,14 +941,14 @@ ; ; BTVER2-LABEL: test_haddps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_haddps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -972,14 +972,14 @@ ; ; BTVER2-LABEL: test_hsubpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_hsubpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -1003,14 +1003,14 @@ ; ; BTVER2-LABEL: test_hsubps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_hsubps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> 
%a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -1036,16 +1036,16 @@ ; ; BTVER2-LABEL: test_insertf128: ; BTVER2: # BB#0: -; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50] +; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00] ; BTVER2-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_insertf128: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50] +; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00] ; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> %2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32> @@ -1098,15 +1098,15 @@ ; ; BTVER2-LABEL: test_maskmovpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00] -; BTVER2-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00] +; BTVER2-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [6:1.00] +; BTVER2-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [11:1.00] ; BTVER2-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_maskmovpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00] -; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00] +; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [6:1.00] +; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [11:1.00] ; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1) @@ -1133,15 +1133,15 @@ ; ; BTVER2-LABEL: test_maskmovpd_ymm: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00] -; BTVER2-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00] +; BTVER2-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [6:2.00] +; BTVER2-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [11:2.00] ; BTVER2-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_maskmovpd_ymm: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00] -; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00] +; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [6:2.00] +; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [11:2.00] ; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1) @@ -1168,15 +1168,15 @@ ; ; BTVER2-LABEL: test_maskmovps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00] -; BTVER2-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00] +; BTVER2-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [6:1.00] +; BTVER2-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [11:1.00] ; BTVER2-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_maskmovps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00] -; ZNVER1-NEXT: 
vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00] +; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [6:1.00] +; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [11:1.00] ; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1) @@ -1203,15 +1203,15 @@ ; ; BTVER2-LABEL: test_maskmovps_ymm: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00] -; BTVER2-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00] +; BTVER2-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [6:2.00] +; BTVER2-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [11:2.00] ; BTVER2-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_maskmovps_ymm: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00] -; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00] +; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [6:2.00] +; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [11:2.00] ; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1) @@ -1236,14 +1236,14 @@ ; ; BTVER2-LABEL: test_maxpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_maxpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -1267,14 +1267,14 @@ ; ; BTVER2-LABEL: test_maxps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_maxps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -1298,14 +1298,14 @@ ; ; BTVER2-LABEL: test_minpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_minpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; 
ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -1329,14 +1329,14 @@ ; ; BTVER2-LABEL: test_minps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_minps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -1363,14 +1363,14 @@ ; BTVER2-LABEL: test_movapd: ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovapd (%rdi), %ymm0 # sched: [5:1.00] -; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movapd: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [5:1.00] -; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = load <4 x double>, <4 x double> *%a0, align 32 @@ -1397,14 +1397,14 @@ ; BTVER2-LABEL: test_movaps: ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovaps (%rdi), %ymm0 # sched: [5:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movaps: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [5:1.00] -; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = load <8 x float>, <8 x float> *%a0, align 32 @@ -1430,16 +1430,16 @@ ; ; BTVER2-LABEL: test_movddup: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00] -; BTVER2-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50] -; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [6:1.00] +; BTVER2-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00] +; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movddup: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00] -; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50] -; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [6:1.00] +; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:1.00] +; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> %2 = 
load <4 x double>, <4 x double> *%a1, align 32 @@ -1463,12 +1463,12 @@ ; ; BTVER2-LABEL: test_movmskpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.50] +; BTVER2-NEXT: vmovmskpd %ymm0, %eax # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movmskpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.50] +; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [3:1.00] ; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0) @@ -1491,12 +1491,12 @@ ; ; BTVER2-LABEL: test_movmskps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.50] +; BTVER2-NEXT: vmovmskps %ymm0, %eax # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movmskps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.50] +; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [3:1.00] ; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0) @@ -1519,14 +1519,14 @@ ; ; BTVER2-LABEL: test_movntpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmovntpd %ymm0, (%rdi) # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movntpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00] +; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fadd <4 x double> %a0, %a0 store <4 x double> %1, <4 x double> *%a1, align 32, !nontemporal !0 @@ -1548,14 +1548,14 @@ ; ; BTVER2-LABEL: test_movntps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmovntps %ymm0, (%rdi) # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movntps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00] +; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fadd <8 x float> %a0, %a0 store <8 x float> %1, <8 x float> *%a1, align 32, !nontemporal !0 @@ -1580,15 +1580,15 @@ ; BTVER2-LABEL: test_movshdup: ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00] -; BTVER2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50] -; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movshdup: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00] -; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50] -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:1.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; 
ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 @@ -1615,15 +1615,15 @@ ; BTVER2-LABEL: test_movsldup: ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00] -; BTVER2-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50] -; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movsldup: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00] -; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50] -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:1.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 @@ -1652,15 +1652,15 @@ ; BTVER2-LABEL: test_movupd: ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovupd (%rdi), %ymm0 # sched: [5:1.00] -; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmovupd %ymm0, (%rsi) # sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movupd: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [5:1.00] -; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00] +; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [6:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = load <4 x double>, <4 x double> *%a0, align 1 %2 = fadd <4 x double> %1, %1 @@ -1688,15 +1688,15 @@ ; BTVER2-LABEL: test_movups: ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovups (%rdi), %ymm0 # sched: [5:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmovups %ymm0, (%rsi) # sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_movups: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [5:1.00] -; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00] +; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [6:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = load <8 x float>, <8 x float> *%a0, align 1 %2 = fadd <8 x float> %1, %1 @@ -1719,14 +1719,14 @@ ; ; BTVER2-LABEL: test_mulpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; BTVER2-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00] +; BTVER2-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_mulpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00] +; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00] ; 
ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = fmul <4 x double> %a0, %a1
  %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -1749,14 +1749,14 @@
 ;
 ; BTVER2-LABEL: test_mulps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_mulps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = fmul <8 x float> %a0, %a1
  %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -1781,16 +1781,16 @@
 ;
 ; BTVER2-LABEL: orpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BTVER2-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: orpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = bitcast <4 x double> %a0 to <4 x i64>
  %2 = bitcast <4 x double> %a1 to <4 x i64>
@@ -1820,16 +1820,16 @@
 ;
 ; BTVER2-LABEL: test_orps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; BTVER2-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_orps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
 ; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = bitcast <8 x float> %a0 to <4 x i64>
  %2 = bitcast <8 x float> %a1 to <4 x i64>
@@ -1895,15 +1895,15 @@
 ; BTVER2-LABEL: test_permilpd_ymm:
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00]
-; BTVER2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilpd_ymm:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00]
-; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32>
  %2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -1965,15 +1965,15 @@
 ; BTVER2-LABEL: test_permilps_ymm:
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00]
-; BTVER2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilps_ymm:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00]
-; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32>
  %2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2028,13 +2028,13 @@
 ;
 ; BTVER2-LABEL: test_permilvarpd_ymm:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
 ; BTVER2-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_permilvarpd_ymm:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
 ; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
@@ -2090,13 +2090,13 @@
 ;
 ; BTVER2-LABEL: test_permilvarps_ymm:
 ; BTVER2: # BB#0:
@@ -2090,13 +2090,13 @@
;
; BTVER2-LABEL: test_permilvarps_ymm:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
; BTVER2-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_permilvarps_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
@@ -2125,14 +2125,14 @@
; BTVER2: # BB#0:
; BTVER2-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rcpps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:1.00]
; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
  %2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2160,15 +2160,15 @@
; BTVER2-LABEL: test_roundpd:
; BTVER2: # BB#0:
; BTVER2-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [8:1.00]
-; BTVER2-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7)
  %2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -2196,15 +2196,15 @@
; BTVER2-LABEL: test_roundps:
; BTVER2: # BB#0:
; BTVER2-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [8:1.00]
-; BTVER2-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_roundps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7)
  %2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2231,16 +2231,16 @@
;
; BTVER2-LABEL: test_rsqrtps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:1.00]
-; BTVER2-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:2.00]
+; BTVER2-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_rsqrtps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:1.00]
-; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:2.00]
+; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
  %2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2267,16 +2267,16 @@
;
; BTVER2-LABEL: test_shufpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
+; BTVER2-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
; BTVER2-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shufpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
+; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:1.00]
; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
  %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2300,13 +2300,13 @@
;
; BTVER2-LABEL: test_shufps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
+; BTVER2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
; BTVER2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_shufps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
+; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:1.00]
; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [6:1.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 0, i32 8, i32 8, i32 4, i32 4, i32 12, i32 12>
@@ -2332,16 +2332,16 @@
;
; BTVER2-LABEL: test_sqrtpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [26:21.00]
-; BTVER2-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [21:21.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [59:54.00]
+; BTVER2-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [54:54.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [26:21.00]
-; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [21:21.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [59:54.00]
+; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [54:54.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
  %2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -2368,16 +2368,16 @@
;
; BTVER2-LABEL: test_sqrtps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vsqrtps (%rdi), %ymm1 # sched: [26:21.00]
-; BTVER2-NEXT: vsqrtps %ymm0, %ymm0 # sched: [21:21.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vsqrtps (%rdi), %ymm1 # sched: [47:42.00]
+; BTVER2-NEXT: vsqrtps %ymm0, %ymm0 # sched: [42:42.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_sqrtps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [26:21.00]
-; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [21:21.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [47:42.00]
+; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [42:42.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
  %2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -2402,14 +2402,14 @@
;
; BTVER2-LABEL: test_subpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = fsub <4 x double> %a0, %a1
  %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2432,14 +2432,14 @@
;
; BTVER2-LABEL: test_subps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_subps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = fsub <8 x float> %a0, %a1
  %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -2469,18 +2469,18 @@
; BTVER2-LABEL: test_testpd:
; BTVER2: # BB#0:
; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; BTVER2-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:0.50]
+; BTVER2-NEXT: vtestpd %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
-; BTVER2-NEXT: vtestpd (%rdi), %xmm0 # sched: [6:1.00]
+; BTVER2-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: adcl $0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_testpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vtestpd %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: setb %al # sched: [1:0.50]
-; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1)
@@ -2515,18 +2515,18 @@
; BTVER2-LABEL: test_testpd_ymm:
; BTVER2: # BB#0:
; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; BTVER2-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vtestpd %ymm1, %ymm0 # sched: [4:2.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
-; BTVER2-NEXT: vtestpd (%rdi), %ymm0 # sched: [6:1.00]
+; BTVER2-NEXT: vtestpd (%rdi), %ymm0 # sched: [9:3.00]
; BTVER2-NEXT: adcl $0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_testpd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vtestpd %ymm1, %ymm0 # sched: [4:2.00]
; ZNVER1-NEXT: setb %al # sched: [1:0.50]
-; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [9:3.00]
; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50]
; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
@@ -2560,18 +2560,18 @@
; BTVER2-LABEL: test_testps:
; BTVER2: # BB#0:
; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; BTVER2-NEXT: vtestps %xmm1, %xmm0 # sched: [1:0.50]
+; BTVER2-NEXT: vtestps %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
-; BTVER2-NEXT: vtestps (%rdi), %xmm0 # sched: [6:1.00]
+; BTVER2-NEXT: vtestps (%rdi), %xmm0 # sched: [8:1.00]
; BTVER2-NEXT: adcl $0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_testps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vtestps %xmm1, %xmm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vtestps %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: setb %al # sched: [1:0.50]
-; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1)
@@ -2606,18 +2606,18 @@
; BTVER2-LABEL: test_testps_ymm:
; BTVER2: # BB#0:
; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; BTVER2-NEXT: vtestps %ymm1, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vtestps %ymm1, %ymm0 # sched: [4:2.00]
; BTVER2-NEXT: setb %al # sched: [1:0.50]
-; BTVER2-NEXT: vtestps (%rdi), %ymm0 # sched: [6:1.00]
+; BTVER2-NEXT: vtestps (%rdi), %ymm0 # sched: [9:3.00]
; BTVER2-NEXT: adcl $0, %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_testps_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vtestps %ymm1, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vtestps %ymm1, %ymm0 # sched: [4:2.00]
; ZNVER1-NEXT: setb %al # sched: [1:0.50]
-; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [9:3.00]
; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50]
; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
@@ -2648,14 +2648,14 @@
; BTVER2: # BB#0:
; BTVER2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
; BTVER2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpckhpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
  %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2713,14 +2713,14 @@
; BTVER2: # BB#0:
; BTVER2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
; BTVER2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_unpcklpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2776,16 +2776,16 @@
;
; BTVER2-LABEL: test_xorpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BTVER2-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_xorpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = bitcast <4 x double> %a0 to <4 x i64>
  %2 = bitcast <4 x double> %a1 to <4 x i64>
@@ -2815,16 +2815,16 @@
;
; BTVER2-LABEL: test_xorps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BTVER2-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_xorps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; ZNVER1-NEXT: retq # sched: [4:1.00]
  %1 = bitcast <8 x float> %a0 to <4 x i64>
  %2 = bitcast <8 x float> %a1 to <4 x i64>
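The AVX hunks above follow one pattern: on BTVER2 (AMD Jaguar) the 256-bit floating-point forms keep their latency but their reciprocal throughput roughly doubles relative to the 128-bit forms, since the 128-bit FPU executes a ymm operation as two 128-bit halves; the non-pipelined units are remodeled outright (vsqrtpd ymm from [21:21.00] to [54:54.00], vdivps ymm at [38:38.00] against [19:19.00] for the xmm form). A sketch of the divide case in the style of these tests, with an invented function name:

define <8 x float> @ymm_div_demo(<8 x float> %a0, <8 x float> %a1) {
; BTVER2-LABEL: ymm_div_demo:
; BTVER2: # BB#0:
; BTVER2-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = fdiv <8 x float> %a0, %a1
  ret <8 x float> %1
}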
Index: test/CodeGen/X86/recip-fastmath.ll
===================================================================
--- test/CodeGen/X86/recip-fastmath.ll
+++ test/CodeGen/X86/recip-fastmath.ll
@@ -278,7 +278,7 @@
;
; BTVER2-LABEL: v4f32_no_estimate:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00]
; BTVER2-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [19:19.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
@@ -340,7 +340,7 @@
;
; BTVER2-LABEL: v4f32_one_step:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
@@ -439,7 +439,7 @@
;
; BTVER2-LABEL: v4f32_two_step:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
; BTVER2-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
@@ -541,7 +541,7 @@
; BTVER2-LABEL: v8f32_no_estimate:
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
-; BTVER2-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [19:19.00]
+; BTVER2-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [38:38.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_no_estimate:
@@ -611,10 +611,10 @@
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step:
@@ -723,14 +723,14 @@
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_two_step:
Index: test/CodeGen/X86/recip-fastmath2.ll
===================================================================
--- test/CodeGen/X86/recip-fastmath2.ll
+++ test/CodeGen/X86/recip-fastmath2.ll
@@ -392,7 +392,7 @@
;
; BTVER2-LABEL: v4f32_one_step2:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
@@ -489,7 +489,7 @@
;
; BTVER2-LABEL: v4f32_one_step_2_divs:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
@@ -604,7 +604,7 @@
;
; BTVER2-LABEL: v4f32_two_step2:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00]
; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
; BTVER2-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
@@ -730,11 +730,11 @@
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step2:
@@ -836,12 +836,12 @@
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [7:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [7:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_one_step_2_divs:
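The vrcpps-based sequences in these reciprocal tests are Newton-Raphson refinements: starting from the hardware estimate x0 of 1/a, each step computes x1 = x0 + x0*(1 - a*x0), which costs one vmulps, one vsubps, one vmulps, and one vaddps, exactly the instructions whose ymm throughputs change from 1.00 to 2.00 above. A self-contained sketch of one step, reusing the AVX intrinsic these tests already call (the function name is invented for illustration):

define <8 x float> @recip_one_step_demo(<8 x float> %a) {
  ; x0 = hardware reciprocal estimate of 1/a
  %x0 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a)
  ; e = 1 - a*x0, the residual error of the estimate
  %ax0 = fmul fast <8 x float> %a, %x0
  %e = fsub fast <8 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %ax0
  ; x1 = x0 + x0*e, one Newton-Raphson step
  %x0e = fmul fast <8 x float> %x0, %e
  %x1 = fadd fast <8 x float> %x0, %x0e
  ret <8 x float> %x1
}
declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>)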
@@ -965,15 +965,15 @@
; BTVER2: # BB#0:
; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_two_step2:
@@ -1119,7 +1119,7 @@
; BTVER2-LABEL: v8f32_no_step2:
; BTVER2: # BB#0:
; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: v8f32_no_step2:
Index: test/CodeGen/X86/slow-unaligned-mem.ll
===================================================================
--- test/CodeGen/X86/slow-unaligned-mem.ll
+++ test/CodeGen/X86/slow-unaligned-mem.ll
@@ -86,7 +86,7 @@
; FAST-NOT: not a recognized processor
; FAST-LABEL: store_zeros:
; FAST: # BB#0:
-; FAST-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FAST: movl {{[0-9]+}}(%esp), %eax
; FAST-NOT: movl
  call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 64, i32 1, i1 false)
  ret void
Index: test/CodeGen/X86/sse-schedule.ll
===================================================================
--- test/CodeGen/X86/sse-schedule.ll
+++ test/CodeGen/X86/sse-schedule.ll
@@ -1031,9 +1031,9 @@
;
; BTVER2-LABEL: test_movaps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovaps (%rdi), %xmm0 # sched: [5:1.00]
+; BTVER2-NEXT: vmovaps (%rdi), %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
+; BTVER2-NEXT: vmovaps %xmm0, (%rsi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = load <4 x float>, <4 x float> *%a0, align 16
  %2 = fadd <4 x float> %1, %1
@@ -1077,7 +1077,7 @@
;
; BTVER2-LABEL: test_movhlps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
+; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
  ret <4 x float> %1
@@ -1125,7 +1125,7 @@
;
; BTVER2-LABEL: test_movhps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
+; BTVER2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
@@ -1174,7 +1174,7 @@
;
; BTVER2-LABEL: test_movlhps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
+; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1220,9 +1220,9 @@
;
; BTVER2-LABEL: test_movlps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
+; BTVER2-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [1:0.50]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
+; BTVER2-NEXT: vmovlps %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = bitcast x86_mmx* %a2 to <2 x float>*
  %2 = load <2 x float>, <2 x float> *%1, align 8
@@ -2033,7 +2033,7 @@
;
; BTVER2-LABEL: test_sqrtss:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovaps (%rdi), %xmm1 # sched: [5:1.00]
+; BTVER2-NEXT: vmovaps (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [26:21.00]
; BTVER2-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [26:21.00]
; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -2303,7 +2303,7 @@
;
; BTVER2-LABEL: test_unpckhps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
+; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:1.00]
; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -2349,7 +2349,7 @@
;
; BTVER2-LABEL: test_unpcklps:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
+; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:1.00]
; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
Index: test/CodeGen/X86/sse2-schedule.ll
===================================================================
--- test/CodeGen/X86/sse2-schedule.ll
+++ test/CodeGen/X86/sse2-schedule.ll
@@ -1628,9 +1628,9 @@
;
; BTVER2-LABEL: test_movapd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovapd (%rdi), %xmm0 # sched: [5:1.00]
+; BTVER2-NEXT: vmovapd (%rdi), %xmm0 # sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00]
+; BTVER2-NEXT: vmovapd %xmm0, (%rsi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = load <2 x double>, <2 x double> *%a0, align 16
  %2 = fadd <2 x double> %1, %1
@@ -1914,9 +1914,9 @@
;
; BTVER2-LABEL: test_movhpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [6:1.00]
+; BTVER2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [1:0.50]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:1.00]
+; BTVER2-NEXT: vmovhpd %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = bitcast x86_mmx* %a2 to double*
  %2 = load double, double *%1, align 8
@@ -1965,9 +1965,9 @@
;
; BTVER2-LABEL: test_movlpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [6:1.00]
+; BTVER2-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [1:0.50]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:1.00]
+; BTVER2-NEXT: vmovlpd %xmm0, (%rdi) # sched: [6:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = bitcast x86_mmx* %a2 to double*
  %2 = load double, double *%1, align 8
@@ -2275,7 +2275,7 @@
;
; BTVER2-LABEL: test_movsd_reg:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50]
+; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
  %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 0>
  ret <2 x double> %1
@@ -5736,7 +5736,7 @@
;
; BTVER2-LABEL: test_sqrtsd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovapd (%rdi), %xmm1 # sched: [5:1.00]
+; BTVER2-NEXT: vmovapd (%rdi), %xmm1 # sched: [6:1.00]
; BTVER2-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [26:21.00]
; BTVER2-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [26:21.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
@@ -5963,7 +5963,7 @@
;
; BTVER2-LABEL: test_unpckhpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
+; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:1.00]
; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
@@ -6018,7 +6018,7 @@
;
; BTVER2-LABEL: test_unpcklpd:
; BTVER2: # BB#0:
-; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
+; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00]
; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [6:1.00]
; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; BTVER2-NEXT: retq # sched: [4:1.00]
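These expected lines are machine-generated rather than hand-written: the "# sched:" comments appear to come from llc's -print-schedule mode, with the checks refreshed via utils/update_llc_test_checks.py after a scheduler-model change (both assumptions based on the usual conventions of these *-schedule.ll files, not stated in this patch). The RUN header assumed by the BTVER2 prefixes would look roughly like the following; exact triples, attributes, and prefixes may differ per file:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2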