Index: include/llvm/CodeGen/TargetSchedule.h
===================================================================
--- include/llvm/CodeGen/TargetSchedule.h
+++ include/llvm/CodeGen/TargetSchedule.h
@@ -40,6 +40,8 @@
   unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor.

   unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;
+  Optional<double>
+  getRThroughputFromInstrSchedModel(const MCSchedClassDesc *SCDesc) const;

 public:
   TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}
Index: lib/CodeGen/TargetSchedule.cpp
===================================================================
--- lib/CodeGen/TargetSchedule.cpp
+++ lib/CodeGen/TargetSchedule.cpp
@@ -337,8 +337,8 @@
 }

 static Optional<double>
-getRTroughputFromItineraries(unsigned schedClass,
-                             const InstrItineraryData *IID){
+getRThroughputFromItineraries(unsigned schedClass,
+                              const InstrItineraryData *IID) {
   double Unknown = std::numeric_limits<double>::infinity();
   double Throughput = Unknown;
@@ -351,28 +351,31 @@
     Throughput = std::min(Throughput,
                           countPopulation(IS->getUnits()) * 1.0 / Cycles);
   }
+  if (Throughput == Unknown)
+    return Optional<double>();
   // We need reciprocal throughput that's why we return such value.
   return 1 / Throughput;
 }

-static Optional<double>
-getRTroughputFromInstrSchedModel(const MCSchedClassDesc *SCDesc,
-                                 const TargetSubtargetInfo *STI,
-                                 const MCSchedModel &SchedModel) {
+Optional<double> TargetSchedModel::getRThroughputFromInstrSchedModel(
+    const MCSchedClassDesc *SCDesc) const {
   double Unknown = std::numeric_limits<double>::infinity();
   double Throughput = Unknown;
-
-  for (const MCWriteProcResEntry *WPR = STI->getWriteProcResBegin(SCDesc),
-                                 *WEnd = STI->getWriteProcResEnd(SCDesc);
-       WPR != WEnd; ++WPR) {
+  const MCWriteProcResEntry *WPR = STI->getWriteProcResBegin(SCDesc),
+                            *WEnd = STI->getWriteProcResEnd(SCDesc);
+  if ((WPR == WEnd) && SCDesc->isValid())
+    return computeInstrLatency(*SCDesc);
+  for (; WPR != WEnd; ++WPR) {
     unsigned Cycles = WPR->Cycles;
     if (!Cycles)
-      return Optional<double>();
+      continue;
     unsigned NumUnits = SchedModel.getProcResource(WPR->ProcResourceIdx)->NumUnits;
     Throughput = std::min(Throughput, NumUnits * 1.0 / Cycles);
   }
+  if (Throughput == Unknown)
+    return Optional<double>();
   // We need reciprocal throughput that's why we return such value.
   return 1 / Throughput;
 }

@@ -380,11 +383,10 @@
 Optional<double>
 TargetSchedModel::computeInstrRThroughput(const MachineInstr *MI) const {
   if (hasInstrItineraries())
-    return getRTroughputFromItineraries(MI->getDesc().getSchedClass(),
-                                        getInstrItineraries());
+    return getRThroughputFromItineraries(MI->getDesc().getSchedClass(),
+                                         getInstrItineraries());
   if (hasInstrSchedModel())
-    return getRTroughputFromInstrSchedModel(resolveSchedClass(MI), STI,
-                                            SchedModel);
+    return getRThroughputFromInstrSchedModel(resolveSchedClass(MI));
   return Optional<double>();
 }

@@ -392,11 +394,11 @@
 TargetSchedModel::computeInstrRThroughput(unsigned Opcode) const {
   unsigned SchedClass = TII->get(Opcode).getSchedClass();
   if (hasInstrItineraries())
-    return getRTroughputFromItineraries(SchedClass, getInstrItineraries());
+    return getRThroughputFromItineraries(SchedClass, getInstrItineraries());
   if (hasInstrSchedModel()) {
     const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
     if (SCDesc->isValid() && !SCDesc->isVariant())
-      return getRTroughputFromInstrSchedModel(SCDesc, STI, SchedModel);
+      return getRThroughputFromInstrSchedModel(SCDesc);
   }
   return Optional<double>();
 }
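Reviewer note: the computation above boils down to "throughput is limited by the slowest consumed resource: take the minimum of NumUnits/Cycles over all write-proc-res entries, and report its reciprocal". A minimal standalone sketch of that arithmetic, in plain C++17 rather than the LLVM API (the names Res and rthroughput are illustrative only):

#include <algorithm>
#include <limits>
#include <optional>
#include <vector>

// Stand-in for one MCWriteProcResEntry: how many cycles the write holds a
// resource, and how many parallel units that resource provides.
struct Res {
  unsigned Cycles;
  unsigned NumUnits;
};

// Mirrors getRThroughputFromInstrSchedModel: keep the worst issue rate over
// all consumed resources, then return its reciprocal.
std::optional<double> rthroughput(const std::vector<Res> &Entries) {
  double Throughput = std::numeric_limits<double>::infinity();
  for (const Res &R : Entries) {
    if (!R.Cycles)
      continue; // an unknown entry no longer aborts the whole query
    Throughput = std::min(Throughput, R.NumUnits * 1.0 / R.Cycles);
  }
  if (Throughput == std::numeric_limits<double>::infinity())
    return std::nullopt; // no usable resource info at all
  return 1 / Throughput;
}

For example, VDIVPDYrr in the model below holds the single JFPM unit for 38 cycles, so the minimum rate is 1/38 and the printed reciprocal throughput is 38.00.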
Index: lib/Target/X86/X86ScheduleBtVer2.td
===================================================================
--- lib/Target/X86/X86ScheduleBtVer2.td
+++ lib/Target/X86/X86ScheduleBtVer2.td
@@ -17,12 +17,13 @@
   // All x86 instructions are modeled as a single micro-op, and btver2 can
   // decode 2 instructions per cycle.
   let IssueWidth = 2;
-  let MicroOpBufferSize = 64; // Retire Control Unit
+  // FIXME: At most 44 macro-ops with floating-point micro-op components can be
+  // in flight in the 64-macro-op window that the integer retire control unit provides.
+  let MicroOpBufferSize = 64; // Integer Retire Control Unit
   let LoadLatency = 5; // FPU latency (worse case cf Integer 3 cycle latency)
   let HighLatency = 25;
   let MispredictPenalty = 14; // Minimum branch misdirection penalty
   let PostRAScheduler = 1;
-
   // FIXME: SSE4/AVX is unimplemented. This flag is set to allow
   // the scheduler to assign a default model to unrecognized opcodes.
   let CompleteModel = 0;
@@ -65,6 +66,9 @@
 def JFPM : ProcResource<1>; // FP multiplication
 def JFPA : ProcResource<1>; // FP addition

+def JFPFltCluster : ProcResGroup<[JFPA, JFPM]>;
+def JFPIntCluster : ProcResGroup<[JVALU0, JVALU1, JSTC]>;
+
 // Integer loads are 3 cycles, so ReadAfterLd registers needn't be available until 3
 // cycles after the memory operand.
 def : ReadAdvance<ReadAfterLd, 3>;
@@ -90,6 +94,7 @@
 multiclass JWriteResFpuPair<X86FoldableSchedWrite SchedRW,
                             ProcResourceKind ExePort, int Lat> {
+  // Register variant is using a single cycle on ExePort.
   def : WriteRes<SchedRW, [ExePort]> {
     let Latency = Lat;
   }
@@ -173,7 +178,6 @@
 defm : JWriteResFpuPair;
 defm : JWriteResFpuPair;
 defm : JWriteResFpuPair;
-
 def : WriteRes {
   let Latency = 21;
   let ResourceCycles = [1, 1, 21];
@@ -337,5 +341,287 @@
 def : WriteRes { let Latency = 100; }
 def : WriteRes;
 def : WriteRes;
+
+////////////////////////////////////////////////////////////////////////////////
+// AVX instructions.
+////////////////////////////////////////////////////////////////////////////////
+
+def WriteFAddYY: SchedWriteRes<[JFPA]> {
+  let Latency = 3;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteFAddYY], (instregex "VADD(SUB)?P(S|D)Yrr", "VSUBP(S|D)Yrr")>;
+
+def WriteFAddYMLd: SchedWriteRes<[JFPA]> {
+  let Latency = 8;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteFAddYMLd, ReadAfterLd], (instregex "VADD(SUB)?P(S|D)Yrm", "VSUBP(S|D)Yrm")>;
+
+def WriteVDIV: SchedWriteRes<[JFPM]> {
+  let Latency = 38;
+  let ResourceCycles = [38];
+}
+def : InstRW<[WriteVDIV], (instregex "VDIVP(D|S)Yrr")>;
+
+def WriteVDIVLd: SchedWriteRes<[JFPM]> {
+  let Latency = 43;
+  let ResourceCycles = [38];
+}
+def : InstRW<[WriteVDIVLd, ReadAfterLd], (instregex "VDIVP(S|D)Yrm")>;
+
+def WriteVMULPD: SchedWriteRes<[JFPM]> {
+  let Latency = 4;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVMULPD], (instregex "VMULPDYrr")>;
+
+def WriteVMULPDLd: SchedWriteRes<[JFPM]> {
+  let Latency = 9;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVMULPDLd, ReadAfterLd], (instregex "VMULPDYrm")>;
+
+def WriteVMULPS: SchedWriteRes<[JFPM]> {
+  let Latency = 2;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVMULPS], (instregex "VMULPSYrr", "VRCPPSYr", "VRSQRTPSYr")>;
+
+def WriteVMULPSLd: SchedWriteRes<[JFPM]> {
+  let Latency = 7;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVMULPSLd, ReadAfterLd], (instregex "VMULPSYrm", "VRCPPSYm", "VRSQRTPSYm")>;
+
+def WriteVDPPS: SchedWriteRes<[JFPM, JFPA]> {
+  let Latency = 12;
+  let ResourceCycles = [6, 6];
+}
+def : InstRW<[WriteVDPPS], (instregex "VDPPSYrr")>;
+
+def WriteVDPPSLd: SchedWriteRes<[JFPM, JFPA]> {
+  let Latency = 17;
+  let ResourceCycles = [6, 6];
+}
+def : InstRW<[WriteVDPPSLd, ReadAfterLd], (instregex "VDPPSYrm")>;
+
+def WriteVCVT: SchedWriteRes<[JSTC]> {
+  let Latency = 3;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVCVT], (instregex "VCVTDQ2P(S|D)Yrr", "VMOVNTP(S|D)Ymr", "VROUNDYP(S|D)r")>;
+
+def WriteVCVTLd: SchedWriteRes<[JSTC]> {
+  let Latency = 8;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVCVTLd, ReadAfterLd], (instregex "VCVTDQ2P(S|D)Yrm", "VROUNDYP(S|D)m")>;
+
+def WriteVCVTPD: SchedWriteRes<[JSTC, JFPFltCluster]> {
+  let Latency = 6;
+  let ResourceCycles = [2, 4];
+}
+def : InstRW<[WriteVCVTPD], (instregex "VCVTPD2(DQ|PS)Yrr")>;
+
+def WriteVCVTPDLd: SchedWriteRes<[JSTC, JFPFltCluster]> {
+  let Latency = 11;
+  let ResourceCycles = [2, 2];
+}
+def : InstRW<[WriteVCVTPDLd, ReadAfterLd], (instregex "VCVTPD2(DQ|PS)Yrm")>;
+
+def WriteVCVTPS: SchedWriteRes<[JSTC]> {
+  let Latency = 3;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVCVTPS], (instregex "VCVTPS2DQYrr")>;
+
+def WriteVCVTPSLd: SchedWriteRes<[JSTC]> {
+  let Latency = 11;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVCVTPSLd, ReadAfterLd], (instregex "VCVTPS2DQYrm")>;
+
+// FIXME: We don't need an 'Ld' version for AVX11 because the default ResourceCycles == 1.
+// TODO: How can we reuse the ResourceCycles of the non-folding version, as we do for Latency?
+def WriteAVX11: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 6;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteAVX11], (instregex "VAND(N)?P(S|D)Yrr", "VBLENDP(S|D)Yrri",
+  "VBROADCASTF128", "VBROADCASTSSrr", "VINSERTF128rr",
+  "VMOVAP(D|S)rm", "VMOVDDUPYrr", "VMOVS(H|L)DUPYrr", "VMOVUP(D|S)Yrm",
+  "VORP(S|D)Yrr", "VPERMILP(D|S)Yri", "VSHUFP(D|S)Yrri", "VUNPCK(H|L)P(D|S)rr",
+  "VXORP(S|D)Yrr")>;
+
+def WriteAVX11Ld: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 6;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteAVX11Ld, ReadAfterLd], (instregex "VAND(N)?P(S|D)Yrm",
+  "VBLENDP(S|D)Yrmi", "VBROADCASTF128", "VBROADCASTSSrm",
+  "VINSERTF128rm",
+  "VMOVAP(D|S)rm", "VMOVDDUPYrm", "VMOVS(H|L)DUPYrm", "VMOVUP(D|S)Ymr",
+  "VORP(S|D)Yrm", "VPERMILP(D|S)Yrm", "VSHUFP(D|S)Yrmi", "VUNPCK(H|L)P(D|S)rm",
+  "VXORP(S|D)Yrm")>;
+
+def WriteBlendVP: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 3;
+  let ResourceCycles = [6];
+}
+def : InstRW<[WriteBlendVP], (instregex "VBLENDVP(S|D)Yrr", "VPERMILP(D|S)Yrr")>;
+
+def WriteBlendVPLd: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 8;
+  let ResourceCycles = [6];
+}
+def : InstRW<[WriteBlendVPLd, ReadAfterLd], (instregex "VBLENDVP(S|D)Yrm")>;
+
+def WriteVBROADCAST: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 1;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVBROADCAST], (instregex "VBROADCASTS(S|D)Yrr")>;
+
+def WriteVBROADCASTLd: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 6;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVBROADCASTLd, ReadAfterLd], (instregex "VBROADCASTS(S|D)Yrm")>;
+
+def WriteFPA22: SchedWriteRes<[JFPA]> {
+  let Latency = 2;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteFPA22], (instregex "VCMPP(S|D)Yrri", "VM(AX|IN)P(D|S)Yrr")>;
+
+def WriteFPA22Ld: SchedWriteRes<[JFPA]> {
+  let Latency = 7;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteFPA22Ld, ReadAfterLd], (instregex "VCMPP(S|D)Yrmi", "VM(AX|IN)P(D|S)Yrm")>;
+
+def WriteExtr128: SchedWriteRes<[JALU01]> {
+  let Latency = 1;
+  let ResourceCycles = [1];
+}
+def : InstRW<[WriteExtr128], (instregex "VEXTRACTF128rr")>;
+
+def WriteExtr128Ld: SchedWriteRes<[JALU01]> {
+  let Latency = 6;
+  let ResourceCycles = [1];
+}
+def : InstRW<[WriteExtr128Ld], (instregex "VEXTRACTF128mr")>;
+
+def WriteVHAddSub: SchedWriteRes<[JFPA]> {
+  let Latency = 3;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVHAddSub], (instregex "VH(ADD|SUB)P(D|S)Yrr")>;
+
+def WriteVHAddSubLd: SchedWriteRes<[JFPA]> {
+  let Latency = 8;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVHAddSubLd], (instregex "VH(ADD|SUB)P(D|S)Yrm")>;
+
+def WriteVMaskMovY: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 6;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVMaskMovY], (instregex "VMASKMOVP(D|S)Yrm")>;
+
+def WriteVMaskMovYLd: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 11;
+  let ResourceCycles = [4];
+}
+def : InstRW<[WriteVMaskMovYLd], (instregex "VMASKMOVP(D|S)Ymr")>;
+
+def WriteVMaskMov: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 6;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVMaskMov], (instregex "VMASKMOVP(D|S)rm")>;
+
+def WriteVMaskMovLd: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 11;
+  let ResourceCycles = [2];
+}
+def : InstRW<[WriteVMaskMovLd], (instregex "VMASKMOVP(D|S)mr")>;
+
+// TODO: In fact the latency is '2+i'. The +i represents an additional 1 cycle transfer
+// operation which moves the floating point result to the integer unit. During this
+// additional cycle the floating point unit execution resources are not occupied
+// and ALU0 in the integer unit is occupied instead.
+def WriteVMOVMSK: SchedWriteRes<[JFPA]> {
+  let Latency = 3;
+  let ResourceCycles = [1];
+}
+def : InstRW<[WriteVMOVMSK], (instregex "VMOVMSKP(D|S)Yrr", "VTESTP(S|D)rr")>;
+
+def WriteVTESTLd: SchedWriteRes<[JFPA]> {
+  let Latency = 8;
+  let ResourceCycles = [1];
+}
+def : InstRW<[WriteVTESTLd], (instregex "VTESTP(S|D)rm")>;
+
+// TODO: In fact the latency is '3+i'. The +i represents an additional 1 cycle transfer
+// operation which moves the floating point result to the integer unit. During this
+// additional cycle the floating point unit execution resources are not occupied
+// and ALU0 in the integer unit is occupied instead.
+def WriteVTESTY: SchedWriteRes<[JFPFltCluster, JFPA]> {
+  let Latency = 4;
+  let ResourceCycles = [2, 2];
+}
+def : InstRW<[WriteVTESTY], (instregex "VTESTP(S|D)Yrr")>;
+
+def WriteVTESTYLd: SchedWriteRes<[JFPFltCluster, JFPA]> {
+  let Latency = 9;
+  let ResourceCycles = [4, 2];
+}
+def : InstRW<[WriteVTESTYLd], (instregex "VTESTP(S|D)Yrm")>;
+
+def WriteVPermilP: SchedWriteRes<[JFPFltCluster]> {
+  let Latency = 1;
+  let ResourceCycles = [1];
+}
+def : InstRW<[WriteVPermilP], (instregex "VPERMILP(D|S)ri")>;
+
+def WriteVSQRTPD: SchedWriteRes<[JFPM]> {
+  let Latency = 54;
+  let ResourceCycles = [54];
+}
+def : InstRW<[WriteVSQRTPD], (instregex "VSQRTPDYr")>;
+
+def WriteVSQRTPDLd: SchedWriteRes<[JFPM]> {
+  let Latency = 59;
+  let ResourceCycles = [54];
+}
+def : InstRW<[WriteVSQRTPDLd], (instregex "VSQRTPDYm")>;
+
+def WriteVSQRTPS: SchedWriteRes<[JFPM]> {
+  let Latency = 42;
+  let ResourceCycles = [42];
+}
+def : InstRW<[WriteVSQRTPS], (instregex "VSQRTPSYr")>;
+
+def WriteVSQRTPSLd: SchedWriteRes<[JFPM]> {
+  let Latency = 47;
+  let ResourceCycles = [42];
+}
+def : InstRW<[WriteVSQRTPSLd], (instregex "VSQRTPSYm")>;
+
+def WriteJVZEROALL: SchedWriteRes<[]> {
+  let Latency = 90;
+  let NumMicroOps = 73;
+}
+def : InstRW<[WriteJVZEROALL], (instregex "VZEROALL")>;
+
+def WriteJVZEROUPPER: SchedWriteRes<[]> {
+  let Latency = 46;
+  let NumMicroOps = 37;
+}
+def : InstRW<[WriteJVZEROUPPER], (instregex "VZEROUPPER")>;
+
 } // SchedModel
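Reviewer note, for cross-checking the test deltas below against these defs: the second number in each "sched: [Lat:RThru]" comment is Cycles/NumUnits of the bottleneck resource, where a ProcResGroup such as JFPFltCluster contributes the combined unit count of its members (JFPA + JFPM = 2 units). A small self-contained harness that reproduces a few of the expected pairs (plain C++; Use and printSched are illustrative names, not LLVM API):

#include <algorithm>
#include <cstdio>
#include <vector>

// One consumed resource: cycles it is held, and how many units the
// (possibly grouped) resource provides on Jaguar.
struct Use { unsigned Cycles, NumUnits; };

// Latency plus resource usage, as in a SchedWriteRes record above.
static void printSched(const char *Name, unsigned Lat, std::vector<Use> Uses) {
  double RThroughput = 0.0;
  for (const Use &U : Uses) // the bottleneck resource dominates
    RThroughput = std::max(RThroughput, double(U.Cycles) / U.NumUnits);
  std::printf("%s sched: [%u:%.2f]\n", Name, Lat, RThroughput);
}

int main() {
  // WriteVDPPS: JFPM and JFPA (1 unit each) held for 6 cycles apiece.
  printSched("VDPPSYrr", 12, {{6, 1}, {6, 1}}); // [12:6.00]
  // WriteAVX11: JFPFltCluster groups JFPA+JFPM -> 2 units, 2 cycles.
  printSched("VANDPSYrr", 6, {{2, 2}});         // [6:1.00]
  // WriteFAddYY: JFPA (1 unit) held for 2 cycles.
  printSched("VADDPDYrr", 3, {{2, 1}});         // [3:2.00]
}

This is also why moving the ymm logic ops onto WriteAVX11 changes their comments from [1:0.50] (default 1-cycle class on a 2-unit resource) to [6:1.00] in the checks below.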
Index: test/CodeGen/X86/avx-schedule.ll
===================================================================
--- test/CodeGen/X86/avx-schedule.ll
+++ test/CodeGen/X86/avx-schedule.ll
@@ -21,14 +21,14 @@
 ;
 ; BTVER2-LABEL: test_addpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fadd <4 x double> %a0, %a1
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -51,14 +51,14 @@
 ;
 ; BTVER2-LABEL: test_addps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fadd <8 x float> %a0, %a1
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -81,14 +81,14 @@
 ;
 ; BTVER2-LABEL: test_addsubpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addsubpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -112,14 +112,14 @@
 ;
 ; BTVER2-LABEL: test_addsubps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_addsubps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -145,16 +145,16 @@
 ;
 ; BTVER2-LABEL: test_andnotpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andnotpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = bitcast <4 x double> %a0 to <4 x i64>
   %2 = bitcast <4 x double> %a1 to <4 x i64>
@@ -186,16 +186,16 @@
 ;
 ; BTVER2-LABEL: test_andnotps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andnotps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = bitcast <8 x float> %a0 to <4 x i64>
   %2 = bitcast <8 x float> %a1 to <4 x i64>
@@ -227,16 +227,16 @@
 ;
 ; BTVER2-LABEL: test_andpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = bitcast <4 x double> %a0 to <4 x i64>
   %2 = bitcast <4 x double> %a1 to <4 x i64>
@@ -266,16 +266,16 @@
 ;
 ; BTVER2-LABEL: test_andps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_andps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = bitcast <8 x float> %a0 to <4 x i64>
   %2 = bitcast <8 x float> %a1 to <4 x i64>
@@ -305,15 +305,15 @@
 ;
 ; BTVER2-LABEL: test_blendpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [6:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blendpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [6:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32>
@@ -338,13 +338,13 @@
 ;
 ; BTVER2-LABEL: test_blendps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
+; BTVER2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [6:1.00]
 ; BTVER2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blendps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
+; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [6:1.00]
 ; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32>
@@ -368,14 +368,14 @@
 ;
 ; BTVER2-LABEL: test_blendvpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
+; BTVER2-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blendvpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
+; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
   %2 = load <4 x double>, <4 x double> *%a3, align 32
@@ -399,14 +399,14 @@
 ;
 ; BTVER2-LABEL: test_blendvps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
+; BTVER2-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blendvps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [3:3.00]
+; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:3.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
   %2 = load <8 x float>, <8 x float> *%a3, align 32
@@ -453,12 +453,12 @@
 ;
 ; BTVER2-LABEL: test_broadcastsd_ymm:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:1.00]
+; BTVER2-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_broadcastsd_ymm:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [6:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = load double, double *%a0, align 8
   %2 = insertelement <4 x double> undef, double %1, i32 0
@@ -479,12 +479,12 @@
 ;
 ; BTVER2-LABEL: test_broadcastss:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [5:1.00]
+; BTVER2-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_broadcastss:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = load float, float *%a0, align 4
   %2 = insertelement <4 x float> undef, float %1, i32 0
@@ -505,12 +505,12 @@
 ;
 ; BTVER2-LABEL: test_broadcastss_ymm:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:1.00]
+; BTVER2-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_broadcastss_ymm:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [6:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = load float, float *%a0, align 4
   %2 = insertelement <8 x float> undef, float %1, i32 0
@@ -535,16 +535,16 @@
 ;
 ; BTVER2-LABEL: test_cmppd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
-; BTVER2-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; BTVER2-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [2:2.00]
+; BTVER2-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
+; BTVER2-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cmppd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
-; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [2:2.00]
+; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
+; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fcmp oeq <4 x double> %a0, %a1
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -573,16 +573,16 @@
 ;
 ; BTVER2-LABEL: test_cmpps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
-; BTVER2-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; BTVER2-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [2:2.00]
+; BTVER2-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
+; BTVER2-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cmpps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
-; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
-; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [2:2.00]
+; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
+; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fcmp oeq <8 x float> %a0, %a1
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -611,16 +611,16 @@
 ;
 ; BTVER2-LABEL: test_cvtdq2pd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:1.00]
-; BTVER2-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:2.00]
+; BTVER2-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtdq2pd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [8:2.00]
+; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = sitofp <4 x i32> %a0 to <4 x double>
   %2 = load <4 x i32>, <4 x i32> *%a1, align 16
@@ -648,16 +648,16 @@
 ;
 ; BTVER2-LABEL: test_cvtdq2ps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:1.00]
-; BTVER2-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:2.00]
+; BTVER2-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtdq2ps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [8:2.00]
+; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = sitofp <8 x i32> %a0 to <8 x float>
   %2 = load <8 x i32>, <8 x i32> *%a1, align 16
@@ -685,14 +685,14 @@
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
 ; BTVER2-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [3:1.00]
-; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtpd2dq:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [8:1.00]
 ; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fptosi <4 x double> %a0 to <4 x i32>
   %2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -718,16 +718,16 @@
 ;
 ; BTVER2-LABEL: test_cvtpd2ps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
-; BTVER2-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [3:1.00]
-; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:2.00]
+; BTVER2-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [6:2.00]
+; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtpd2ps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [8:1.00]
-; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:2.00]
+; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [6:2.00]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fptrunc <4 x double> %a0 to <4 x float>
   %2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -755,14 +755,14 @@
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [8:1.00]
 ; BTVER2-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cvtps2dq:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [8:1.00]
 ; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fptosi <8 x float> %a0 to <8 x i32>
   %2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -786,14 +786,14 @@
 ;
 ; BTVER2-LABEL: test_divpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [19:19.00]
-; BTVER2-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [24:19.00]
+; BTVER2-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
+; BTVER2-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_divpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [19:19.00]
-; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [24:19.00]
+; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
+; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fdiv <4 x double> %a0, %a1
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -816,14 +816,14 @@
 ;
 ; BTVER2-LABEL: test_divps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [19:19.00]
-; BTVER2-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [24:19.00]
+; BTVER2-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
+; BTVER2-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_divps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [19:19.00]
-; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [24:19.00]
+; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [38:38.00]
+; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [43:38.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fdiv <8 x float> %a0, %a1
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -846,14 +846,14 @@
 ;
 ; BTVER2-LABEL: test_dpps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:6.00]
+; BTVER2-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [17:6.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_dpps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [12:6.00]
+; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [17:6.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
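Reviewer note on the folded-load deltas in these checks: the memory forms keep the same execution-resource cost (so RThroughput is unchanged) and add the model's 5-cycle FPU LoadLatency on top of the register-form latency. A toy compile-time check of that rule (C++ sketch only; Sched/fold are illustrative names, and the constant mirrors "let LoadLatency = 5" in the btver2 model):

constexpr unsigned LoadLatency = 5; // from the btver2 SchedModel above

struct Sched {
  unsigned Lat;
  double RThru;
};

// Memory form: same bottleneck resource, five extra cycles to the result.
constexpr Sched fold(Sched Reg) { return {Reg.Lat + LoadLatency, Reg.RThru}; }

static_assert(fold({38, 38.0}).Lat == 43, "vdivpd (%rdi) -> [43:38.00]");
static_assert(fold({12, 6.0}).Lat == 17, "vdpps (%rdi) -> [17:6.00]");
static_assert(fold({3, 2.0}).Lat == 8, "vaddpd (%rdi) -> [8:2.00]");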
@@ -874,20 +874,20 @@
 ; HASWELL: # BB#0:
 ; HASWELL-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [4:1.00]
-; HASWELL-NEXT: vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT: vzeroupper # sched: [1:1.00]
 ; HASWELL-NEXT: retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_extractf128:
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
-; BTVER2-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
+; BTVER2-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [6:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_extractf128:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:1.00]
-; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [6:0.50]
+; ZNVER1-NEXT: vzeroupper # sched: [46:46.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32>
   %2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32>
@@ -910,14 +910,14 @@
 ;
 ; BTVER2-LABEL: test_haddpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_haddpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -941,14 +941,14 @@
 ;
 ; BTVER2-LABEL: test_haddps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_haddps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -972,14 +972,14 @@
 ;
 ; BTVER2-LABEL: test_hsubpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_hsubpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -1003,14 +1003,14 @@
 ;
 ; BTVER2-LABEL: test_hsubps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_hsubps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -1036,16 +1036,16 @@
 ;
 ; BTVER2-LABEL: test_insertf128:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
+; BTVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [6:1.00]
 ; BTVER2-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_insertf128:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [6:1.00]
 ; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32>
   %2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32>
@@ -1098,15 +1098,15 @@
 ;
 ; BTVER2-LABEL: test_maskmovpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; BTVER2-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; BTVER2-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [6:1.00]
+; BTVER2-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [11:1.00]
 ; BTVER2-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [6:1.00]
+; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [11:1.00]
 ; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1)
@@ -1133,15 +1133,15 @@
 ;
 ; BTVER2-LABEL: test_maskmovpd_ymm:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
-; BTVER2-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; BTVER2-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [6:2.00]
+; BTVER2-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [11:2.00]
 ; BTVER2-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovpd_ymm:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [6:2.00]
+; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [11:2.00]
 ; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1)
@@ -1168,15 +1168,15 @@
 ;
 ; BTVER2-LABEL: test_maskmovps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; BTVER2-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; BTVER2-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [6:1.00]
+; BTVER2-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [11:1.00]
 ; BTVER2-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [6:1.00]
+; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [11:1.00]
 ; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1)
@@ -1203,15 +1203,15 @@
 ;
 ; BTVER2-LABEL: test_maskmovps_ymm:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
-; BTVER2-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; BTVER2-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [6:2.00]
+; BTVER2-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [11:2.00]
 ; BTVER2-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovps_ymm:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
-; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [6:2.00]
+; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [11:2.00]
 ; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1)
@@ -1236,14 +1236,14 @@
 ;
 ; BTVER2-LABEL: test_maxpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maxpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -1267,14 +1267,14 @@
 ;
 ; BTVER2-LABEL: test_maxps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maxps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -1298,14 +1298,14 @@
 ;
 ; BTVER2-LABEL: test_minpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_minpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -1329,14 +1329,14 @@
 ;
 ; BTVER2-LABEL: test_minps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; BTVER2-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; BTVER2-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_minps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
+; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
+; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -1363,14 +1363,14 @@
 ; BTVER2-LABEL: test_movapd:
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vmovapd (%rdi), %ymm0 # sched: [5:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movapd:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = load <4 x double>, <4 x double> *%a0, align 32
@@ -1397,14 +1397,14 @@
 ; BTVER2-LABEL: test_movaps:
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vmovaps (%rdi), %ymm0 # sched: [5:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movaps:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = load <8 x float>, <8 x float> *%a0, align 32
@@ -1430,16 +1430,16 @@
 ;
 ; BTVER2-LABEL: test_movddup:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00]
-; BTVER2-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
-; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [6:1.00]
+; BTVER2-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [6:1.00]
+; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movddup:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [5:1.00]
-; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
-; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [6:1.00]
+; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [6:1.00]
+; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32>
   %2 = load <4 x double>, <4 x double> *%a1, align 32
@@ -1458,18 +1458,18 @@
 ; HASWELL-LABEL: test_movmskpd:
 ; HASWELL: # BB#0:
 ; HASWELL-NEXT: vmovmskpd %ymm0, %eax # sched: [2:1.00]
-; HASWELL-NEXT: vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT: vzeroupper # sched: [1:1.00]
 ; HASWELL-NEXT: retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_movmskpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.50]
+; BTVER2-NEXT: vmovmskpd %ymm0, %eax # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movmskpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [3:1.00]
+; ZNVER1-NEXT: vzeroupper # sched: [46:46.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
   ret i32 %1
@@ -1486,18 +1486,18 @@
 ; HASWELL-LABEL: test_movmskps:
 ; HASWELL: # BB#0:
 ; HASWELL-NEXT: vmovmskps %ymm0, %eax # sched: [2:1.00]
-; HASWELL-NEXT: vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT: vzeroupper # sched: [1:1.00]
 ; HASWELL-NEXT: retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_movmskps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.50]
+; BTVER2-NEXT: vmovmskps %ymm0, %eax # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movmskps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.50]
-; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [3:1.00]
+; ZNVER1-NEXT: vzeroupper # sched: [46:46.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
   ret i32 %1
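Reviewer note: the ZNVER1 vzeroupper lines now read [46:46.00] because WriteJVZEROUPPER declares no execution resources at all — with an empty write-proc-res list, the new fall-back in getRThroughputFromInstrSchedModel reports the instruction latency as the reciprocal throughput instead of the old [?:0.000000e+00]. A condensed restatement of that branch (C++ sketch only, not the actual LLVM signatures):

#include <optional>

// With no per-resource usage recorded, a valid sched class now reports its
// latency as the reciprocal throughput; invalid classes still yield no
// value (previously rendered as "[?:...]" in the sched comments).
std::optional<double> noResourceFallback(bool HasProcResEntries, bool IsValid,
                                         unsigned Latency) {
  if (!HasProcResEntries && IsValid)
    return Latency; // e.g. VZEROUPPER on btver2: [46:46.00]
  if (!HasProcResEntries)
    return std::nullopt;
  return std::nullopt; // otherwise: min over resource entries, as above
}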
@@ -1519,14 +1519,14 @@
 ;
 ; BTVER2-LABEL: test_movntpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmovntpd %ymm0, (%rdi) # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movntpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fadd <4 x double> %a0, %a0
   store <4 x double> %1, <4 x double> *%a1, align 32, !nontemporal !0
@@ -1548,14 +1548,14 @@
 ;
 ; BTVER2-LABEL: test_movntps:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmovntps %ymm0, (%rdi) # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movntps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = fadd <8 x float> %a0, %a0
   store <8 x float> %1, <8 x float> *%a1, align 32, !nontemporal !0
@@ -1579,16 +1579,16 @@
 ;
 ; BTVER2-LABEL: test_movshdup:
 ; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [6:1.00]
 ; BTVER2-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00]
-; BTVER2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movshdup:
 ; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [6:1.00]
 ; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [5:1.00]
-; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32>
   %2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -1614,16 +1614,16 @@
 ;
 ; BTVER2-LABEL: test_movsldup:
 ; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [6:1.00]
 ; BTVER2-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00]
-; BTVER2-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
-; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movsldup:
 ; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [6:1.00]
 ; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [5:1.00]
-; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
-; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32>
   %2 = load <8 x float>, <8 x float> *%a1, align 32
@@ -1651,16 +1651,16 @@
 ;
 ; BTVER2-LABEL: test_movupd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovupd (%rdi), %ymm0 # sched: [5:1.00]
-; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
+; BTVER2-NEXT: vmovupd (%rdi), %ymm0 # sched: [6:1.00]
+; BTVER2-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmovupd %ymm0, (%rsi) # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movupd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:1.00]
+; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = load <4 x double>, <4 x double> *%a0, align 1
   %2 = fadd <4 x double> %1, %1
@@ -1687,16 +1687,16 @@
 ;
 ; BTVER2-LABEL: test_movups:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmovups (%rdi), %ymm0 # sched: [5:1.00]
-; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; BTVER2-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
+; BTVER2-NEXT: vmovups (%rdi), %ymm0 # sched: [6:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
+; BTVER2-NEXT: vmovups %ymm0, (%rsi) # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_movups:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:1.00]
+; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:2.00]
+; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [6:1.00]
 ; ZNVER1-NEXT: retq # sched: [4:1.00]
   %1 = load <8 x float>, <8 x float> *%a0, align 1
   %2 = fadd <8 x float> %1, %1
@@ -1719,14 +1719,14 @@
 ;
 ; BTVER2-LABEL: test_mulpd:
 ; BTVER2: # BB#0:
-; BTVER2-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
-; BTVER2-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00]
+; BTVER2-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00]
BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_mulpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:4.00] +; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [9:4.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fmul <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -1749,14 +1749,14 @@ ; ; BTVER2-LABEL: test_mulps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_mulps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [7:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fmul <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -1781,16 +1781,16 @@ ; ; BTVER2-LABEL: orpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [6:1.00] ; BTVER2-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: orpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [6:1.00] ; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> @@ -1820,16 +1820,16 @@ ; ; BTVER2-LABEL: test_orps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [6:1.00] ; BTVER2-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_orps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [6:1.00] ; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> @@ -1859,15 +1859,15 @@ ; ; BTVER2-LABEL: test_permilpd: ; BTVER2: # BB#0: +; BTVER2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [6:1.00] ; BTVER2-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [6:1.00] -; BTVER2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50] ; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_permilpd: ; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [6:1.00] ; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [6:1.00] -; ZNVER1-NEXT: 
vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> @@ -1894,16 +1894,16 @@ ; ; BTVER2-LABEL: test_permilpd_ymm: ; BTVER2: # BB#0: +; BTVER2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [6:1.00] ; BTVER2-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00] -; BTVER2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50] -; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_permilpd_ymm: ; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [6:1.00] ; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [6:1.00] -; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50] -; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> %2 = load <4 x double>, <4 x double> *%a1, align 32 @@ -1929,15 +1929,15 @@ ; ; BTVER2-LABEL: test_permilps: ; BTVER2: # BB#0: +; BTVER2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [6:1.00] ; BTVER2-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00] -; BTVER2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50] ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_permilps: ; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [6:1.00] ; ZNVER1-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [6:1.00] -; ZNVER1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> @@ -1964,16 +1964,16 @@ ; ; BTVER2-LABEL: test_permilps_ymm: ; BTVER2: # BB#0: +; BTVER2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [6:1.00] ; BTVER2-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00] -; BTVER2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50] -; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_permilps_ymm: ; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [6:1.00] ; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [6:1.00] -; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50] -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 @@ -2028,13 +2028,13 @@ ; ; BTVER2-LABEL: test_permilvarpd_ymm: ; BTVER2: # BB#0: -; BTVER2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [3:3.00] ; BTVER2-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_permilvarpd_ymm: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, 
%ymm0 # sched: [3:3.00] ; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1) @@ -2090,13 +2090,13 @@ ; ; BTVER2-LABEL: test_permilvarps_ymm: ; BTVER2: # BB#0: -; BTVER2-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [3:3.00] ; BTVER2-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_permilvarps_ymm: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [3:3.00] ; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1) @@ -2125,14 +2125,14 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:1.00] ; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_rcpps: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [7:1.00] ; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00] -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 @@ -2160,15 +2160,15 @@ ; BTVER2-LABEL: test_roundpd: ; BTVER2: # BB#0: ; BTVER2-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [8:1.00] -; BTVER2-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_roundpd: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [8:1.00] -; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7) %2 = load <4 x double>, <4 x double> *%a1, align 32 @@ -2196,15 +2196,15 @@ ; BTVER2-LABEL: test_roundps: ; BTVER2: # BB#0: ; BTVER2-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [8:1.00] -; BTVER2-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_roundps: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [8:1.00] -; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7) %2 = load <8 x float>, <8 x float> *%a1, align 32 @@ -2231,16 +2231,16 @@ ; ; BTVER2-LABEL: test_rsqrtps: ; 
BTVER2: # BB#0: -; BTVER2-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:1.00] -; BTVER2-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:2.00] +; BTVER2-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_rsqrtps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:1.00] -; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:1.00] -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [7:2.00] +; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [2:2.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 @@ -2267,16 +2267,16 @@ ; ; BTVER2-LABEL: test_shufpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50] +; BTVER2-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [6:1.00] ; BTVER2-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00] -; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_shufpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50] +; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [6:1.00] ; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [6:1.00] -; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -2300,13 +2300,13 @@ ; ; BTVER2-LABEL: test_shufps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50] +; BTVER2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [6:1.00] ; BTVER2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_shufps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50] +; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [6:1.00] ; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [6:1.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> @@ -2332,16 +2332,16 @@ ; ; BTVER2-LABEL: test_sqrtpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [26:21.00] -; BTVER2-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [21:21.00] -; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [59:54.00] +; BTVER2-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [54:54.00] +; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_sqrtpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [26:21.00] -; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [21:21.00] -; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] 
+; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [59:54.00] +; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [54:54.00] +; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0) %2 = load <4 x double>, <4 x double> *%a1, align 32 @@ -2368,16 +2368,16 @@ ; ; BTVER2-LABEL: test_sqrtps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vsqrtps (%rdi), %ymm1 # sched: [26:21.00] -; BTVER2-NEXT: vsqrtps %ymm0, %ymm0 # sched: [21:21.00] -; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vsqrtps %ymm0, %ymm0 # sched: [54:54.00] +; BTVER2-NEXT: vsqrtps (%rdi), %ymm1 # sched: [47:42.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_sqrtps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [26:21.00] -; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [21:21.00] -; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [54:54.00] +; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [47:42.00] +; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 @@ -2402,14 +2402,14 @@ ; ; BTVER2-LABEL: test_subpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_subpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fsub <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -2432,14 +2432,14 @@ ; ; BTVER2-LABEL: test_subps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; BTVER2-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_subps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [8:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = fsub <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -2469,18 +2469,18 @@ ; BTVER2-LABEL: test_testpd: ; BTVER2: # BB#0: ; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50] -; BTVER2-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vtestpd %xmm1, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: setb %al # sched: [1:0.50] -; BTVER2-NEXT: vtestpd (%rdi), %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:1.00] ; BTVER2-NEXT: adcl $0, %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_testpd: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50] -; ZNVER1-NEXT: vtestpd %xmm1, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vtestpd %xmm1, %xmm0 # 
sched: [3:1.00] ; ZNVER1-NEXT: setb %al # sched: [1:0.50] -; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [6:1.00] +; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:1.00] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1) @@ -2509,26 +2509,26 @@ ; HASWELL-NEXT: setb %al # sched: [1:0.50] ; HASWELL-NEXT: vtestpd (%rdi), %ymm0 # sched: [5:0.50] ; HASWELL-NEXT: adcl $0, %eax # sched: [2:0.50] -; HASWELL-NEXT: vzeroupper # sched: [1:0.00] +; HASWELL-NEXT: vzeroupper # sched: [1:1.00] ; HASWELL-NEXT: retq # sched: [1:1.00] ; ; BTVER2-LABEL: test_testpd_ymm: ; BTVER2: # BB#0: ; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50] -; BTVER2-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vtestpd %ymm1, %ymm0 # sched: [3:1.00] ; BTVER2-NEXT: setb %al # sched: [1:0.50] -; BTVER2-NEXT: vtestpd (%rdi), %ymm0 # sched: [6:1.00] +; BTVER2-NEXT: vtestpd (%rdi), %ymm0 # sched: [9:3.00] ; BTVER2-NEXT: adcl $0, %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_testpd_ymm: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50] -; ZNVER1-NEXT: vtestpd %ymm1, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vtestpd %ymm1, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: setb %al # sched: [1:0.50] -; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [9:3.00] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50] -; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00] +; ZNVER1-NEXT: vzeroupper # sched: [46:46.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -2560,18 +2560,18 @@ ; BTVER2-LABEL: test_testps: ; BTVER2: # BB#0: ; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50] -; BTVER2-NEXT: vtestps %xmm1, %xmm0 # sched: [1:0.50] +; BTVER2-NEXT: vtestps %xmm1, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: setb %al # sched: [1:0.50] -; BTVER2-NEXT: vtestps (%rdi), %xmm0 # sched: [6:1.00] +; BTVER2-NEXT: vtestps (%rdi), %xmm0 # sched: [8:1.00] ; BTVER2-NEXT: adcl $0, %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_testps: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50] -; ZNVER1-NEXT: vtestps %xmm1, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vtestps %xmm1, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: setb %al # sched: [1:0.50] -; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [6:1.00] +; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [8:1.00] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1) @@ -2600,26 +2600,26 @@ ; HASWELL-NEXT: setb %al # sched: [1:0.50] ; HASWELL-NEXT: vtestps (%rdi), %ymm0 # sched: [5:0.50] ; HASWELL-NEXT: adcl $0, %eax # sched: [2:0.50] -; HASWELL-NEXT: vzeroupper # sched: [1:0.00] +; HASWELL-NEXT: vzeroupper # sched: [1:1.00] ; HASWELL-NEXT: retq # sched: [1:1.00] ; ; BTVER2-LABEL: test_testps_ymm: ; BTVER2: # BB#0: ; BTVER2-NEXT: xorl %eax, %eax # sched: [1:0.50] -; BTVER2-NEXT: vtestps %ymm1, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vtestps %ymm1, %ymm0 # sched: [3:1.00] ; BTVER2-NEXT: setb %al # sched: [1:0.50] -; BTVER2-NEXT: vtestps (%rdi), %ymm0 # sched: [6:1.00] +; BTVER2-NEXT: vtestps (%rdi), %ymm0 # sched: [9:3.00] ; BTVER2-NEXT: adcl $0, %eax # sched: [1:0.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_testps_ymm: ; ZNVER1: # 
BB#0: ; ZNVER1-NEXT: xorl %eax, %eax # sched: [1:0.50] -; ZNVER1-NEXT: vtestps %ymm1, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vtestps %ymm1, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: setb %al # sched: [1:0.50] -; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [9:3.00] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.50] -; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00] +; ZNVER1-NEXT: vzeroupper # sched: [46:46.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -2648,14 +2648,14 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50] ; BTVER2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00] -; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_unpckhpd: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50] ; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [6:1.00] -; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -2713,14 +2713,14 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50] ; BTVER2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00] -; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_unpcklpd: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50] ; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [6:1.00] -; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -2776,16 +2776,16 @@ ; ; BTVER2-LABEL: test_xorpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [6:1.00] ; BTVER2-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_xorpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [6:1.00] ; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> @@ -2815,16 +2815,16 @@ ; ; BTVER2-LABEL: test_xorps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; BTVER2-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [6:1.00] ; BTVER2-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; 
BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_xorps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [6:1.00] ; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [6:1.00] -; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> @@ -2845,17 +2845,17 @@ ; ; HASWELL-LABEL: test_zeroall: ; HASWELL: # BB#0: -; HASWELL-NEXT: vzeroall # sched: [1:0.00] +; HASWELL-NEXT: vzeroall # sched: [1:1.00] ; HASWELL-NEXT: retq # sched: [1:1.00] ; ; BTVER2-LABEL: test_zeroall: ; BTVER2: # BB#0: -; BTVER2-NEXT: vzeroall # sched: [?:0.000000e+00] +; BTVER2-NEXT: vzeroall # sched: [90:90.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_zeroall: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vzeroall # sched: [?:0.000000e+00] +; ZNVER1-NEXT: vzeroall # sched: [90:90.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] call void @llvm.x86.avx.vzeroall() ret void @@ -2870,17 +2870,17 @@ ; ; HASWELL-LABEL: test_zeroupper: ; HASWELL: # BB#0: -; HASWELL-NEXT: vzeroupper # sched: [1:0.00] +; HASWELL-NEXT: vzeroupper # sched: [1:1.00] ; HASWELL-NEXT: retq # sched: [1:1.00] ; ; BTVER2-LABEL: test_zeroupper: ; BTVER2: # BB#0: -; BTVER2-NEXT: vzeroupper # sched: [?:0.000000e+00] +; BTVER2-NEXT: vzeroupper # sched: [46:46.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_zeroupper: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vzeroupper # sched: [?:0.000000e+00] +; ZNVER1-NEXT: vzeroupper # sched: [46:46.00] ; ZNVER1-NEXT: retq # sched: [4:1.00] call void @llvm.x86.avx.vzeroupper() ret void Index: test/CodeGen/X86/recip-fastmath.ll =================================================================== --- test/CodeGen/X86/recip-fastmath.ll +++ test/CodeGen/X86/recip-fastmath.ll @@ -278,7 +278,7 @@ ; ; BTVER2-LABEL: v4f32_no_estimate: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00] ; BTVER2-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [19:19.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; @@ -340,7 +340,7 @@ ; ; BTVER2-LABEL: v4f32_one_step: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00] ; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00] ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00] ; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00] @@ -439,7 +439,7 @@ ; ; BTVER2-LABEL: v4f32_two_step: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00] ; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00] ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [2:1.00] ; BTVER2-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00] @@ -541,7 +541,7 @@ ; BTVER2-LABEL: v8f32_no_estimate: ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovaps {{.*#+}} ymm1 = 
[1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] -; BTVER2-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [19:19.00] +; BTVER2-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [38:38.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: v8f32_no_estimate: @@ -611,10 +611,10 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] ; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00] -; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: v8f32_one_step: @@ -723,14 +723,14 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] ; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00] -; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:1.00] -; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: v8f32_two_step: Index: test/CodeGen/X86/recip-fastmath2.ll =================================================================== --- test/CodeGen/X86/recip-fastmath2.ll +++ test/CodeGen/X86/recip-fastmath2.ll @@ -392,7 +392,7 @@ ; ; BTVER2-LABEL: v4f32_one_step2: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00] ; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00] ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00] ; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00] @@ -489,7 +489,7 @@ ; ; BTVER2-LABEL: v4f32_one_step_2_divs: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00] ; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00] ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00] ; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # 
sched: [3:1.00] @@ -604,7 +604,7 @@ ; ; BTVER2-LABEL: v4f32_two_step2: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [6:1.00] ; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00] ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [2:1.00] ; BTVER2-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00] @@ -730,11 +730,11 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] ; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00] -; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: v8f32_one_step2: @@ -836,12 +836,12 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] ; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00] -; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [7:1.00] -; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [7:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: v8f32_one_step_2_divs: @@ -965,15 +965,15 @@ ; BTVER2: # BB#0: ; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] ; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00] -; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:1.00] -; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: 
[3:2.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: v8f32_two_step2: @@ -1119,7 +1119,7 @@ ; BTVER2-LABEL: v8f32_no_step2: ; BTVER2: # BB#0: ; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00] -; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: v8f32_no_step2: Index: test/CodeGen/X86/slow-unaligned-mem.ll =================================================================== --- test/CodeGen/X86/slow-unaligned-mem.ll +++ test/CodeGen/X86/slow-unaligned-mem.ll @@ -86,7 +86,7 @@ ; FAST-NOT: not a recognized processor ; FAST-LABEL: store_zeros: ; FAST: # BB#0: -; FAST-NEXT: movl {{[0-9]+}}(%esp), %eax +; FAST: movl {{[0-9]+}}(%esp), %eax ; FAST-NOT: movl call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 64, i32 1, i1 false) ret void Index: test/CodeGen/X86/sse-schedule.ll =================================================================== --- test/CodeGen/X86/sse-schedule.ll +++ test/CodeGen/X86/sse-schedule.ll @@ -1031,7 +1031,7 @@ ; ; BTVER2-LABEL: test_movaps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovaps (%rdi), %xmm0 # sched: [5:1.00] +; BTVER2-NEXT: vmovaps (%rdi), %xmm0 # sched: [6:1.00] ; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] @@ -1077,7 +1077,7 @@ ; ; BTVER2-LABEL: test_movhlps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50] +; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> ret <4 x float> %1 @@ -1174,7 +1174,7 @@ ; ; BTVER2-LABEL: test_movlhps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50] +; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [6:1.00] ; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> @@ -2033,7 +2033,7 @@ ; ; BTVER2-LABEL: test_sqrtss: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovaps (%rdi), %xmm1 # sched: [5:1.00] +; BTVER2-NEXT: vmovaps (%rdi), %xmm1 # sched: [6:1.00] ; BTVER2-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [26:21.00] ; BTVER2-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [26:21.00] ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] @@ -2303,7 +2303,7 @@ ; ; BTVER2-LABEL: test_unpckhps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50] +; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [6:1.00] ; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> @@ -2349,7 +2349,7 @@ ; ; BTVER2-LABEL: test_unpcklps: ; BTVER2: # BB#0: -; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50] +; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [6:1.00] ; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = 
xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> Index: test/CodeGen/X86/sse2-schedule.ll =================================================================== --- test/CodeGen/X86/sse2-schedule.ll +++ test/CodeGen/X86/sse2-schedule.ll @@ -1628,7 +1628,7 @@ ; ; BTVER2-LABEL: test_movapd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovapd (%rdi), %xmm0 # sched: [5:1.00] +; BTVER2-NEXT: vmovapd (%rdi), %xmm0 # sched: [6:1.00] ; BTVER2-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] @@ -2275,7 +2275,7 @@ ; ; BTVER2-LABEL: test_movsd_reg: ; BTVER2: # BB#0: -; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50] +; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [6:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> ret <2 x double> %1 @@ -5736,7 +5736,7 @@ ; ; BTVER2-LABEL: test_sqrtsd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vmovapd (%rdi), %xmm1 # sched: [5:1.00] +; BTVER2-NEXT: vmovapd (%rdi), %xmm1 # sched: [6:1.00] ; BTVER2-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [26:21.00] ; BTVER2-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [26:21.00] ; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] @@ -5963,7 +5963,7 @@ ; ; BTVER2-LABEL: test_unpckhpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50] +; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [6:1.00] ; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [6:1.00] ; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] @@ -6018,7 +6018,7 @@ ; ; BTVER2-LABEL: test_unpcklpd: ; BTVER2: # BB#0: -; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50] +; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [6:1.00] ; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [6:1.00] ; BTVER2-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00]
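
A note on the annotations updated above: each "# sched: [N:R.00]" comment pairs an instruction's latency N (cycles until its result is ready) with its reciprocal throughput R (minimum cycles between issues of independent instances of the same instruction). The C++ sketch below is illustrative only; ResourceUse and rthroughput are hypothetical stand-ins, not LLVM's API. Under those assumptions it shows how a reciprocal-throughput bound falls out of per-resource busy cycles: inverting min(NumUnits / Cycles) over the used resources, as the patch does, equals max(Cycles / NumUnits).

#include <algorithm>
#include <optional>
#include <vector>

// Hypothetical stand-in for one entry of a scheduling model's
// per-resource usage table (not an LLVM type).
struct ResourceUse {
  unsigned Cycles;   // cycles the instruction keeps this resource busy
  unsigned NumUnits; // parallel units implementing this resource
};

// Reciprocal throughput = max over used resources of Cycles / NumUnits.
std::optional<double> rthroughput(const std::vector<ResourceUse> &Uses) {
  double RThru = 0.0;
  for (const ResourceUse &U : Uses) {
    if (!U.Cycles)
      continue; // unused resource contributes no bound
    RThru = std::max(RThru, double(U.Cycles) / U.NumUnits);
  }
  if (RThru == 0.0)
    return std::nullopt; // no resource information available
  return RThru;
}

// Worked example (assumed numbers): a 256-bit FP add on btver2 keeps a
// single-unit FP adder busy for 2 cycles, so rthroughput({{2, 1}}) == 2.0,
// matching the "[3:2.00]" pairs above: latency 3, one such op per 2 cycles.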