Index: llvm/trunk/include/llvm/CodeGen/TargetSchedule.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetSchedule.h
+++ llvm/trunk/include/llvm/CodeGen/TargetSchedule.h
@@ -196,9 +196,9 @@
                                 const MachineInstr *DepMI) const;
 
   /// Compute the reciprocal throughput of the given instruction.
-  Optional<double> computeReciprocalThroughput(const MachineInstr *MI) const;
-  Optional<double> computeReciprocalThroughput(const MCInst &MI) const;
-  Optional<double> computeReciprocalThroughput(unsigned Opcode) const;
+  double computeReciprocalThroughput(const MachineInstr *MI) const;
+  double computeReciprocalThroughput(const MCInst &MI) const;
+  double computeReciprocalThroughput(unsigned Opcode) const;
 };
 
 } // end namespace llvm
Index: llvm/trunk/include/llvm/MC/MCSchedule.h
===================================================================
--- llvm/trunk/include/llvm/MC/MCSchedule.h
+++ llvm/trunk/include/llvm/MC/MCSchedule.h
@@ -362,14 +362,14 @@
                         const MCInst &Inst) const;
 
   // Returns the reciprocal throughput information from a MCSchedClassDesc.
-  static Optional<double>
+  static double
   getReciprocalThroughput(const MCSubtargetInfo &STI,
                           const MCSchedClassDesc &SCDesc);
 
-  static Optional<double>
+  static double
   getReciprocalThroughput(unsigned SchedClass, const InstrItineraryData &IID);
 
-  Optional<double>
+  double
   getReciprocalThroughput(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
                           const MCInst &Inst) const;
 
Index: llvm/trunk/lib/CodeGen/TargetSchedule.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/TargetSchedule.cpp
+++ llvm/trunk/lib/CodeGen/TargetSchedule.cpp
@@ -322,7 +322,7 @@
   return 0;
 }
 
-Optional<double>
+double
 TargetSchedModel::computeReciprocalThroughput(const MachineInstr *MI) const {
   if (hasInstrItineraries()) {
     unsigned SchedClass = MI->getDesc().getSchedClass();
@@ -332,10 +332,11 @@
 
   if (hasInstrSchedModel())
     return MCSchedModel::getReciprocalThroughput(*STI, *resolveSchedClass(MI));
-  return Optional<double>();
+
+  return 0.0;
 }
 
-Optional<double>
+double
 TargetSchedModel::computeReciprocalThroughput(unsigned Opcode) const {
   unsigned SchedClass = TII->get(Opcode).getSchedClass();
   if (hasInstrItineraries())
@@ -346,10 +347,11 @@
     if (SCDesc.isValid() && !SCDesc.isVariant())
       return MCSchedModel::getReciprocalThroughput(*STI, SCDesc);
   }
-  return Optional<double>();
+
+  return 0.0;
 }
 
-Optional<double>
+double
 TargetSchedModel::computeReciprocalThroughput(const MCInst &MI) const {
   if (hasInstrSchedModel())
     return SchedModel.getReciprocalThroughput(*STI, *TII, MI);
Index: llvm/trunk/lib/CodeGen/TargetSubtargetInfo.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/TargetSubtargetInfo.cpp
+++ llvm/trunk/lib/CodeGen/TargetSubtargetInfo.cpp
@@ -67,13 +67,12 @@
   return false;
 }
 
-static std::string createSchedInfoStr(unsigned Latency,
-                                      Optional<double> RThroughput) {
+static std::string createSchedInfoStr(unsigned Latency, double RThroughput) {
   static const char *SchedPrefix = " sched: [";
   std::string Comment;
   raw_string_ostream CS(Comment);
-  if (RThroughput.hasValue())
-    CS << SchedPrefix << Latency << format(":%2.2f", RThroughput.getValue())
+  if (RThroughput != 0.0)
+    CS << SchedPrefix << Latency << format(":%2.2f", RThroughput)
        << "]";
   else
     CS << SchedPrefix << Latency << ":?]";
@@ -90,7 +89,7 @@
   TargetSchedModel TSchedModel;
   TSchedModel.init(this);
   unsigned Latency = TSchedModel.computeInstrLatency(&MI);
-  Optional<double> RThroughput = TSchedModel.computeReciprocalThroughput(&MI);
+  double RThroughput = TSchedModel.computeReciprocalThroughput(&MI);
   return createSchedInfoStr(Latency, RThroughput);
 }
 
@@ -109,8 +108,7 @@
                   getInstrInfo()->get(MCI.getOpcode()).getSchedClass());
   } else
     return std::string();
-  Optional<double> RThroughput =
-      TSchedModel.computeReciprocalThroughput(MCI);
+  double RThroughput = TSchedModel.computeReciprocalThroughput(MCI);
   return createSchedInfoStr(Latency, RThroughput);
 }
 
Index: llvm/trunk/lib/MC/MCSchedule.cpp
===================================================================
--- llvm/trunk/lib/MC/MCSchedule.cpp
+++ llvm/trunk/lib/MC/MCSchedule.cpp
@@ -85,7 +85,7 @@
   llvm_unreachable("unsupported variant scheduling class");
 }
 
-Optional<double>
+double
 MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                       const MCSchedClassDesc &SCDesc) {
   Optional<double> Throughput;
@@ -99,18 +99,25 @@
     double Temp = NumUnits * 1.0 / I->Cycles;
     Throughput = Throughput ? std::min(Throughput.getValue(), Temp) : Temp;
   }
-  return Throughput ? 1 / Throughput.getValue() : Throughput;
+  if (Throughput.hasValue())
+    return 1.0 / Throughput.getValue();
+
+  // If no throughput value was calculated, assume that we can execute at the
+  // maximum issue width scaled by number of micro-ops for the schedule class.
+  return ((double)SCDesc.NumMicroOps) / SM.IssueWidth;
 }
 
-Optional<double>
+double
 MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                       const MCInstrInfo &MCII,
                                       const MCInst &Inst) const {
-  Optional<double> Throughput;
   unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
   const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);
+
+  // If there's no valid class, assume that the instruction executes/completes
+  // at the maximum issue width.
   if (!SCDesc->isValid())
-    return Throughput;
+    return 1.0 / IssueWidth;
 
   unsigned CPUID = getProcessorID();
   while (SCDesc->isVariant()) {
@@ -124,7 +131,7 @@
   llvm_unreachable("unsupported variant scheduling class");
 }
 
-Optional<double>
+double
 MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
                                       const InstrItineraryData &IID) {
   Optional<double> Throughput;
@@ -136,5 +143,10 @@
     double Temp = countPopulation(I->getUnits()) * 1.0 / I->getCycles();
     Throughput = Throughput ? std::min(Throughput.getValue(), Temp) : Temp;
   }
-  return Throughput ? 1 / Throughput.getValue() : Throughput;
+  if (Throughput.hasValue())
+    return 1.0 / Throughput.getValue();
+
+  // If there are no execution resources specified for this class, then assume
+  // that it can execute at the maximum default issue width.
+  return 1.0 / DefaultIssueWidth;
 }
Index: llvm/trunk/test/CodeGen/X86/avx-schedule.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx-schedule.ll
+++ llvm/trunk/test/CodeGen/X86/avx-schedule.ll
@@ -1648,8 +1648,8 @@
 ;
 ; ZNVER1-LABEL: test_dpps:
 ; ZNVER1:       # %bb.0:
-; ZNVER1-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
-; ZNVER1-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [100:0.25]
+; ZNVER1-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [100:0.25]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -1711,7 +1711,7 @@
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.33]
 ; ZNVER1-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [8:0.50]
-; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> %2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32> @@ -1764,8 +1764,8 @@ ; ; ZNVER1-LABEL: test_haddpd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -1819,8 +1819,8 @@ ; ; ZNVER1-LABEL: test_haddps: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -1874,8 +1874,8 @@ ; ; ZNVER1-LABEL: test_hsubpd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -1929,8 +1929,8 @@ ; ; ZNVER1-LABEL: test_hsubps: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -2753,7 +2753,7 @@ ; ZNVER1-LABEL: test_movmskpd: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:1.00] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0) ret i32 %1 @@ -2805,7 +2805,7 @@ ; ZNVER1-LABEL: test_movmskps: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:1.00] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0) ret i32 %1 @@ -2873,7 +2873,7 @@ ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: vmovntdq %ymm0, (%rdi) # sched: [1:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "vmovntdq $0, $1", "x,*m"(<4 x i64> %a0, <4 x i64> *%a1) ret void @@ -3533,8 +3533,8 @@ ; ; ZNVER1-LABEL: test_perm2f128: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [100:?] -; ZNVER1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [100:?] 
+; ZNVER1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [100:0.25] +; ZNVER1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [100:0.25] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> @@ -4791,7 +4791,7 @@ ; ZNVER1-NEXT: setb %al # sched: [1:0.25] ; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 @@ -4958,7 +4958,7 @@ ; ZNVER1-NEXT: setb %al # sched: [1:0.25] ; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 @@ -5369,12 +5369,12 @@ ; ; BTVER2-LABEL: test_zeroall: ; BTVER2: # %bb.0: -; BTVER2-NEXT: vzeroall # sched: [90:?] +; BTVER2-NEXT: vzeroall # sched: [90:36.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_zeroall: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vzeroall # sched: [100:?] +; ZNVER1-NEXT: vzeroall # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.avx.vzeroall() ret void @@ -5414,12 +5414,12 @@ ; ; BTVER2-LABEL: test_zeroupper: ; BTVER2: # %bb.0: -; BTVER2-NEXT: vzeroupper # sched: [46:?] +; BTVER2-NEXT: vzeroupper # sched: [46:18.50] ; BTVER2-NEXT: retq # sched: [4:1.00] ; ; ZNVER1-LABEL: test_zeroupper: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.avx.vzeroupper() ret void Index: llvm/trunk/test/CodeGen/X86/avx2-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/avx2-schedule.ll +++ llvm/trunk/test/CodeGen/X86/avx2-schedule.ll @@ -223,7 +223,7 @@ ; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [2:0.25] ; ZNVER1-NEXT: vextracti128 $1, %ymm2, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = add <8 x i32> %a0, %a1 %2 = sub <8 x i32> %a0, %a1 @@ -261,7 +261,7 @@ ; ; ZNVER1-LABEL: test_gatherdpd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0, i8* %a1, <4 x i32> %a2, <2 x double> %a3, i8 2) ret <2 x double> %1 @@ -296,7 +296,7 @@ ; ; ZNVER1-LABEL: test_gatherdpd_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [100:?] 
+; ZNVER1-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0, i8* %a1, <4 x i32> %a2, <4 x double> %a3, i8 8) ret <4 x double> %1 @@ -331,7 +331,7 @@ ; ; ZNVER1-LABEL: test_gatherdps: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0, i8* %a1, <4 x i32> %a2, <4 x float> %a3, i8 2) ret <4 x float> %1 @@ -366,7 +366,7 @@ ; ; ZNVER1-LABEL: test_gatherdps_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0, i8* %a1, <8 x i32> %a2, <8 x float> %a3, i8 4) ret <8 x float> %1 @@ -401,7 +401,7 @@ ; ; ZNVER1-LABEL: test_gatherqpd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0, i8* %a1, <2 x i64> %a2, <2 x double> %a3, i8 2) ret <2 x double> %1 @@ -436,7 +436,7 @@ ; ; ZNVER1-LABEL: test_gatherqpd_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0, i8* %a1, <4 x i64> %a2, <4 x double> %a3, i8 8) ret <4 x double> %1 @@ -471,7 +471,7 @@ ; ; ZNVER1-LABEL: test_gatherqps: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0, i8* %a1, <2 x i64> %a2, <4 x float> %a3, i8 2) ret <4 x float> %1 @@ -511,8 +511,8 @@ ; ; ZNVER1-LABEL: test_gatherqps_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %a0, i8* %a1, <4 x i64> %a2, <4 x float> %a3, i8 4) ret <4 x float> %1 @@ -638,8 +638,8 @@ ; ; ZNVER1-LABEL: test_mpsadbw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) %2 = bitcast <16 x i16> %1 to <32 x i8> @@ -2777,7 +2777,7 @@ ; ; ZNVER1-LABEL: test_pgatherdd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?] 
+; ZNVER1-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0, i8* %a1, <4 x i32> %a2, <4 x i32> %a3, i8 2) ret <4 x i32> %1 @@ -2812,7 +2812,7 @@ ; ; ZNVER1-LABEL: test_pgatherdd_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0, i8* %a1, <8 x i32> %a2, <8 x i32> %a3, i8 2) ret <8 x i32> %1 @@ -2847,7 +2847,7 @@ ; ; ZNVER1-LABEL: test_pgatherdq: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0, i8* %a1, <4 x i32> %a2, <2 x i64> %a3, i8 2) ret <2 x i64> %1 @@ -2882,7 +2882,7 @@ ; ; ZNVER1-LABEL: test_pgatherdq_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0, i8* %a1, <4 x i32> %a2, <4 x i64> %a3, i8 2) ret <4 x i64> %1 @@ -2917,7 +2917,7 @@ ; ; ZNVER1-LABEL: test_pgatherqd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0, i8* %a1, <2 x i64> %a2, <4 x i32> %a3, i8 2) ret <4 x i32> %1 @@ -2957,8 +2957,8 @@ ; ; ZNVER1-LABEL: test_pgatherqd_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %a0, i8* %a1, <4 x i64> %a2, <4 x i32> %a3, i8 2) ret <4 x i32> %1 @@ -2993,7 +2993,7 @@ ; ; ZNVER1-LABEL: test_pgatherqq: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0, i8* %a1, <2 x i64> %a2, <2 x i64> %a3, i8 2) ret <2 x i64> %1 @@ -3028,7 +3028,7 @@ ; ; ZNVER1-LABEL: test_pgatherqq_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0, i8* %a1, <4 x i64> %a2, <4 x i64> %a3, i8 2) ret <4 x i64> %1 @@ -3068,8 +3068,8 @@ ; ; ZNVER1-LABEL: test_phaddd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [100:?] 
+; ZNVER1-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1) %2 = load <8 x i32>, <8 x i32> *%a2, align 32 @@ -3111,8 +3111,8 @@ ; ; ZNVER1-LABEL: test_phaddsw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1) %2 = load <16 x i16>, <16 x i16> *%a2, align 32 @@ -3154,8 +3154,8 @@ ; ; ZNVER1-LABEL: test_phaddw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1) %2 = load <16 x i16>, <16 x i16> *%a2, align 32 @@ -3197,8 +3197,8 @@ ; ; ZNVER1-LABEL: test_phsubd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1) %2 = load <8 x i32>, <8 x i32> *%a2, align 32 @@ -3240,8 +3240,8 @@ ; ; ZNVER1-LABEL: test_phsubsw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1) %2 = load <16 x i16>, <16 x i16> *%a2, align 32 @@ -3283,8 +3283,8 @@ ; ; ZNVER1-LABEL: test_phsubw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [100:?] -; ZNVER1-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1) %2 = load <16 x i16>, <16 x i16> *%a2, align 32 @@ -3419,8 +3419,8 @@ ; ; ZNVER1-LABEL: test_pmaskmovd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [100:?] -; ZNVER1-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [100:?] +; ZNVER1-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [100:0.25] +; ZNVER1-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %a0, <4 x i32> %a1) @@ -3468,8 +3468,8 @@ ; ; ZNVER1-LABEL: test_pmaskmovd_ymm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [100:?] -; ZNVER1-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [100:?] 
+; ZNVER1-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [100:0.25] +; ZNVER1-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: vmovdqa %ymm2, %ymm0 # sched: [2:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %a0, <8 x i32> %a1) @@ -3518,7 +3518,7 @@ ; ZNVER1-LABEL: test_pmaskmovq: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [8:1.00] -; ZNVER1-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [100:?] +; ZNVER1-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %a0, <2 x i64> %a1) @@ -3567,7 +3567,7 @@ ; ZNVER1-LABEL: test_pmaskmovq_ymm: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [9:1.50] -; ZNVER1-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [100:?] +; ZNVER1-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: vmovdqa %ymm2, %ymm0 # sched: [2:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %a0, <4 x i64> %a1) @@ -4127,7 +4127,7 @@ ; ZNVER1-LABEL: test_pmovmskb: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vpmovmskb %ymm0, %eax # sched: [2:2.00] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0) ret i32 %1 Index: llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll +++ llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll @@ -264,8 +264,8 @@ ; ; ZNVER1-LABEL: test_pdep_i32: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx # sched: [100:?] -; ZNVER1-NEXT: pdepl %esi, %edi, %eax # sched: [100:?] +; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx # sched: [100:0.25] +; ZNVER1-NEXT: pdepl %esi, %edi, %eax # sched: [100:0.25] ; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i32, i32 *%a2 @@ -314,8 +314,8 @@ ; ; ZNVER1-LABEL: test_pdep_i64: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [100:?] -; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax # sched: [100:?] +; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [100:0.25] +; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax # sched: [100:0.25] ; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i64, i64 *%a2 @@ -364,8 +364,8 @@ ; ; ZNVER1-LABEL: test_pext_i32: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx # sched: [100:?] -; ZNVER1-NEXT: pextl %esi, %edi, %eax # sched: [100:?] +; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx # sched: [100:0.25] +; ZNVER1-NEXT: pextl %esi, %edi, %eax # sched: [100:0.25] ; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i32, i32 *%a2 @@ -414,8 +414,8 @@ ; ; ZNVER1-LABEL: test_pext_i64: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx # sched: [100:?] -; ZNVER1-NEXT: pextq %rsi, %rdi, %rax # sched: [100:?] 
+; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx # sched: [100:0.25] +; ZNVER1-NEXT: pextq %rsi, %rdi, %rax # sched: [100:0.25] ; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load i64, i64 *%a2 Index: llvm/trunk/test/CodeGen/X86/clzero-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/clzero-schedule.ll +++ llvm/trunk/test/CodeGen/X86/clzero-schedule.ll @@ -12,7 +12,7 @@ ; ZNVER1-LABEL: test_clzero: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: leaq (%rdi), %rax # sched: [1:0.25] -; ZNVER1-NEXT: clzero # sched: [100:?] +; ZNVER1-NEXT: clzero # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.clzero(i8* %p) ret void Index: llvm/trunk/test/CodeGen/X86/f16c-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/f16c-schedule.ll +++ llvm/trunk/test/CodeGen/X86/f16c-schedule.ll @@ -52,8 +52,8 @@ ; ; ZNVER1-LABEL: test_vcvtph2ps_128: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [100:?] -; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [100:0.25] +; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <8 x i16>, <8 x i16> *%a1 @@ -109,8 +109,8 @@ ; ; ZNVER1-LABEL: test_vcvtph2ps_256: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [100:?] -; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [100:0.25] +; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [100:0.25] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <8 x i16>, <8 x i16> *%a1 @@ -160,8 +160,8 @@ ; ; ZNVER1-LABEL: test_vcvtps2ph_128: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [100:?] +; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) %2 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a1, i32 0) @@ -215,9 +215,9 @@ ; ; ZNVER1-LABEL: test_vcvtps2ph_256: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [100:?] -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0) %2 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a1, i32 0) Index: llvm/trunk/test/CodeGen/X86/fma-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/fma-schedule.ll +++ llvm/trunk/test/CodeGen/X86/fma-schedule.ll @@ -187,7 +187,7 @@ ; ZNVER1-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [12:0.50] ; ZNVER1-NEXT: vfmadd231pd {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] 
+; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfmadd132pd $2, $1, $0 \0A\09 vfmadd213pd $2, $1, $0 \0A\09 vfmadd231pd $2, $1, $0 \0A\09 vfmadd132pd $3, $1, $0 \0A\09 vfmadd213pd $3, $1, $0 \0A\09 vfmadd231pd $3, $1, $0", "x,x,x,*m"(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) nounwind ret void @@ -369,7 +369,7 @@ ; ZNVER1-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [12:0.50] ; ZNVER1-NEXT: vfmadd231ps {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfmadd132ps $2, $1, $0 \0A\09 vfmadd213ps $2, $1, $0 \0A\09 vfmadd231ps $2, $1, $0 \0A\09 vfmadd132ps $3, $1, $0 \0A\09 vfmadd213ps $3, $1, $0 \0A\09 vfmadd231ps $3, $1, $0", "x,x,x,*m"(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) nounwind ret void @@ -731,7 +731,7 @@ ; ZNVER1-NEXT: vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [12:0.50] ; ZNVER1-NEXT: vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfmaddsub132pd $2, $1, $0 \0A\09 vfmaddsub213pd $2, $1, $0 \0A\09 vfmaddsub231pd $2, $1, $0 \0A\09 vfmaddsub132pd $3, $1, $0 \0A\09 vfmaddsub213pd $3, $1, $0 \0A\09 vfmaddsub231pd $3, $1, $0", "x,x,x,*m"(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) nounwind ret void @@ -913,7 +913,7 @@ ; ZNVER1-NEXT: vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [12:0.50] ; ZNVER1-NEXT: vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfmaddsub132ps $2, $1, $0 \0A\09 vfmaddsub213ps $2, $1, $0 \0A\09 vfmaddsub231ps $2, $1, $0 \0A\09 vfmaddsub132ps $3, $1, $0 \0A\09 vfmaddsub213ps $3, $1, $0 \0A\09 vfmaddsub231ps $3, $1, $0", "x,x,x,*m"(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) nounwind ret void @@ -1099,7 +1099,7 @@ ; ZNVER1-NEXT: vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [12:0.50] ; ZNVER1-NEXT: vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfmsubadd132pd $2, $1, $0 \0A\09 vfmsubadd213pd $2, $1, $0 \0A\09 vfmsubadd231pd $2, $1, $0 \0A\09 vfmsubadd132pd $3, $1, $0 \0A\09 vfmsubadd213pd $3, $1, $0 \0A\09 vfmsubadd231pd $3, $1, $0", "x,x,x,*m"(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) nounwind ret void @@ -1281,7 +1281,7 @@ ; ZNVER1-NEXT: vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [12:0.50] ; ZNVER1-NEXT: vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] 
+; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfmsubadd132ps $2, $1, $0 \0A\09 vfmsubadd213ps $2, $1, $0 \0A\09 vfmsubadd231ps $2, $1, $0 \0A\09 vfmsubadd132ps $3, $1, $0 \0A\09 vfmsubadd213ps $3, $1, $0 \0A\09 vfmsubadd231ps $3, $1, $0", "x,x,x,*m"(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) nounwind ret void @@ -1467,7 +1467,7 @@ ; ZNVER1-NEXT: vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [12:0.50] ; ZNVER1-NEXT: vfmsub231pd {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfmsub132pd $2, $1, $0 \0A\09 vfmsub213pd $2, $1, $0 \0A\09 vfmsub231pd $2, $1, $0 \0A\09 vfmsub132pd $3, $1, $0 \0A\09 vfmsub213pd $3, $1, $0 \0A\09 vfmsub231pd $3, $1, $0", "x,x,x,*m"(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) nounwind ret void @@ -1649,7 +1649,7 @@ ; ZNVER1-NEXT: vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [12:0.50] ; ZNVER1-NEXT: vfmsub231ps {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfmsub132ps $2, $1, $0 \0A\09 vfmsub213ps $2, $1, $0 \0A\09 vfmsub231ps $2, $1, $0 \0A\09 vfmsub132ps $3, $1, $0 \0A\09 vfmsub213ps $3, $1, $0 \0A\09 vfmsub231ps $3, $1, $0", "x,x,x,*m"(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) nounwind ret void @@ -2011,7 +2011,7 @@ ; ZNVER1-NEXT: vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [12:0.50] ; ZNVER1-NEXT: vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfnmadd132pd $2, $1, $0 \0A\09 vfnmadd213pd $2, $1, $0 \0A\09 vfnmadd231pd $2, $1, $0 \0A\09 vfnmadd132pd $3, $1, $0 \0A\09 vfnmadd213pd $3, $1, $0 \0A\09 vfnmadd231pd $3, $1, $0", "x,x,x,*m"(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) nounwind ret void @@ -2193,7 +2193,7 @@ ; ZNVER1-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [12:0.50] ; ZNVER1-NEXT: vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfnmadd132ps $2, $1, $0 \0A\09 vfnmadd213ps $2, $1, $0 \0A\09 vfnmadd231ps $2, $1, $0 \0A\09 vfnmadd132ps $3, $1, $0 \0A\09 vfnmadd213ps $3, $1, $0 \0A\09 vfnmadd231ps $3, $1, $0", "x,x,x,*m"(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) nounwind ret void @@ -2555,7 +2555,7 @@ ; ZNVER1-NEXT: vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [12:0.50] ; ZNVER1-NEXT: vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] 
+; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfnmsub132pd $2, $1, $0 \0A\09 vfnmsub213pd $2, $1, $0 \0A\09 vfnmsub231pd $2, $1, $0 \0A\09 vfnmsub132pd $3, $1, $0 \0A\09 vfnmsub213pd $3, $1, $0 \0A\09 vfnmsub231pd $3, $1, $0", "x,x,x,*m"(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> *%a3) nounwind ret void @@ -2737,7 +2737,7 @@ ; ZNVER1-NEXT: vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [12:0.50] ; ZNVER1-NEXT: vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [12:0.50] ; ZNVER1-NEXT: #NO_APP -; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: vzeroupper # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "vfnmsub132ps $2, $1, $0 \0A\09 vfnmsub213ps $2, $1, $0 \0A\09 vfnmsub231ps $2, $1, $0 \0A\09 vfnmsub132ps $3, $1, $0 \0A\09 vfnmsub213ps $3, $1, $0 \0A\09 vfnmsub231ps $3, $1, $0", "x,x,x,*m"(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> *%a3) nounwind ret void Index: llvm/trunk/test/CodeGen/X86/fsgsbase-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/fsgsbase-schedule.ll +++ llvm/trunk/test/CodeGen/X86/fsgsbase-schedule.ll @@ -53,7 +53,7 @@ ; ; ZNVER1-LABEL: test_x86_rdfsbase_32: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdfsbasel %eax # sched: [100:?] +; ZNVER1-NEXT: rdfsbasel %eax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %res = call i32 @llvm.x86.rdfsbase.32() ret i32 %res @@ -103,7 +103,7 @@ ; ; ZNVER1-LABEL: test_x86_rdgsbase_32: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdgsbasel %eax # sched: [100:?] +; ZNVER1-NEXT: rdgsbasel %eax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %res = call i32 @llvm.x86.rdgsbase.32() ret i32 %res @@ -153,7 +153,7 @@ ; ; ZNVER1-LABEL: test_x86_rdfsbase_64: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdfsbaseq %rax # sched: [100:?] +; ZNVER1-NEXT: rdfsbaseq %rax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %res = call i64 @llvm.x86.rdfsbase.64() ret i64 %res @@ -203,7 +203,7 @@ ; ; ZNVER1-LABEL: test_x86_rdgsbase_64: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdgsbaseq %rax # sched: [100:?] +; ZNVER1-NEXT: rdgsbaseq %rax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %res = call i64 @llvm.x86.rdgsbase.64() ret i64 %res @@ -253,7 +253,7 @@ ; ; ZNVER1-LABEL: test_x86_wrfsbase_32: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: wrfsbasel %edi # sched: [100:?] +; ZNVER1-NEXT: wrfsbasel %edi # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.wrfsbase.32(i32 %x) ret void @@ -303,7 +303,7 @@ ; ; ZNVER1-LABEL: test_x86_wrgsbase_32: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: wrgsbasel %edi # sched: [100:?] +; ZNVER1-NEXT: wrgsbasel %edi # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.wrgsbase.32(i32 %x) ret void @@ -353,7 +353,7 @@ ; ; ZNVER1-LABEL: test_x86_wrfsbase_64: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: wrfsbaseq %rdi # sched: [100:?] +; ZNVER1-NEXT: wrfsbaseq %rdi # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.wrfsbase.64(i64 %x) ret void @@ -403,7 +403,7 @@ ; ; ZNVER1-LABEL: test_x86_wrgsbase_64: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: wrgsbaseq %rdi # sched: [100:?] 
+; ZNVER1-NEXT: wrgsbaseq %rdi # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.wrgsbase.64(i64 %x) ret void Index: llvm/trunk/test/CodeGen/X86/mmx-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mmx-schedule.ll +++ llvm/trunk/test/CodeGen/X86/mmx-schedule.ll @@ -626,7 +626,7 @@ ; ; ZNVER1-LABEL: test_maskmovq: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: maskmovq %mm1, %mm0 # sched: [100:?] +; ZNVER1-NEXT: maskmovq %mm1, %mm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.mmx.maskmovq(x86_mmx %a0, x86_mmx %a1, i8* %a2) ret void @@ -3096,8 +3096,8 @@ ; ; ZNVER1-LABEL: test_phaddd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: phaddd %mm1, %mm0 # sched: [100:?] -; ZNVER1-NEXT: phaddd (%rdi), %mm0 # sched: [100:?] +; ZNVER1-NEXT: phaddd %mm1, %mm0 # sched: [100:0.25] +; ZNVER1-NEXT: phaddd (%rdi), %mm0 # sched: [100:0.25] ; ZNVER1-NEXT: movq %mm0, %rax # sched: [2:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call x86_mmx @llvm.x86.ssse3.phadd.d(x86_mmx %a0, x86_mmx %a1) @@ -3174,8 +3174,8 @@ ; ; ZNVER1-LABEL: test_phaddsw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: phaddsw %mm1, %mm0 # sched: [100:?] -; ZNVER1-NEXT: phaddsw (%rdi), %mm0 # sched: [100:?] +; ZNVER1-NEXT: phaddsw %mm1, %mm0 # sched: [100:0.25] +; ZNVER1-NEXT: phaddsw (%rdi), %mm0 # sched: [100:0.25] ; ZNVER1-NEXT: movq %mm0, %rax # sched: [2:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call x86_mmx @llvm.x86.ssse3.phadd.sw(x86_mmx %a0, x86_mmx %a1) @@ -3252,8 +3252,8 @@ ; ; ZNVER1-LABEL: test_phaddw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: phaddw %mm1, %mm0 # sched: [100:?] -; ZNVER1-NEXT: phaddw (%rdi), %mm0 # sched: [100:?] +; ZNVER1-NEXT: phaddw %mm1, %mm0 # sched: [100:0.25] +; ZNVER1-NEXT: phaddw (%rdi), %mm0 # sched: [100:0.25] ; ZNVER1-NEXT: movq %mm0, %rax # sched: [2:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call x86_mmx @llvm.x86.ssse3.phadd.w(x86_mmx %a0, x86_mmx %a1) @@ -3330,8 +3330,8 @@ ; ; ZNVER1-LABEL: test_phsubd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: phsubd %mm1, %mm0 # sched: [100:?] -; ZNVER1-NEXT: phsubd (%rdi), %mm0 # sched: [100:?] +; ZNVER1-NEXT: phsubd %mm1, %mm0 # sched: [100:0.25] +; ZNVER1-NEXT: phsubd (%rdi), %mm0 # sched: [100:0.25] ; ZNVER1-NEXT: movq %mm0, %rax # sched: [2:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call x86_mmx @llvm.x86.ssse3.phsub.d(x86_mmx %a0, x86_mmx %a1) @@ -3408,8 +3408,8 @@ ; ; ZNVER1-LABEL: test_phsubsw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: phsubsw %mm1, %mm0 # sched: [100:?] -; ZNVER1-NEXT: phsubsw (%rdi), %mm0 # sched: [100:?] +; ZNVER1-NEXT: phsubsw %mm1, %mm0 # sched: [100:0.25] +; ZNVER1-NEXT: phsubsw (%rdi), %mm0 # sched: [100:0.25] ; ZNVER1-NEXT: movq %mm0, %rax # sched: [2:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call x86_mmx @llvm.x86.ssse3.phsub.sw(x86_mmx %a0, x86_mmx %a1) @@ -3486,8 +3486,8 @@ ; ; ZNVER1-LABEL: test_phsubw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: phsubw %mm1, %mm0 # sched: [100:?] -; ZNVER1-NEXT: phsubw (%rdi), %mm0 # sched: [100:?] 
+; ZNVER1-NEXT: phsubw %mm1, %mm0 # sched: [100:0.25] +; ZNVER1-NEXT: phsubw (%rdi), %mm0 # sched: [100:0.25] ; ZNVER1-NEXT: movq %mm0, %rax # sched: [2:1.00] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call x86_mmx @llvm.x86.ssse3.phsub.w(x86_mmx %a0, x86_mmx %a1) Index: llvm/trunk/test/CodeGen/X86/mwaitx-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/mwaitx-schedule.ll +++ llvm/trunk/test/CodeGen/X86/mwaitx-schedule.ll @@ -22,7 +22,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: leaq (%rdi), %rax # sched: [1:0.25] ; ZNVER1-NEXT: movl %esi, %ecx # sched: [1:0.25] -; ZNVER1-NEXT: monitorx # sched: [100:?] +; ZNVER1-NEXT: monitorx # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.monitorx(i8* %P, i32 %E, i32 %H) ret void @@ -56,7 +56,7 @@ ; ZNVER1-NEXT: movl %edi, %ecx # sched: [1:0.25] ; ZNVER1-NEXT: movl %esi, %eax # sched: [1:0.25] ; ZNVER1-NEXT: movl %edx, %ebx # sched: [1:0.25] -; ZNVER1-NEXT: mwaitx # sched: [100:?] +; ZNVER1-NEXT: mwaitx # sched: [100:0.25] ; ZNVER1-NEXT: popq %rbx # sched: [8:0.50] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.mwaitx(i32 %E, i32 %H, i32 %C) Index: llvm/trunk/test/CodeGen/X86/rdrand-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/rdrand-schedule.ll +++ llvm/trunk/test/CodeGen/X86/rdrand-schedule.ll @@ -50,7 +50,7 @@ ; ; ZNVER1-LABEL: test_rdrand_16: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdrandw %ax # sched: [100:?] +; ZNVER1-NEXT: rdrandw %ax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %call = call {i16, i32} @llvm.x86.rdrand.16() %randval = extractvalue {i16, i32} %call, 0 @@ -95,7 +95,7 @@ ; ; ZNVER1-LABEL: test_rdrand_32: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdrandl %eax # sched: [100:?] +; ZNVER1-NEXT: rdrandl %eax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %call = call {i32, i32} @llvm.x86.rdrand.32() %randval = extractvalue {i32, i32} %call, 0 @@ -140,7 +140,7 @@ ; ; ZNVER1-LABEL: test_rdrand_64: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdrandq %rax # sched: [100:?] +; ZNVER1-NEXT: rdrandq %rax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %call = call {i64, i32} @llvm.x86.rdrand.64() %randval = extractvalue {i64, i32} %call, 0 Index: llvm/trunk/test/CodeGen/X86/rdseed-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/rdseed-schedule.ll +++ llvm/trunk/test/CodeGen/X86/rdseed-schedule.ll @@ -38,7 +38,7 @@ ; ; ZNVER1-LABEL: test_rdseed_16: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdseedw %ax # sched: [100:?] +; ZNVER1-NEXT: rdseedw %ax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %call = call {i16, i32} @llvm.x86.rdseed.16() %randval = extractvalue {i16, i32} %call, 0 @@ -73,7 +73,7 @@ ; ; ZNVER1-LABEL: test_rdseed_32: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdseedl %eax # sched: [100:?] +; ZNVER1-NEXT: rdseedl %eax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %call = call {i32, i32} @llvm.x86.rdseed.32() %randval = extractvalue {i32, i32} %call, 0 @@ -108,7 +108,7 @@ ; ; ZNVER1-LABEL: test_rdseed_64: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: rdseedq %rax # sched: [100:?] 
+; ZNVER1-NEXT: rdseedq %rax # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %call = call {i64, i32} @llvm.x86.rdseed.64() %randval = extractvalue {i64, i32} %call, 0 Index: llvm/trunk/test/CodeGen/X86/schedule-x86_32.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/schedule-x86_32.ll +++ llvm/trunk/test/CodeGen/X86/schedule-x86_32.ll @@ -88,7 +88,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: aaa # sched: [100:?] +; ZNVER1-NEXT: aaa # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] %1 = tail call i8 asm "aaa", "=r,r"(i8 %a0) nounwind @@ -181,8 +181,8 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: aad # sched: [100:?] -; ZNVER1-NEXT: aad $16 # sched: [100:?] +; ZNVER1-NEXT: aad # sched: [100:0.25] +; ZNVER1-NEXT: aad $16 # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm "aad \0A\09 aad $1", "r,i"(i16 %a0, i16 16) nounwind @@ -275,8 +275,8 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: aam # sched: [100:?] -; ZNVER1-NEXT: aam $16 # sched: [100:?] +; ZNVER1-NEXT: aam # sched: [100:0.25] +; ZNVER1-NEXT: aam $16 # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm "aam \0A\09 aam $1", "r,i"(i8 %a0, i8 16) nounwind @@ -360,7 +360,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: aas # sched: [100:?] +; ZNVER1-NEXT: aas # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] %1 = tail call i8 asm "aas", "=r,r"(i8 %a0) nounwind @@ -454,7 +454,7 @@ ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %ecx # sched: [8:0.50] ; ZNVER1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: arpl %ax, (%ecx) # sched: [100:?] +; ZNVER1-NEXT: arpl %ax, (%ecx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] call void asm sideeffect "arpl $0, $1", "r,*m"(i16 %a0, i16 *%a1) @@ -540,8 +540,8 @@ ; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %edx # sched: [5:0.50] ; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %esi # sched: [5:0.50] ; HASWELL-NEXT: #APP -; HASWELL-NEXT: bound %ax, (%esi) # sched: [1:?] -; HASWELL-NEXT: bound %ecx, (%edx) # sched: [1:?] +; HASWELL-NEXT: bound %ax, (%esi) # sched: [1:3.75] +; HASWELL-NEXT: bound %ecx, (%edx) # sched: [1:3.75] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: popl %esi # sched: [6:0.50] ; HASWELL-NEXT: .cfi_def_cfa_offset 4 @@ -625,8 +625,8 @@ ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %esi # sched: [8:0.50] ; ZNVER1-NEXT: movzwl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: bound %ax, (%esi) # sched: [100:?] -; ZNVER1-NEXT: bound %ecx, (%edx) # sched: [100:?] +; ZNVER1-NEXT: bound %ax, (%esi) # sched: [100:0.25] +; ZNVER1-NEXT: bound %ecx, (%edx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: popl %esi # sched: [8:0.50] ; ZNVER1-NEXT: .cfi_def_cfa_offset 4 @@ -714,7 +714,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: daa # sched: [100:?] 
+; ZNVER1-NEXT: daa # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] %1 = tail call i8 asm "daa", "=r,r"(i8 %a0) nounwind @@ -798,7 +798,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movb {{[0-9]+}}(%esp), %al # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: das # sched: [100:?] +; ZNVER1-NEXT: das # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] %1 = tail call i8 asm "das", "=r,r"(i8 %a0) nounwind @@ -1251,7 +1251,7 @@ ; HASWELL-LABEL: test_into: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: into # sched: [1:?] +; HASWELL-NEXT: into # sched: [1:1.00] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] ; @@ -1286,7 +1286,7 @@ ; ZNVER1-LABEL: test_into: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: into # sched: [100:?] +; ZNVER1-NEXT: into # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] call void asm sideeffect "into", ""() @@ -1624,17 +1624,17 @@ ; ZNVER1-LABEL: test_pop_push: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: popl %ds # sched: [100:?] -; ZNVER1-NEXT: popl %es # sched: [100:?] -; ZNVER1-NEXT: popl %ss # sched: [100:?] -; ZNVER1-NEXT: popl %fs # sched: [100:?] -; ZNVER1-NEXT: popl %gs # sched: [100:?] -; ZNVER1-NEXT: pushl %cs # sched: [100:?] -; ZNVER1-NEXT: pushl %ds # sched: [100:?] -; ZNVER1-NEXT: pushl %es # sched: [100:?] -; ZNVER1-NEXT: pushl %ss # sched: [100:?] -; ZNVER1-NEXT: pushl %fs # sched: [100:?] -; ZNVER1-NEXT: pushl %gs # sched: [100:?] +; ZNVER1-NEXT: popl %ds # sched: [100:0.25] +; ZNVER1-NEXT: popl %es # sched: [100:0.25] +; ZNVER1-NEXT: popl %ss # sched: [100:0.25] +; ZNVER1-NEXT: popl %fs # sched: [100:0.25] +; ZNVER1-NEXT: popl %gs # sched: [100:0.25] +; ZNVER1-NEXT: pushl %cs # sched: [100:0.25] +; ZNVER1-NEXT: pushl %ds # sched: [100:0.25] +; ZNVER1-NEXT: pushl %es # sched: [100:0.25] +; ZNVER1-NEXT: pushl %ss # sched: [100:0.25] +; ZNVER1-NEXT: pushl %fs # sched: [100:0.25] +; ZNVER1-NEXT: pushl %gs # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] call void asm sideeffect "pop %DS \0A\09 pop %ES \0A\09 pop %SS \0A\09 pop %FS \0A\09 pop %GS \0A\09 push %CS \0A\09 push %DS \0A\09 push %ES \0A\09 push %SS \0A\09 push %FS \0A\09 push %GS", ""() @@ -1989,9 +1989,9 @@ ; HASWELL-LABEL: test_popa_popf_pusha_pushf: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: popal # sched: [1:?] +; HASWELL-NEXT: popal # sched: [1:4.50] ; HASWELL-NEXT: popfl # sched: [5:0.50] -; HASWELL-NEXT: pushal # sched: [1:?] +; HASWELL-NEXT: pushal # sched: [1:4.75] ; HASWELL-NEXT: pushfl # sched: [1:1.00] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] @@ -2039,10 +2039,10 @@ ; ZNVER1-LABEL: test_popa_popf_pusha_pushf: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: popal # sched: [100:?] -; ZNVER1-NEXT: popfl # sched: [100:?] +; ZNVER1-NEXT: popal # sched: [100:0.25] +; ZNVER1-NEXT: popfl # sched: [100:0.25] ; ZNVER1-NEXT: pushal # sched: [8:0.50] -; ZNVER1-NEXT: pushfl # sched: [100:?] 
+; ZNVER1-NEXT: pushfl # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] call void asm sideeffect "popa \0A\09 popf \0A\09 pusha \0A\09 pushf", ""() Index: llvm/trunk/test/CodeGen/X86/schedule-x86_64.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/schedule-x86_64.ll +++ llvm/trunk/test/CodeGen/X86/schedule-x86_64.ll @@ -2761,10 +2761,10 @@ ; HASWELL-NEXT: btcw %si, %di # sched: [1:0.50] ; HASWELL-NEXT: btrw %si, %di # sched: [1:0.50] ; HASWELL-NEXT: btsw %si, %di # sched: [1:0.50] -; HASWELL-NEXT: btw %si, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btcw %si, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btrw %si, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btsw %si, (%rdx) # sched: [1:?] +; HASWELL-NEXT: btw %si, (%rdx) # sched: [1:2.50] +; HASWELL-NEXT: btcw %si, (%rdx) # sched: [1:2.75] +; HASWELL-NEXT: btrw %si, (%rdx) # sched: [1:2.75] +; HASWELL-NEXT: btsw %si, (%rdx) # sched: [1:2.75] ; HASWELL-NEXT: btw $7, %di # sched: [1:0.50] ; HASWELL-NEXT: btcw $7, %di # sched: [1:0.50] ; HASWELL-NEXT: btrw $7, %di # sched: [1:0.50] @@ -2984,10 +2984,10 @@ ; HASWELL-NEXT: btcl %esi, %edi # sched: [1:0.50] ; HASWELL-NEXT: btrl %esi, %edi # sched: [1:0.50] ; HASWELL-NEXT: btsl %esi, %edi # sched: [1:0.50] -; HASWELL-NEXT: btl %esi, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btcl %esi, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btrl %esi, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btsl %esi, (%rdx) # sched: [1:?] +; HASWELL-NEXT: btl %esi, (%rdx) # sched: [1:2.50] +; HASWELL-NEXT: btcl %esi, (%rdx) # sched: [1:2.75] +; HASWELL-NEXT: btrl %esi, (%rdx) # sched: [1:2.75] +; HASWELL-NEXT: btsl %esi, (%rdx) # sched: [1:2.75] ; HASWELL-NEXT: btl $7, %edi # sched: [1:0.50] ; HASWELL-NEXT: btcl $7, %edi # sched: [1:0.50] ; HASWELL-NEXT: btrl $7, %edi # sched: [1:0.50] @@ -3207,10 +3207,10 @@ ; HASWELL-NEXT: btcq %rsi, %rdi # sched: [1:0.50] ; HASWELL-NEXT: btrq %rsi, %rdi # sched: [1:0.50] ; HASWELL-NEXT: btsq %rsi, %rdi # sched: [1:0.50] -; HASWELL-NEXT: btq %rsi, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btcq %rsi, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btrq %rsi, (%rdx) # sched: [1:?] -; HASWELL-NEXT: btsq %rsi, (%rdx) # sched: [1:?] +; HASWELL-NEXT: btq %rsi, (%rdx) # sched: [1:2.50] +; HASWELL-NEXT: btcq %rsi, (%rdx) # sched: [1:2.75] +; HASWELL-NEXT: btrq %rsi, (%rdx) # sched: [1:2.75] +; HASWELL-NEXT: btsq %rsi, (%rdx) # sched: [1:2.75] ; HASWELL-NEXT: btq $7, %rdi # sched: [1:0.50] ; HASWELL-NEXT: btcq $7, %rdi # sched: [1:0.50] ; HASWELL-NEXT: btrq $7, %rdi # sched: [1:0.50] @@ -3465,7 +3465,7 @@ ; GENERIC-LABEL: test_clc_cld_cmc: ; GENERIC: # %bb.0: ; GENERIC-NEXT: #APP -; GENERIC-NEXT: clc # sched: [1:?] +; GENERIC-NEXT: clc # sched: [1:0.25] ; GENERIC-NEXT: cld # sched: [1:0.33] ; GENERIC-NEXT: cmc # sched: [1:0.33] ; GENERIC-NEXT: #NO_APP @@ -3492,7 +3492,7 @@ ; SANDY-LABEL: test_clc_cld_cmc: ; SANDY: # %bb.0: ; SANDY-NEXT: #APP -; SANDY-NEXT: clc # sched: [1:?] +; SANDY-NEXT: clc # sched: [1:0.25] ; SANDY-NEXT: cld # sched: [1:0.33] ; SANDY-NEXT: cmc # sched: [1:0.33] ; SANDY-NEXT: #NO_APP @@ -3501,7 +3501,7 @@ ; HASWELL-LABEL: test_clc_cld_cmc: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: clc # sched: [1:?] +; HASWELL-NEXT: clc # sched: [1:0.25] ; HASWELL-NEXT: cld # sched: [3:1.00] ; HASWELL-NEXT: cmc # sched: [1:0.25] ; HASWELL-NEXT: #NO_APP @@ -3510,7 +3510,7 @@ ; BROADWELL-LABEL: test_clc_cld_cmc: ; BROADWELL: # %bb.0: ; BROADWELL-NEXT: #APP -; BROADWELL-NEXT: clc # sched: [1:?] 
+; BROADWELL-NEXT: clc # sched: [1:0.25] ; BROADWELL-NEXT: cld # sched: [3:1.00] ; BROADWELL-NEXT: cmc # sched: [1:0.25] ; BROADWELL-NEXT: #NO_APP @@ -3519,7 +3519,7 @@ ; SKYLAKE-LABEL: test_clc_cld_cmc: ; SKYLAKE: # %bb.0: ; SKYLAKE-NEXT: #APP -; SKYLAKE-NEXT: clc # sched: [1:?] +; SKYLAKE-NEXT: clc # sched: [1:0.17] ; SKYLAKE-NEXT: cld # sched: [3:1.00] ; SKYLAKE-NEXT: cmc # sched: [1:0.25] ; SKYLAKE-NEXT: #NO_APP @@ -3528,7 +3528,7 @@ ; SKX-LABEL: test_clc_cld_cmc: ; SKX: # %bb.0: ; SKX-NEXT: #APP -; SKX-NEXT: clc # sched: [1:?] +; SKX-NEXT: clc # sched: [1:0.17] ; SKX-NEXT: cld # sched: [3:1.00] ; SKX-NEXT: cmc # sched: [1:0.25] ; SKX-NEXT: #NO_APP @@ -4292,10 +4292,10 @@ ; ZNVER1-LABEL: test_cmps: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: cmpsb %es:(%rdi), (%rsi) # sched: [100:?] -; ZNVER1-NEXT: cmpsw %es:(%rdi), (%rsi) # sched: [100:?] -; ZNVER1-NEXT: cmpsl %es:(%rdi), (%rsi) # sched: [100:?] -; ZNVER1-NEXT: cmpsq %es:(%rdi), (%rsi) # sched: [100:?] +; ZNVER1-NEXT: cmpsb %es:(%rdi), (%rsi) # sched: [100:0.25] +; ZNVER1-NEXT: cmpsw %es:(%rdi), (%rsi) # sched: [100:0.25] +; ZNVER1-NEXT: cmpsl %es:(%rdi), (%rsi) # sched: [100:0.25] +; ZNVER1-NEXT: cmpsq %es:(%rdi), (%rsi) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "cmpsb \0A\09 cmpsw \0A\09 cmpsl \0A\09 cmpsq", ""() @@ -4711,7 +4711,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: cmpxchg8b (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: cmpxchg16b (%rdi) # sched: [100:?] +; ZNVER1-NEXT: cmpxchg16b (%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "cmpxchg8b $0 \0a\09 cmpxchg16b $0", "*m"(i8 *%a0) nounwind @@ -4785,7 +4785,7 @@ ; ZNVER1-LABEL: test_cpuid: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: cpuid # sched: [100:?] +; ZNVER1-NEXT: cpuid # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "cpuid", ""() nounwind @@ -5366,7 +5366,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: enter $7, $4095 # imm = 0xFFF -; ZNVER1-NEXT: # sched: [100:?] +; ZNVER1-NEXT: # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "enter $0, $1", "i,i"(i8 7, i16 4095) nounwind @@ -6222,12 +6222,12 @@ ; ZNVER1-LABEL: test_in: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: inb $7, %al # sched: [100:?] -; ZNVER1-NEXT: inw $7, %ax # sched: [100:?] -; ZNVER1-NEXT: inl $7, %eax # sched: [100:?] -; ZNVER1-NEXT: inb %dx, %al # sched: [100:?] -; ZNVER1-NEXT: inw %dx, %ax # sched: [100:?] -; ZNVER1-NEXT: inl %dx, %eax # sched: [100:?] +; ZNVER1-NEXT: inb $7, %al # sched: [100:0.25] +; ZNVER1-NEXT: inw $7, %ax # sched: [100:0.25] +; ZNVER1-NEXT: inl $7, %eax # sched: [100:0.25] +; ZNVER1-NEXT: inb %dx, %al # sched: [100:0.25] +; ZNVER1-NEXT: inw %dx, %ax # sched: [100:0.25] +; ZNVER1-NEXT: inl %dx, %eax # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "inb $0, %AL \0A\09 inw $0, %AX \0A\09 inl $0, %EAX \0A\09 inb %DX, %AL \0A\09 inw %DX, %AX \0A\09 inl %DX, %EAX", "i"(i8 7) nounwind @@ -6652,9 +6652,9 @@ ; ZNVER1-LABEL: test_ins: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: insb %dx, %es:(%rdi) # sched: [100:?] -; ZNVER1-NEXT: insw %dx, %es:(%rdi) # sched: [100:?] -; ZNVER1-NEXT: insl %dx, %es:(%rdi) # sched: [100:?] 
+; ZNVER1-NEXT: insb %dx, %es:(%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: insw %dx, %es:(%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: insl %dx, %es:(%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "insb \0A\09 insw \0A\09 insl", ""() @@ -6728,7 +6728,7 @@ ; ZNVER1-LABEL: test_int: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: int $7 # sched: [100:?] +; ZNVER1-NEXT: int $7 # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "int $0", "i"(i8 7) @@ -6811,8 +6811,8 @@ ; ZNVER1-LABEL: test_invlpg_invlpga: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: invlpg (%rdi) # sched: [100:?] -; ZNVER1-NEXT: invlpga %rax, %ecx # sched: [100:?] +; ZNVER1-NEXT: invlpg (%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: invlpga %rax, %ecx # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm sideeffect "invlpg $0 \0A\09 invlpga %rax, %ecx", "*m"(i8 *%a0) nounwind @@ -7365,7 +7365,7 @@ ; ZNVER1-LABEL: test_lahf_sahf: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: lahf # sched: [100:?] +; ZNVER1-NEXT: lahf # sched: [100:0.25] ; ZNVER1-NEXT: sahf # sched: [2:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] @@ -7549,10 +7549,10 @@ ; ZNVER1-LABEL: test_lods: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: lodsb (%rsi), %al # sched: [100:?] -; ZNVER1-NEXT: lodsw (%rsi), %ax # sched: [100:?] -; ZNVER1-NEXT: lodsl (%rsi), %eax # sched: [100:?] -; ZNVER1-NEXT: lodsq (%rsi), %rax # sched: [100:?] +; ZNVER1-NEXT: lodsb (%rsi), %al # sched: [100:0.25] +; ZNVER1-NEXT: lodsw (%rsi), %ax # sched: [100:0.25] +; ZNVER1-NEXT: lodsl (%rsi), %eax # sched: [100:0.25] +; ZNVER1-NEXT: lodsq (%rsi), %rax # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "lodsb \0A\09 lodsw \0A\09 lodsl \0A\09 lodsq", ""() @@ -7843,10 +7843,10 @@ ; ZNVER1-LABEL: test_movs: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: movsb (%rsi), %es:(%rdi) # sched: [100:?] -; ZNVER1-NEXT: movsw (%rsi), %es:(%rdi) # sched: [100:?] -; ZNVER1-NEXT: movsl (%rsi), %es:(%rdi) # sched: [100:?] -; ZNVER1-NEXT: movsq (%rsi), %es:(%rdi) # sched: [100:?] +; ZNVER1-NEXT: movsb (%rsi), %es:(%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: movsw (%rsi), %es:(%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: movsl (%rsi), %es:(%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: movsq (%rsi), %es:(%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "movsb \0A\09 movsw \0A\09 movsl \0A\09 movsq", ""() @@ -8285,13 +8285,13 @@ ; GENERIC-LABEL: test_nop: ; GENERIC: # %bb.0: ; GENERIC-NEXT: #APP -; GENERIC-NEXT: nop # sched: [1:?] -; GENERIC-NEXT: nopw %di # sched: [1:?] -; GENERIC-NEXT: nopw (%rcx) # sched: [1:?] -; GENERIC-NEXT: nopl %esi # sched: [1:?] -; GENERIC-NEXT: nopl (%r8) # sched: [1:?] -; GENERIC-NEXT: nopq %rdx # sched: [1:?] -; GENERIC-NEXT: nopq (%r9) # sched: [1:?] +; GENERIC-NEXT: nop # sched: [1:0.25] +; GENERIC-NEXT: nopw %di # sched: [1:0.25] +; GENERIC-NEXT: nopw (%rcx) # sched: [1:0.25] +; GENERIC-NEXT: nopl %esi # sched: [1:0.25] +; GENERIC-NEXT: nopl (%r8) # sched: [1:0.25] +; GENERIC-NEXT: nopq %rdx # sched: [1:0.25] +; GENERIC-NEXT: nopq (%r9) # sched: [1:0.25] ; GENERIC-NEXT: #NO_APP ; GENERIC-NEXT: retq # sched: [1:1.00] ; @@ -8311,26 +8311,26 @@ ; SLM-LABEL: test_nop: ; SLM: # %bb.0: ; SLM-NEXT: #APP -; SLM-NEXT: nop # sched: [1:?] 
-; SLM-NEXT: nopw %di # sched: [1:?] -; SLM-NEXT: nopw (%rcx) # sched: [1:?] -; SLM-NEXT: nopl %esi # sched: [1:?] -; SLM-NEXT: nopl (%r8) # sched: [1:?] -; SLM-NEXT: nopq %rdx # sched: [1:?] -; SLM-NEXT: nopq (%r9) # sched: [1:?] +; SLM-NEXT: nop # sched: [1:0.50] +; SLM-NEXT: nopw %di # sched: [1:0.50] +; SLM-NEXT: nopw (%rcx) # sched: [1:0.50] +; SLM-NEXT: nopl %esi # sched: [1:0.50] +; SLM-NEXT: nopl (%r8) # sched: [1:0.50] +; SLM-NEXT: nopq %rdx # sched: [1:0.50] +; SLM-NEXT: nopq (%r9) # sched: [1:0.50] ; SLM-NEXT: #NO_APP ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_nop: ; SANDY: # %bb.0: ; SANDY-NEXT: #APP -; SANDY-NEXT: nop # sched: [1:?] -; SANDY-NEXT: nopw %di # sched: [1:?] -; SANDY-NEXT: nopw (%rcx) # sched: [1:?] -; SANDY-NEXT: nopl %esi # sched: [1:?] -; SANDY-NEXT: nopl (%r8) # sched: [1:?] -; SANDY-NEXT: nopq %rdx # sched: [1:?] -; SANDY-NEXT: nopq (%r9) # sched: [1:?] +; SANDY-NEXT: nop # sched: [1:0.25] +; SANDY-NEXT: nopw %di # sched: [1:0.25] +; SANDY-NEXT: nopw (%rcx) # sched: [1:0.25] +; SANDY-NEXT: nopl %esi # sched: [1:0.25] +; SANDY-NEXT: nopl (%r8) # sched: [1:0.25] +; SANDY-NEXT: nopq %rdx # sched: [1:0.25] +; SANDY-NEXT: nopq (%r9) # sched: [1:0.25] ; SANDY-NEXT: #NO_APP ; SANDY-NEXT: retq # sched: [1:1.00] ; @@ -8402,13 +8402,13 @@ ; ZNVER1-LABEL: test_nop: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: nop # sched: [1:?] -; ZNVER1-NEXT: nopw %di # sched: [1:?] -; ZNVER1-NEXT: nopw (%rcx) # sched: [1:?] -; ZNVER1-NEXT: nopl %esi # sched: [1:?] -; ZNVER1-NEXT: nopl (%r8) # sched: [1:?] -; ZNVER1-NEXT: nopq %rdx # sched: [1:?] -; ZNVER1-NEXT: nopq (%r9) # sched: [1:?] +; ZNVER1-NEXT: nop # sched: [1:0.25] +; ZNVER1-NEXT: nopw %di # sched: [1:0.25] +; ZNVER1-NEXT: nopw (%rcx) # sched: [1:0.25] +; ZNVER1-NEXT: nopl %esi # sched: [1:0.25] +; ZNVER1-NEXT: nopl (%r8) # sched: [1:0.25] +; ZNVER1-NEXT: nopq %rdx # sched: [1:0.25] +; ZNVER1-NEXT: nopq (%r9) # sched: [1:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "nop \0A\09 nopw $0 \0A\09 nopw $3 \0A\09 nopl $1 \0A\09 nopl $4 \0A\09 nopq $2 \0A\09 nopq $5", "r,r,r,*m,*m,*m"(i16 %a0, i32 %a1, i64 %a2, i16 *%p0, i32 *%p1, i64 *%p2) nounwind @@ -9334,12 +9334,12 @@ ; ZNVER1-LABEL: test_out: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: outb %al, $7 # sched: [100:?] -; ZNVER1-NEXT: outw %ax, $7 # sched: [100:?] -; ZNVER1-NEXT: outl %eax, $7 # sched: [100:?] -; ZNVER1-NEXT: outb %al, %dx # sched: [100:?] -; ZNVER1-NEXT: outw %ax, %dx # sched: [100:?] -; ZNVER1-NEXT: outl %eax, %dx # sched: [100:?] +; ZNVER1-NEXT: outb %al, $7 # sched: [100:0.25] +; ZNVER1-NEXT: outw %ax, $7 # sched: [100:0.25] +; ZNVER1-NEXT: outl %eax, $7 # sched: [100:0.25] +; ZNVER1-NEXT: outb %al, %dx # sched: [100:0.25] +; ZNVER1-NEXT: outw %ax, %dx # sched: [100:0.25] +; ZNVER1-NEXT: outl %eax, %dx # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "outb %AL, $0 \0A\09 outw %AX, $0 \0A\09 outl %EAX, $0 \0A\09 outb %AL, %DX \0A\09 outw %AX, %DX \0A\09 outl %EAX, %DX", "i"(i8 7) nounwind @@ -9431,9 +9431,9 @@ ; ZNVER1-LABEL: test_outs: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: outsb (%rsi), %dx # sched: [100:?] -; ZNVER1-NEXT: outsw (%rsi), %dx # sched: [100:?] -; ZNVER1-NEXT: outsl (%rsi), %dx # sched: [100:?] 
+; ZNVER1-NEXT: outsb (%rsi), %dx # sched: [100:0.25] +; ZNVER1-NEXT: outsw (%rsi), %dx # sched: [100:0.25] +; ZNVER1-NEXT: outsl (%rsi), %dx # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "outsb \0A\09 outsw \0A\09 outsl", ""() @@ -9458,7 +9458,7 @@ ; SLM-LABEL: test_pause: ; SLM: # %bb.0: ; SLM-NEXT: #APP -; SLM-NEXT: pause # sched: [1:?] +; SLM-NEXT: pause # sched: [1:0.50] ; SLM-NEXT: #NO_APP ; SLM-NEXT: retq # sched: [4:1.00] ; @@ -9507,7 +9507,7 @@ ; ZNVER1-LABEL: test_pause: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: pause # sched: [100:?] +; ZNVER1-NEXT: pause # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "pause", ""() @@ -9608,10 +9608,10 @@ ; ZNVER1-LABEL: test_pop_push: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: popq %fs # sched: [100:?] -; ZNVER1-NEXT: popq %gs # sched: [100:?] -; ZNVER1-NEXT: pushq %fs # sched: [100:?] -; ZNVER1-NEXT: pushq %gs # sched: [100:?] +; ZNVER1-NEXT: popq %fs # sched: [100:0.25] +; ZNVER1-NEXT: popq %gs # sched: [100:0.25] +; ZNVER1-NEXT: pushq %fs # sched: [100:0.25] +; ZNVER1-NEXT: pushq %gs # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "pop %FS \0A\09 pop %GS \0A\09 push %FS \0A\09 push %GS", ""() @@ -10136,16 +10136,16 @@ ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: rclb %dil # sched: [1:0.25] ; ZNVER1-NEXT: rcrb %dil # sched: [1:0.25] -; ZNVER1-NEXT: rclb (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrb (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rclb (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrb (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: rclb $7, %dil # sched: [1:0.25] ; ZNVER1-NEXT: rcrb $7, %dil # sched: [1:0.25] -; ZNVER1-NEXT: rclb $7, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrb $7, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rclb $7, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrb $7, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: rclb %cl, %dil # sched: [1:0.25] ; ZNVER1-NEXT: rcrb %cl, %dil # sched: [1:0.25] -; ZNVER1-NEXT: rclb %cl, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrb %cl, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rclb %cl, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrb %cl, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "rclb $0 \0A\09 rcrb $0 \0A\09 rclb $2 \0A\09 rcrb $2 \0A\09 rclb $3, $0 \0A\09 rcrb $3, $0 \0A\09 rclb $3, $2 \0A\09 rcrb $3, $2 \0A\09 rclb %CL, $0 \0A\09 rcrb %CL, $0 \0A\09 rclb %CL, $2 \0A\09 rcrb %CL, $2", "r,r,*m,i"(i8 %a0, i8 %a1, i8 *%a2, i8 7) @@ -10319,16 +10319,16 @@ ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: rclw %di # sched: [1:0.25] ; ZNVER1-NEXT: rcrw %di # sched: [1:0.25] -; ZNVER1-NEXT: rclw (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrw (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rclw (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrw (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: rclw $7, %di # sched: [1:0.25] ; ZNVER1-NEXT: rcrw $7, %di # sched: [1:0.25] -; ZNVER1-NEXT: rclw $7, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrw $7, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rclw $7, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrw $7, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: rclw %cl, %di # sched: [1:0.25] ; ZNVER1-NEXT: rcrw %cl, %di # sched: [1:0.25] -; ZNVER1-NEXT: rclw %cl, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrw %cl, (%rdx) # sched: [100:?] 
+; ZNVER1-NEXT: rclw %cl, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrw %cl, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "rclw $0 \0A\09 rcrw $0 \0A\09 rclw $2 \0A\09 rcrw $2 \0A\09 rclw $3, $0 \0A\09 rcrw $3, $0 \0A\09 rclw $3, $2 \0A\09 rcrw $3, $2 \0A\09 rclw %CL, $0 \0A\09 rcrw %CL, $0 \0A\09 rclw %CL, $2 \0A\09 rcrw %CL, $2", "r,r,*m,i"(i16 %a0, i16 %a1, i16 *%a2, i8 7) @@ -10502,16 +10502,16 @@ ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: rcll %edi # sched: [1:0.25] ; ZNVER1-NEXT: rcrl %edi # sched: [1:0.25] -; ZNVER1-NEXT: rcll (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrl (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rcll (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrl (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: rcll $7, %edi # sched: [1:0.25] ; ZNVER1-NEXT: rcrl $7, %edi # sched: [1:0.25] -; ZNVER1-NEXT: rcll $7, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrl $7, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rcll $7, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrl $7, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: rcll %cl, %edi # sched: [1:0.25] ; ZNVER1-NEXT: rcrl %cl, %edi # sched: [1:0.25] -; ZNVER1-NEXT: rcll %cl, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrl %cl, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rcll %cl, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrl %cl, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "rcll $0 \0A\09 rcrl $0 \0A\09 rcll $2 \0A\09 rcrl $2 \0A\09 rcll $3, $0 \0A\09 rcrl $3, $0 \0A\09 rcll $3, $2 \0A\09 rcrl $3, $2 \0A\09 rcll %CL, $0 \0A\09 rcrl %CL, $0 \0A\09 rcll %CL, $2 \0A\09 rcrl %CL, $2", "r,r,*m,i"(i32 %a0, i32 %a1, i32 *%a2, i8 7) @@ -10685,16 +10685,16 @@ ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: rclq %rdi # sched: [1:0.25] ; ZNVER1-NEXT: rcrq %rdi # sched: [1:0.25] -; ZNVER1-NEXT: rclq (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrq (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rclq (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrq (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: rclq $7, %rdi # sched: [1:0.25] ; ZNVER1-NEXT: rcrq $7, %rdi # sched: [1:0.25] -; ZNVER1-NEXT: rclq $7, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrq $7, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rclq $7, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrq $7, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: rclq %cl, %rdi # sched: [1:0.25] ; ZNVER1-NEXT: rcrq %cl, %rdi # sched: [1:0.25] -; ZNVER1-NEXT: rclq %cl, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: rcrq %cl, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: rclq %cl, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: rcrq %cl, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "rclq $0 \0A\09 rcrq $0 \0A\09 rclq $2 \0A\09 rcrq $2 \0A\09 rclq $3, $0 \0A\09 rcrq $3, $0 \0A\09 rclq $3, $2 \0A\09 rcrq $3, $2 \0A\09 rclq %CL, $0 \0A\09 rcrq %CL, $0 \0A\09 rclq %CL, $2 \0A\09 rcrq %CL, $2", "r,r,*m,i"(i64 %a0, i64 %a1, i64 *%a2, i8 7) @@ -10777,8 +10777,8 @@ ; ZNVER1-LABEL: test_rdmsr_wrmsr: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: rdmsr # sched: [100:?] -; ZNVER1-NEXT: wrmsr # sched: [100:?] +; ZNVER1-NEXT: rdmsr # sched: [100:0.25] +; ZNVER1-NEXT: wrmsr # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "rdmsr \0A\09 wrmsr", ""() @@ -10817,7 +10817,7 @@ ; HASWELL-LABEL: test_rdpmc: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: rdpmc # sched: [1:?] 
+; HASWELL-NEXT: rdpmc # sched: [1:8.50] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retq # sched: [7:1.00] ; @@ -10852,7 +10852,7 @@ ; ZNVER1-LABEL: test_rdpmc: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: rdpmc # sched: [100:?] +; ZNVER1-NEXT: rdpmc # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "rdpmc", ""() @@ -10935,8 +10935,8 @@ ; ZNVER1-LABEL: test_rdtsc_rdtscp: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: rdtsc # sched: [100:?] -; ZNVER1-NEXT: rdtscp # sched: [100:?] +; ZNVER1-NEXT: rdtsc # sched: [100:0.25] +; ZNVER1-NEXT: rdtscp # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "rdtsc \0A\09 rdtscp", ""() @@ -13510,10 +13510,10 @@ ; ZNVER1-LABEL: test_scas: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: scasb %es:(%rdi), %al # sched: [100:?] -; ZNVER1-NEXT: scasw %es:(%rdi), %ax # sched: [100:?] -; ZNVER1-NEXT: scasl %es:(%rdi), %eax # sched: [100:?] -; ZNVER1-NEXT: scasq %es:(%rdi), %rax # sched: [100:?] +; ZNVER1-NEXT: scasb %es:(%rdi), %al # sched: [100:0.25] +; ZNVER1-NEXT: scasw %es:(%rdi), %ax # sched: [100:0.25] +; ZNVER1-NEXT: scasl %es:(%rdi), %eax # sched: [100:0.25] +; ZNVER1-NEXT: scasq %es:(%rdi), %rax # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "scasb \0A\09 scasw \0A\09 scasl \0A\09 scasq", ""() @@ -14036,10 +14036,10 @@ ; ZNVER1-LABEL: test_shld_shrd_16: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: shldw %cl, %si, %di # sched: [100:?] -; ZNVER1-NEXT: shrdw %cl, %si, %di # sched: [100:?] -; ZNVER1-NEXT: shldw %cl, %si, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: shrdw %cl, %si, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: shldw %cl, %si, %di # sched: [100:0.25] +; ZNVER1-NEXT: shrdw %cl, %si, %di # sched: [100:0.25] +; ZNVER1-NEXT: shldw %cl, %si, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: shrdw %cl, %si, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: shldw $7, %si, %di # sched: [1:0.25] ; ZNVER1-NEXT: shrdw $7, %si, %di # sched: [1:0.25] ; ZNVER1-NEXT: shldw $7, %si, (%rdx) # sched: [5:0.50] @@ -14179,10 +14179,10 @@ ; ZNVER1-LABEL: test_shld_shrd_32: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: shldl %cl, %esi, %edi # sched: [100:?] -; ZNVER1-NEXT: shrdl %cl, %esi, %edi # sched: [100:?] -; ZNVER1-NEXT: shldl %cl, %esi, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: shrdl %cl, %esi, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: shldl %cl, %esi, %edi # sched: [100:0.25] +; ZNVER1-NEXT: shrdl %cl, %esi, %edi # sched: [100:0.25] +; ZNVER1-NEXT: shldl %cl, %esi, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: shrdl %cl, %esi, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: shldl $7, %esi, %edi # sched: [1:0.25] ; ZNVER1-NEXT: shrdl $7, %esi, %edi # sched: [1:0.25] ; ZNVER1-NEXT: shldl $7, %esi, (%rdx) # sched: [5:0.50] @@ -14322,10 +14322,10 @@ ; ZNVER1-LABEL: test_shld_shrd_64: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: shldq %cl, %rsi, %rdi # sched: [100:?] -; ZNVER1-NEXT: shrdq %cl, %rsi, %rdi # sched: [100:?] -; ZNVER1-NEXT: shldq %cl, %rsi, (%rdx) # sched: [100:?] -; ZNVER1-NEXT: shrdq %cl, %rsi, (%rdx) # sched: [100:?] 
+; ZNVER1-NEXT: shldq %cl, %rsi, %rdi # sched: [100:0.25] +; ZNVER1-NEXT: shrdq %cl, %rsi, %rdi # sched: [100:0.25] +; ZNVER1-NEXT: shldq %cl, %rsi, (%rdx) # sched: [100:0.25] +; ZNVER1-NEXT: shrdq %cl, %rsi, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: shldq $7, %rsi, %rdi # sched: [1:0.25] ; ZNVER1-NEXT: shrdq $7, %rsi, %rdi # sched: [1:0.25] ; ZNVER1-NEXT: shldq $7, %rsi, (%rdx) # sched: [5:0.50] @@ -14521,10 +14521,10 @@ ; ZNVER1-LABEL: test_stos: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: stosb %al, %es:(%rdi) # sched: [100:?] -; ZNVER1-NEXT: stosw %ax, %es:(%rdi) # sched: [100:?] -; ZNVER1-NEXT: stosl %eax, %es:(%rdi) # sched: [100:?] -; ZNVER1-NEXT: stosq %rax, %es:(%rdi) # sched: [100:?] +; ZNVER1-NEXT: stosb %al, %es:(%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: stosw %ax, %es:(%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: stosl %eax, %es:(%rdi) # sched: [100:0.25] +; ZNVER1-NEXT: stosq %rax, %es:(%rdi) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "stosb \0A\09 stosw \0A\09 stosl \0A\09 stosq", ""() @@ -15793,7 +15793,7 @@ ; ZNVER1-LABEL: test_ud2: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: ud2 # sched: [100:?] +; ZNVER1-NEXT: ud2 # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] call void asm sideeffect "ud2", ""() @@ -15885,7 +15885,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: xaddb %dil, %sil # sched: [1:0.25] -; ZNVER1-NEXT: xaddb %dil, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: xaddb %dil, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "xaddb $0, $1 \0A\09 xaddb $0, $2", "r,r,*m"(i8 %a0, i8 %a1, i8 *%a2) nounwind @@ -15968,7 +15968,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: xaddw %di, %si # sched: [1:0.25] -; ZNVER1-NEXT: xaddw %di, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: xaddw %di, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "xaddw $0, $1 \0A\09 xaddw $0, $2", "r,r,*m"(i16 %a0, i16 %a1, i16 *%a2) nounwind @@ -16051,7 +16051,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: xaddl %edi, %esi # sched: [1:0.25] -; ZNVER1-NEXT: xaddl %edi, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: xaddl %edi, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "xaddl $0, $1 \0A\09 xaddl $0, $2", "r,r,*m"(i32 %a0, i32 %a1, i32 *%a2) nounwind @@ -16134,7 +16134,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: xaddq %rdi, %rsi # sched: [1:0.25] -; ZNVER1-NEXT: xaddq %rdi, (%rdx) # sched: [100:?] +; ZNVER1-NEXT: xaddq %rdi, (%rdx) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "xaddq $0, $1 \0A\09 xaddq $0, $2", "r,r,*m"(i64 %a0, i64 %a1, i64 *%a2) nounwind @@ -16536,7 +16536,7 @@ ; HASWELL-LABEL: test_xlat: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: xlatb # sched: [7:?] +; HASWELL-NEXT: xlatb # sched: [7:0.75] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retq # sched: [7:1.00] ; @@ -16571,7 +16571,7 @@ ; ZNVER1-LABEL: test_xlat: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: xlatb # sched: [100:?] 
+; ZNVER1-NEXT: xlatb # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm "xlat", ""() nounwind Index: llvm/trunk/test/CodeGen/X86/sha-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sha-schedule.ll +++ llvm/trunk/test/CodeGen/X86/sha-schedule.ll @@ -188,8 +188,8 @@ ; ; ZNVER1-LABEL: test_sha256msg2: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: sha256msg2 %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: sha256msg2 (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: sha256msg2 %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: sha256msg2 (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x i32>, <4 x i32>* %a2 %2 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %a0, <4 x i32> %a1) Index: llvm/trunk/test/CodeGen/X86/sse-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse-schedule.ll +++ llvm/trunk/test/CodeGen/X86/sse-schedule.ll @@ -2031,13 +2031,13 @@ ; ZNVER1-SSE-LABEL: test_ldmxcsr: ; ZNVER1-SSE: # %bb.0: ; ZNVER1-SSE-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50] -; ZNVER1-SSE-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?] +; ZNVER1-SSE-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_ldmxcsr: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50] -; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?] +; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = alloca i32, align 4 %2 = bitcast i32* %1 to i8* @@ -5265,13 +5265,13 @@ ; ; ZNVER1-SSE-LABEL: test_stmxcsr: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: stmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?] +; ZNVER1-SSE-NEXT: stmxcsr -{{[0-9]+}}(%rsp) # sched: [100:0.25] ; ZNVER1-SSE-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_stmxcsr: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?] +; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [100:0.25] ; ZNVER1-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = alloca i32, align 4 @@ -6118,7 +6118,7 @@ ; GENERIC-LABEL: test_fnop: ; GENERIC: # %bb.0: ; GENERIC-NEXT: #APP -; GENERIC-NEXT: nop # sched: [1:?] +; GENERIC-NEXT: nop # sched: [1:0.25] ; GENERIC-NEXT: #NO_APP ; GENERIC-NEXT: xorps %xmm0, %xmm0 # sched: [1:1.00] ; GENERIC-NEXT: retq # sched: [1:1.00] @@ -6139,14 +6139,14 @@ ; SLM: # %bb.0: ; SLM-NEXT: xorps %xmm0, %xmm0 # sched: [1:0.50] ; SLM-NEXT: #APP -; SLM-NEXT: nop # sched: [1:?] +; SLM-NEXT: nop # sched: [1:0.50] ; SLM-NEXT: #NO_APP ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-SSE-LABEL: test_fnop: ; SANDY-SSE: # %bb.0: ; SANDY-SSE-NEXT: #APP -; SANDY-SSE-NEXT: nop # sched: [1:?] +; SANDY-SSE-NEXT: nop # sched: [1:0.25] ; SANDY-SSE-NEXT: #NO_APP ; SANDY-SSE-NEXT: xorps %xmm0, %xmm0 # sched: [1:1.00] ; SANDY-SSE-NEXT: retq # sched: [1:1.00] @@ -6154,7 +6154,7 @@ ; SANDY-LABEL: test_fnop: ; SANDY: # %bb.0: ; SANDY-NEXT: #APP -; SANDY-NEXT: nop # sched: [1:?] +; SANDY-NEXT: nop # sched: [1:0.25] ; SANDY-NEXT: #NO_APP ; SANDY-NEXT: vxorps %xmm0, %xmm0, %xmm0 # sched: [1:1.00] ; SANDY-NEXT: retq # sched: [1:1.00] @@ -6225,7 +6225,7 @@ ; ; BTVER2-SSE-LABEL: test_fnop: ; BTVER2-SSE: # %bb.0: -; BTVER2-SSE-NEXT: xorps %xmm0, %xmm0 # sched: [0:?] 
+; BTVER2-SSE-NEXT: xorps %xmm0, %xmm0 # sched: [0:0.50] ; BTVER2-SSE-NEXT: #APP ; BTVER2-SSE-NEXT: nop # sched: [1:0.50] ; BTVER2-SSE-NEXT: #NO_APP @@ -6233,7 +6233,7 @@ ; ; BTVER2-LABEL: test_fnop: ; BTVER2: # %bb.0: -; BTVER2-NEXT: vxorps %xmm0, %xmm0, %xmm0 # sched: [0:?] +; BTVER2-NEXT: vxorps %xmm0, %xmm0, %xmm0 # sched: [0:0.50] ; BTVER2-NEXT: #APP ; BTVER2-NEXT: nop # sched: [1:0.50] ; BTVER2-NEXT: #NO_APP @@ -6243,7 +6243,7 @@ ; ZNVER1-SSE: # %bb.0: ; ZNVER1-SSE-NEXT: xorps %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-SSE-NEXT: #APP -; ZNVER1-SSE-NEXT: nop # sched: [1:?] +; ZNVER1-SSE-NEXT: nop # sched: [1:0.25] ; ZNVER1-SSE-NEXT: #NO_APP ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; @@ -6251,7 +6251,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vxorps %xmm0, %xmm0, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: nop # sched: [1:?] +; ZNVER1-NEXT: nop # sched: [1:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void asm sideeffect "nop", ""() nounwind Index: llvm/trunk/test/CodeGen/X86/sse2-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse2-schedule.ll +++ llvm/trunk/test/CodeGen/X86/sse2-schedule.ll @@ -3651,12 +3651,12 @@ ; ; ZNVER1-SSE-LABEL: test_maskmovdqu: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: maskmovdqu %xmm1, %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: maskmovdqu %xmm1, %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_maskmovdqu: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2) ret void Index: llvm/trunk/test/CodeGen/X86/sse3-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse3-schedule.ll +++ llvm/trunk/test/CodeGen/X86/sse3-schedule.ll @@ -330,14 +330,14 @@ ; ; ZNVER1-SSE-LABEL: test_haddpd: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: haddpd %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: haddpd (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: haddpd %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: haddpd (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_haddpd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 @@ -439,14 +439,14 @@ ; ; ZNVER1-SSE-LABEL: test_haddps: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: haddps %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: haddps (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: haddps %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: haddps (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_haddps: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [100:?] 
+; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 @@ -548,14 +548,14 @@ ; ; ZNVER1-SSE-LABEL: test_hsubpd: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: hsubpd %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: hsubpd (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: hsubpd %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: hsubpd (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_hsubpd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 @@ -657,14 +657,14 @@ ; ; ZNVER1-SSE-LABEL: test_hsubps: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: hsubps %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: hsubps (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: hsubps %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: hsubps (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_hsubps: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 @@ -875,14 +875,14 @@ ; ZNVER1-SSE: # %bb.0: ; ZNVER1-SSE-NEXT: leaq (%rdi), %rax # sched: [1:0.25] ; ZNVER1-SSE-NEXT: movl %esi, %ecx # sched: [1:0.25] -; ZNVER1-SSE-NEXT: monitor # sched: [100:?] +; ZNVER1-SSE-NEXT: monitor # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_monitor: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: leaq (%rdi), %rax # sched: [1:0.25] ; ZNVER1-NEXT: movl %esi, %ecx # sched: [1:0.25] -; ZNVER1-NEXT: monitor # sched: [100:?] +; ZNVER1-NEXT: monitor # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.sse3.monitor(i8* %a0, i32 %a1, i32 %a2) ret void @@ -1252,8 +1252,8 @@ ; ; ZNVER1-SSE-LABEL: test_movsldup: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: movsldup {{.*#+}} xmm1 = xmm0[0,0,2,2] sched: [100:?] -; ZNVER1-SSE-NEXT: movsldup {{.*#+}} xmm0 = mem[0,0,2,2] sched: [100:?] +; ZNVER1-SSE-NEXT: movsldup {{.*#+}} xmm1 = xmm0[0,0,2,2] sched: [100:0.25] +; ZNVER1-SSE-NEXT: movsldup {{.*#+}} xmm0 = mem[0,0,2,2] sched: [100:0.25] ; ZNVER1-SSE-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; @@ -1380,14 +1380,14 @@ ; ZNVER1-SSE: # %bb.0: ; ZNVER1-SSE-NEXT: movl %edi, %ecx # sched: [1:0.25] ; ZNVER1-SSE-NEXT: movl %esi, %eax # sched: [1:0.25] -; ZNVER1-SSE-NEXT: mwait # sched: [100:?] +; ZNVER1-SSE-NEXT: mwait # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_mwait: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl %edi, %ecx # sched: [1:0.25] ; ZNVER1-NEXT: movl %esi, %eax # sched: [1:0.25] -; ZNVER1-NEXT: mwait # sched: [100:?] 
+; ZNVER1-NEXT: mwait # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] tail call void @llvm.x86.sse3.mwait(i32 %a0, i32 %a1) ret void Index: llvm/trunk/test/CodeGen/X86/sse41-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse41-schedule.ll +++ llvm/trunk/test/CodeGen/X86/sse41-schedule.ll @@ -603,14 +603,14 @@ ; ; ZNVER1-SSE-LABEL: test_dppd: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: dppd $7, %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: dppd $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: dppd $7, %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: dppd $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_dppd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) %2 = load <2 x double>, <2 x double> *%a2, align 16 @@ -706,14 +706,14 @@ ; ; ZNVER1-SSE-LABEL: test_dpps: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: dpps $7, %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: dpps $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: dpps $7, %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: dpps $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_dpps: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) %2 = load <4 x float>, <4 x float> *%a2, align 16 @@ -1101,14 +1101,14 @@ ; ; ZNVER1-SSE-LABEL: test_mpsadbw: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: mpsadbw $7, %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: mpsadbw $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: mpsadbw $7, %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: mpsadbw $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_mpsadbw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) %2 = bitcast <8 x i16> %1 to <16 x i8> Index: llvm/trunk/test/CodeGen/X86/sse42-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/sse42-schedule.ll +++ llvm/trunk/test/CodeGen/X86/sse42-schedule.ll @@ -800,11 +800,11 @@ ; ZNVER1-SSE: # %bb.0: ; ZNVER1-SSE-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-SSE-NEXT: movl $7, %edx # sched: [1:0.25] -; ZNVER1-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 # sched: [100:?] 
+; ZNVER1-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-SSE-NEXT: movl $7, %edx # sched: [1:0.25] ; ZNVER1-SSE-NEXT: movl %ecx, %esi # sched: [1:0.25] -; ZNVER1-SSE-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: # kill: def $ecx killed $ecx def $rcx ; ZNVER1-SSE-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] @@ -813,11 +813,11 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25] -; ZNVER1-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpcmpestri $7, %xmm1, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25] ; ZNVER1-NEXT: movl %ecx, %esi # sched: [1:0.25] -; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: # kill: def $ecx killed $ecx def $rcx ; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] @@ -974,20 +974,20 @@ ; ZNVER1-SSE: # %bb.0: ; ZNVER1-SSE-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-SSE-NEXT: movl $7, %edx # sched: [1:0.25] -; ZNVER1-SSE-NEXT: pcmpestrm $7, %xmm1, %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: pcmpestrm $7, %xmm1, %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-SSE-NEXT: movl $7, %edx # sched: [1:0.25] -; ZNVER1-SSE-NEXT: pcmpestrm $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: pcmpestrm $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_pcmpestrm: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25] -; ZNVER1-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpcmpestrm $7, %xmm1, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25] ; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25] -; ZNVER1-NEXT: vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 @@ -1125,18 +1125,18 @@ ; ; ZNVER1-SSE-LABEL: test_pcmpistri: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: movl %ecx, %eax # sched: [1:0.25] -; ZNVER1-SSE-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: # kill: def $ecx killed $ecx def $rcx ; ZNVER1-SSE-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_pcmpistri: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: movl %ecx, %eax # sched: [1:0.25] -; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?] 
+; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: # kill: def $ecx killed $ecx def $rcx ; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] @@ -1235,14 +1235,14 @@ ; ; ZNVER1-SSE-LABEL: test_pcmpistrm: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: pcmpistrm $7, %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: pcmpistrm $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: pcmpistrm $7, %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: pcmpistrm $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_pcmpistrm: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7) %2 = load <16 x i8>, <16 x i8> *%a2, align 16 @@ -1442,14 +1442,14 @@ ; ; ZNVER1-SSE-LABEL: test_pclmulqdq: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: pclmulqdq $0, %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: pclmulqdq $0, (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: pclmulqdq $0, %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: pclmulqdq $0, (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_pclmulqdq: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <2 x i64>, <2 x i64> *%a2, align 16 %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0) Index: llvm/trunk/test/CodeGen/X86/ssse3-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/ssse3-schedule.ll +++ llvm/trunk/test/CodeGen/X86/ssse3-schedule.ll @@ -619,14 +619,14 @@ ; ; ZNVER1-SSE-LABEL: test_phaddd: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: phaddd %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: phaddd (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: phaddd %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: phaddd (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_phaddd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 @@ -728,14 +728,14 @@ ; ; ZNVER1-SSE-LABEL: test_phaddsw: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: phaddsw %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: phaddsw (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: phaddsw %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: phaddsw (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_phaddsw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [100:?] 
+; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 @@ -837,14 +837,14 @@ ; ; ZNVER1-SSE-LABEL: test_phaddw: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: phaddw %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: phaddw (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: phaddw %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: phaddw (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_phaddw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 @@ -946,14 +946,14 @@ ; ; ZNVER1-SSE-LABEL: test_phsubd: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: phsubd %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: phsubd (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: phsubd %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: phsubd (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_phsubd: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 @@ -1055,14 +1055,14 @@ ; ; ZNVER1-SSE-LABEL: test_phsubsw: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: phsubsw %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: phsubsw (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: phsubsw %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: phsubsw (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_phsubsw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [100:?] +; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 @@ -1164,14 +1164,14 @@ ; ; ZNVER1-SSE-LABEL: test_phsubw: ; ZNVER1-SSE: # %bb.0: -; ZNVER1-SSE-NEXT: phsubw %xmm1, %xmm0 # sched: [100:?] -; ZNVER1-SSE-NEXT: phsubw (%rdi), %xmm0 # sched: [100:?] +; ZNVER1-SSE-NEXT: phsubw %xmm1, %xmm0 # sched: [100:0.25] +; ZNVER1-SSE-NEXT: phsubw (%rdi), %xmm0 # sched: [100:0.25] ; ZNVER1-SSE-NEXT: retq # sched: [1:0.50] ; ; ZNVER1-LABEL: test_phsubw: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [100:?] -; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [100:?] 
+; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [100:0.25] +; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [100:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1) %2 = load <8 x i16>, <8 x i16> *%a2, align 16 Index: llvm/trunk/test/CodeGen/X86/x87-schedule.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/x87-schedule.ll +++ llvm/trunk/test/CodeGen/X86/x87-schedule.ll @@ -78,7 +78,7 @@ ; ZNVER1-LABEL: test_f2xm1: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: f2xm1 # sched: [100:?] +; ZNVER1-NEXT: f2xm1 # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "f2xm1", ""() nounwind @@ -448,7 +448,7 @@ ; HASWELL: # %bb.0: ; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50] ; HASWELL-NEXT: #APP -; HASWELL-NEXT: fbld (%eax) # sched: [47:?] +; HASWELL-NEXT: fbld (%eax) # sched: [47:10.75] ; HASWELL-NEXT: fbstp (%eax) # sched: [1:1.00] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] @@ -493,8 +493,8 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fbld (%eax) # sched: [100:?] -; ZNVER1-NEXT: fbstp (%eax) # sched: [100:?] +; ZNVER1-NEXT: fbld (%eax) # sched: [100:0.25] +; ZNVER1-NEXT: fbstp (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fbld $0 \0A\09 fbstp $0", "*m"(i8 *%a0) nounwind @@ -652,7 +652,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: wait # sched: [1:1.00] -; ZNVER1-NEXT: fnclex # sched: [100:?] +; ZNVER1-NEXT: fnclex # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fclex", ""() nounwind @@ -726,7 +726,7 @@ ; ZNVER1-LABEL: test_fnclex: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fnclex # sched: [100:?] +; ZNVER1-NEXT: fnclex # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fnclex", ""() nounwind @@ -863,14 +863,14 @@ ; ZNVER1-LABEL: test_fcmov: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fcmovb %st(1), %st(0) # sched: [100:?] -; ZNVER1-NEXT: fcmovbe %st(1), %st(0) # sched: [100:?] -; ZNVER1-NEXT: fcmove %st(1), %st(0) # sched: [100:?] -; ZNVER1-NEXT: fcmovnb %st(1), %st(0) # sched: [100:?] -; ZNVER1-NEXT: fcmovnbe %st(1), %st(0) # sched: [100:?] -; ZNVER1-NEXT: fcmovne %st(1), %st(0) # sched: [100:?] -; ZNVER1-NEXT: fcmovnu %st(1), %st(0) # sched: [100:?] -; ZNVER1-NEXT: fcmovu %st(1), %st(0) # sched: [100:?] 
+; ZNVER1-NEXT: fcmovb %st(1), %st(0) # sched: [100:0.25] +; ZNVER1-NEXT: fcmovbe %st(1), %st(0) # sched: [100:0.25] +; ZNVER1-NEXT: fcmove %st(1), %st(0) # sched: [100:0.25] +; ZNVER1-NEXT: fcmovnb %st(1), %st(0) # sched: [100:0.25] +; ZNVER1-NEXT: fcmovnbe %st(1), %st(0) # sched: [100:0.25] +; ZNVER1-NEXT: fcmovne %st(1), %st(0) # sched: [100:0.25] +; ZNVER1-NEXT: fcmovnu %st(1), %st(0) # sched: [100:0.25] +; ZNVER1-NEXT: fcmovu %st(1), %st(0) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fcmovb %st(1), %st(0) \0A\09 fcmovbe %st(1), %st(0) \0A\09 fcmove %st(1), %st(0) \0A\09 fcmovnb %st(1), %st(0) \0A\09 fcmovnbe %st(1), %st(0) \0A\09 fcmovne %st(1), %st(0) \0A\09 fcmovnu %st(1), %st(0) \0A\09 fcmovu %st(1), %st(0)", ""() nounwind @@ -1286,7 +1286,7 @@ ; ZNVER1-LABEL: test_fcos: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fcos # sched: [100:?] +; ZNVER1-NEXT: fcos # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fcos", ""() nounwind @@ -2336,7 +2336,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: wait # sched: [1:1.00] -; ZNVER1-NEXT: fninit # sched: [100:?] +; ZNVER1-NEXT: fninit # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "finit", ""() nounwind @@ -2410,7 +2410,7 @@ ; ZNVER1-LABEL: test_fninit: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fninit # sched: [100:?] +; ZNVER1-NEXT: fninit # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fninit", ""() nounwind @@ -2811,8 +2811,8 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fldcw (%eax) # sched: [100:?] -; ZNVER1-NEXT: fldenv (%eax) # sched: [100:?] +; ZNVER1-NEXT: fldcw (%eax) # sched: [100:0.25] +; ZNVER1-NEXT: fldenv (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fldcw $0 \0A\09 fldenv $0", "*m"(i8* %a0) nounwind @@ -3219,7 +3219,7 @@ ; SLM-LABEL: test_fnop: ; SLM: # %bb.0: ; SLM-NEXT: #APP -; SLM-NEXT: fnop # sched: [1:?] +; SLM-NEXT: fnop # sched: [1:0.50] ; SLM-NEXT: #NO_APP ; SLM-NEXT: retl # sched: [4:1.00] ; @@ -3342,7 +3342,7 @@ ; ZNVER1-LABEL: test_fpatan: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fpatan # sched: [100:?] +; ZNVER1-NEXT: fpatan # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fpatan", ""() nounwind @@ -3385,8 +3385,8 @@ ; HASWELL-LABEL: test_fprem_fprem1: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: fprem # sched: [19:?] -; HASWELL-NEXT: fprem1 # sched: [27:?] +; HASWELL-NEXT: fprem # sched: [19:7.00] +; HASWELL-NEXT: fprem1 # sched: [27:10.25] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] ; @@ -3425,8 +3425,8 @@ ; ZNVER1-LABEL: test_fprem_fprem1: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fprem # sched: [100:?] -; ZNVER1-NEXT: fprem1 # sched: [100:?] +; ZNVER1-NEXT: fprem # sched: [100:0.25] +; ZNVER1-NEXT: fprem1 # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fprem \0A\09 fprem1", ""() nounwind @@ -3500,7 +3500,7 @@ ; ZNVER1-LABEL: test_fptan: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fptan # sched: [100:?] 
+; ZNVER1-NEXT: fptan # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fptan", ""() nounwind @@ -3539,7 +3539,7 @@ ; HASWELL-LABEL: test_frndint: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: frndint # sched: [11:?] +; HASWELL-NEXT: frndint # sched: [11:4.25] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] ; @@ -3574,7 +3574,7 @@ ; ZNVER1-LABEL: test_frndint: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: frndint # sched: [100:?] +; ZNVER1-NEXT: frndint # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "frndint", ""() nounwind @@ -3618,7 +3618,7 @@ ; HASWELL: # %bb.0: ; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50] ; HASWELL-NEXT: #APP -; HASWELL-NEXT: frstor (%eax) # sched: [1:?] +; HASWELL-NEXT: frstor (%eax) # sched: [1:22.50] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] ; @@ -3658,7 +3658,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: frstor (%eax) # sched: [100:?] +; ZNVER1-NEXT: frstor (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "frstor $0", "*m"(i8* %a0) nounwind @@ -3707,7 +3707,7 @@ ; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50] ; HASWELL-NEXT: #APP ; HASWELL-NEXT: wait # sched: [2:0.50] -; HASWELL-NEXT: fnsave (%eax) # sched: [1:?] +; HASWELL-NEXT: fnsave (%eax) # sched: [1:36.75] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] ; @@ -3752,7 +3752,7 @@ ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: wait # sched: [1:1.00] -; ZNVER1-NEXT: fnsave (%eax) # sched: [100:?] +; ZNVER1-NEXT: fnsave (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fsave $0", "*m"(i8* %a0) nounwind @@ -3796,7 +3796,7 @@ ; HASWELL: # %bb.0: ; HASWELL-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [5:0.50] ; HASWELL-NEXT: #APP -; HASWELL-NEXT: fnsave (%eax) # sched: [1:?] +; HASWELL-NEXT: fnsave (%eax) # sched: [1:36.75] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] ; @@ -3836,7 +3836,7 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fnsave (%eax) # sched: [100:?] +; ZNVER1-NEXT: fnsave (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fnsave $0", "*m"(i8* %a0) nounwind @@ -3875,7 +3875,7 @@ ; HASWELL-LABEL: test_fscale: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: fscale # sched: [75:?] +; HASWELL-NEXT: fscale # sched: [75:12.50] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] ; @@ -3910,7 +3910,7 @@ ; ZNVER1-LABEL: test_fscale: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fscale # sched: [100:?] +; ZNVER1-NEXT: fscale # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fscale", ""() nounwind @@ -3984,7 +3984,7 @@ ; ZNVER1-LABEL: test_fsin: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fsin # sched: [100:?] 
+; ZNVER1-NEXT: fsin # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fsin", ""() nounwind @@ -4058,7 +4058,7 @@ ; ZNVER1-LABEL: test_fsincos: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fsincos # sched: [100:?] +; ZNVER1-NEXT: fsincos # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fsincos", ""() nounwind @@ -4426,11 +4426,11 @@ ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP ; ZNVER1-NEXT: wait # sched: [1:1.00] -; ZNVER1-NEXT: fnstcw (%eax) # sched: [100:?] +; ZNVER1-NEXT: fnstcw (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: wait # sched: [1:1.00] -; ZNVER1-NEXT: fnstenv (%eax) # sched: [100:?] +; ZNVER1-NEXT: fnstenv (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: wait # sched: [1:1.00] -; ZNVER1-NEXT: fnstsw (%eax) # sched: [100:?] +; ZNVER1-NEXT: fnstsw (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fstcw $0 \0A\09 fstenv $0 \0A\09 fstsw $0", "*m"(i8* %a0) nounwind @@ -4532,9 +4532,9 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fnstcw (%eax) # sched: [100:?] -; ZNVER1-NEXT: fnstenv (%eax) # sched: [100:?] -; ZNVER1-NEXT: fnstsw (%eax) # sched: [100:?] +; ZNVER1-NEXT: fnstcw (%eax) # sched: [100:0.25] +; ZNVER1-NEXT: fnstenv (%eax) # sched: [100:0.25] +; ZNVER1-NEXT: fnstsw (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fnstcw $0 \0A\09 fnstenv $0 \0A\09 fnstsw $0", "*m"(i8* %a0) nounwind @@ -5627,8 +5627,8 @@ ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: movl {{[0-9]+}}(%esp), %eax # sched: [8:0.50] ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fxrstor (%eax) # sched: [100:?] -; ZNVER1-NEXT: fxsave (%eax) # sched: [100:?] +; ZNVER1-NEXT: fxrstor (%eax) # sched: [100:0.25] +; ZNVER1-NEXT: fxsave (%eax) # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fxrstor $0 \0A\09 fxsave $0", "*m"(i8 *%a0) nounwind @@ -5667,7 +5667,7 @@ ; HASWELL-LABEL: test_fxtract: ; HASWELL: # %bb.0: ; HASWELL-NEXT: #APP -; HASWELL-NEXT: fxtract # sched: [15:?] +; HASWELL-NEXT: fxtract # sched: [15:4.25] ; HASWELL-NEXT: #NO_APP ; HASWELL-NEXT: retl # sched: [7:1.00] ; @@ -5702,7 +5702,7 @@ ; ZNVER1-LABEL: test_fxtract: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fxtract # sched: [100:?] +; ZNVER1-NEXT: fxtract # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fxtract", ""() nounwind @@ -5776,7 +5776,7 @@ ; ZNVER1-LABEL: test_fyl2x: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fyl2x # sched: [100:?] +; ZNVER1-NEXT: fyl2x # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fyl2x", ""() nounwind @@ -5850,7 +5850,7 @@ ; ZNVER1-LABEL: test_fyl2xp1: ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: #APP -; ZNVER1-NEXT: fyl2xp1 # sched: [100:?] 
+; ZNVER1-NEXT: fyl2xp1 # sched: [100:0.25] ; ZNVER1-NEXT: #NO_APP ; ZNVER1-NEXT: retl # sched: [1:0.50] tail call void asm sideeffect "fyl2xp1", ""() nounwind Index: llvm/trunk/test/tools/llvm-mca/AArch64/Exynos/direct-branch.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/AArch64/Exynos/direct-branch.s +++ llvm/trunk/test/tools/llvm-mca/AArch64/Exynos/direct-branch.s @@ -26,7 +26,9 @@ # ALL-NEXT: [6]: HasSideEffects # ALL: [1] [2] [3] [4] [5] [6] Instructions: -# ALL-NEXT: 1 0 - b t + +# M1-NEXT: 1 0 0.25 b t +# M3-NEXT: 1 0 0.17 b t # ALL: Timeline view: # ALL-NEXT: Index 01 Index: llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-x86_64.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-x86_64.s +++ llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-x86_64.s @@ -747,7 +747,7 @@ # CHECK-NEXT: 4 6 1.00 * * btcq $7, (%rax) # CHECK-NEXT: 4 6 1.00 * * btrq $7, (%rax) # CHECK-NEXT: 4 6 1.00 * * btsq $7, (%rax) -# CHECK-NEXT: 1 1 - * clc +# CHECK-NEXT: 1 1 0.25 * clc # CHECK-NEXT: 1 1 0.25 decb %dil # CHECK-NEXT: 3 7 1.00 * * decb (%rax) # CHECK-NEXT: 1 1 0.25 decw %di Index: llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-avx1.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-avx1.s +++ llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-avx1.s @@ -1699,8 +1699,8 @@ # CHECK-NEXT: 1 6 1.00 * vxorps (%rax), %xmm1, %xmm2 # CHECK-NEXT: 2 1 1.00 vxorps %ymm0, %ymm1, %ymm2 # CHECK-NEXT: 2 6 2.00 * vxorps (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 73 90 - * * * vzeroall -# CHECK-NEXT: 37 46 - * * * vzeroupper +# CHECK-NEXT: 73 90 36.50 * * * vzeroall +# CHECK-NEXT: 37 46 18.50 * * * vzeroupper # CHECK: Resources: # CHECK-NEXT: [0] - JALU0 Index: llvm/trunk/test/tools/llvm-mca/X86/BtVer2/zero-idioms.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/BtVer2/zero-idioms.s +++ llvm/trunk/test/tools/llvm-mca/X86/BtVer2/zero-idioms.s @@ -28,15 +28,15 @@ # CHECK-NEXT: [6]: HasSideEffects # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 1 0 - xorps %xmm0, %xmm0 -# CHECK-NEXT: 1 0 - xorpd %xmm1, %xmm1 -# CHECK-NEXT: 1 0 - vxorps %xmm2, %xmm2, %xmm2 -# CHECK-NEXT: 1 0 - vxorpd %xmm1, %xmm1, %xmm1 -# CHECK-NEXT: 1 0 - pxor %xmm2, %xmm2 -# CHECK-NEXT: 1 0 - vpxor %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: 1 0 - vxorps %xmm4, %xmm4, %xmm5 -# CHECK-NEXT: 1 0 - vxorpd %xmm1, %xmm1, %xmm3 -# CHECK-NEXT: 1 0 - vpxor %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: 1 0 0.50 xorps %xmm0, %xmm0 +# CHECK-NEXT: 1 0 0.50 xorpd %xmm1, %xmm1 +# CHECK-NEXT: 1 0 0.50 vxorps %xmm2, %xmm2, %xmm2 +# CHECK-NEXT: 1 0 0.50 vxorpd %xmm1, %xmm1, %xmm1 +# CHECK-NEXT: 1 0 0.50 pxor %xmm2, %xmm2 +# CHECK-NEXT: 1 0 0.50 vpxor %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: 1 0 0.50 vxorps %xmm4, %xmm4, %xmm5 +# CHECK-NEXT: 1 0 0.50 vxorpd %xmm1, %xmm1, %xmm3 +# CHECK-NEXT: 1 0 0.50 vpxor %xmm3, %xmm3, %xmm5 # CHECK: Register File statistics: # CHECK-NEXT: Total number of mappings created: 0 Index: llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-x86_64.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-x86_64.s +++ llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-x86_64.s @@ -703,10 +703,10 @@ # CHECK-NEXT: 1 1 0.50 btcw %si, %di # CHECK-NEXT: 1 1 0.50 btrw %si, %di # CHECK-NEXT: 1 1 0.50 btsw %si, %di -# 
CHECK-NEXT: 10 1 - * btw %si, (%rax) -# CHECK-NEXT: 11 1 - * * btcw %si, (%rax) -# CHECK-NEXT: 11 1 - * * btrw %si, (%rax) -# CHECK-NEXT: 11 1 - * * btsw %si, (%rax) +# CHECK-NEXT: 10 1 2.50 * btw %si, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btcw %si, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btrw %si, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btsw %si, (%rax) # CHECK-NEXT: 1 1 0.50 btw $7, %di # CHECK-NEXT: 1 1 0.50 btcw $7, %di # CHECK-NEXT: 1 1 0.50 btrw $7, %di @@ -719,10 +719,10 @@ # CHECK-NEXT: 1 1 0.50 btcl %esi, %edi # CHECK-NEXT: 1 1 0.50 btrl %esi, %edi # CHECK-NEXT: 1 1 0.50 btsl %esi, %edi -# CHECK-NEXT: 10 1 - * btl %esi, (%rax) -# CHECK-NEXT: 11 1 - * * btcl %esi, (%rax) -# CHECK-NEXT: 11 1 - * * btrl %esi, (%rax) -# CHECK-NEXT: 11 1 - * * btsl %esi, (%rax) +# CHECK-NEXT: 10 1 2.50 * btl %esi, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btcl %esi, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btrl %esi, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btsl %esi, (%rax) # CHECK-NEXT: 1 1 0.50 btl $7, %edi # CHECK-NEXT: 1 1 0.50 btcl $7, %edi # CHECK-NEXT: 1 1 0.50 btrl $7, %edi @@ -735,10 +735,10 @@ # CHECK-NEXT: 1 1 0.50 btcq %rsi, %rdi # CHECK-NEXT: 1 1 0.50 btrq %rsi, %rdi # CHECK-NEXT: 1 1 0.50 btsq %rsi, %rdi -# CHECK-NEXT: 10 1 - * btq %rsi, (%rax) -# CHECK-NEXT: 11 1 - * * btcq %rsi, (%rax) -# CHECK-NEXT: 11 1 - * * btrq %rsi, (%rax) -# CHECK-NEXT: 11 1 - * * btsq %rsi, (%rax) +# CHECK-NEXT: 10 1 2.50 * btq %rsi, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btcq %rsi, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btrq %rsi, (%rax) +# CHECK-NEXT: 11 1 2.75 * * btsq %rsi, (%rax) # CHECK-NEXT: 1 1 0.50 btq $7, %rdi # CHECK-NEXT: 1 1 0.50 btcq $7, %rdi # CHECK-NEXT: 1 1 0.50 btrq $7, %rdi @@ -747,7 +747,7 @@ # CHECK-NEXT: 4 7 1.00 * * btcq $7, (%rax) # CHECK-NEXT: 4 7 1.00 * * btrq $7, (%rax) # CHECK-NEXT: 4 7 1.00 * * btsq $7, (%rax) -# CHECK-NEXT: 1 1 - * clc +# CHECK-NEXT: 1 1 0.25 * clc # CHECK-NEXT: 1 1 0.25 decb %dil # CHECK-NEXT: 3 7 1.00 * * decb (%rax) # CHECK-NEXT: 1 1 0.25 decw %di Index: llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-x87.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-x87.s +++ llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-x87.s @@ -216,7 +216,7 @@ # CHECK-NEXT: 1 3 1.00 * faddp %st(2) # CHECK-NEXT: 3 13 2.00 * * fiadds (%ecx) # CHECK-NEXT: 3 13 2.00 * * fiaddl (%ecx) -# CHECK-NEXT: 43 47 - * fbld (%ecx) +# CHECK-NEXT: 43 47 10.75 * fbld (%ecx) # CHECK-NEXT: 2 1 1.00 * fbstp (%eax) # CHECK-NEXT: 1 1 1.00 * fchs # CHECK-NEXT: 4 4 1.00 * fnclex @@ -298,13 +298,13 @@ # CHECK-NEXT: 3 15 1.00 * * fimull (%eax) # CHECK-NEXT: 1 1 0.50 * fnop # CHECK-NEXT: 1 100 0.25 * fpatan -# CHECK-NEXT: 28 19 - * fprem -# CHECK-NEXT: 41 27 - * fprem1 +# CHECK-NEXT: 28 19 7.00 * fprem +# CHECK-NEXT: 41 27 10.25 * fprem1 # CHECK-NEXT: 1 100 0.25 * fptan -# CHECK-NEXT: 17 11 - * frndint -# CHECK-NEXT: 90 1 - * frstor (%eax) -# CHECK-NEXT: 147 1 - * fnsave (%eax) -# CHECK-NEXT: 50 75 - * fscale +# CHECK-NEXT: 17 11 4.25 * frndint +# CHECK-NEXT: 90 1 22.50 * frstor (%eax) +# CHECK-NEXT: 147 1 36.75 * fnsave (%eax) +# CHECK-NEXT: 50 75 12.50 * fscale # CHECK-NEXT: 1 100 0.25 * fsin # CHECK-NEXT: 1 100 0.25 * fsincos # CHECK-NEXT: 1 23 17.00 * fsqrt @@ -318,9 +318,9 @@ # CHECK-NEXT: 3 2 1.00 * * fnstcw (%eax) # CHECK-NEXT: 100 115 19.50 * fnstenv (%eax) # CHECK-NEXT: 3 4 1.00 * fnstsw (%eax) -# CHECK-NEXT: 90 1 - * frstor (%eax) +# CHECK-NEXT: 90 1 22.50 * frstor (%eax) # CHECK-NEXT: 2 2 0.50 * wait -# CHECK-NEXT: 147 1 - * fnsave (%eax) +# 
CHECK-NEXT: 147 1 36.75 * fnsave (%eax) # CHECK-NEXT: 1 3 1.00 * fsub %st(0), %st(1) # CHECK-NEXT: 1 3 1.00 * fsub %st(2) # CHECK-NEXT: 2 10 1.00 * * fsubs (%ecx) @@ -351,7 +351,7 @@ # CHECK-NEXT: 15 17 4.00 * fxch %st(3) # CHECK-NEXT: 90 64 16.50 * * * fxrstor (%eax) # CHECK-NEXT: 1 100 0.25 * * * fxsave (%eax) -# CHECK-NEXT: 17 15 - * fxtract +# CHECK-NEXT: 17 15 4.25 * fxtract # CHECK-NEXT: 1 100 0.25 * fyl2x # CHECK-NEXT: 1 100 0.25 * fyl2xp1 Index: llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-x86_64.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-x86_64.s +++ llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-x86_64.s @@ -822,13 +822,13 @@ # CHECK-NEXT: 2 5 2.00 * * negl (%rax) # CHECK-NEXT: 1 1 0.50 negq %rcx # CHECK-NEXT: 2 5 2.00 * * negq (%r10) -# CHECK-NEXT: 1 1 - nop -# CHECK-NEXT: 1 1 - nopw %di -# CHECK-NEXT: 1 1 - nopw (%rcx) -# CHECK-NEXT: 1 1 - nopl %esi -# CHECK-NEXT: 1 1 - nopl (%r8) -# CHECK-NEXT: 1 1 - nopq %rdx -# CHECK-NEXT: 1 1 - nopq (%r9) +# CHECK-NEXT: 1 1 0.50 nop +# CHECK-NEXT: 1 1 0.50 nopw %di +# CHECK-NEXT: 1 1 0.50 nopw (%rcx) +# CHECK-NEXT: 1 1 0.50 nopl %esi +# CHECK-NEXT: 1 1 0.50 nopl (%r8) +# CHECK-NEXT: 1 1 0.50 nopq %rdx +# CHECK-NEXT: 1 1 0.50 nopq (%r9) # CHECK-NEXT: 1 1 0.50 notb %dil # CHECK-NEXT: 2 5 2.00 * * notb (%r8) # CHECK-NEXT: 1 1 0.50 notw %si Index: llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-x87.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-x87.s +++ llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-x87.s @@ -296,7 +296,7 @@ # CHECK-NEXT: 1 5 2.00 * fmulp %st(2) # CHECK-NEXT: 1 8 2.00 * * fimuls (%ecx) # CHECK-NEXT: 1 8 2.00 * * fimull (%eax) -# CHECK-NEXT: 1 1 - * fnop +# CHECK-NEXT: 1 1 0.50 * fnop # CHECK-NEXT: 1 100 1.00 * fpatan # CHECK-NEXT: 1 100 1.00 * fprem # CHECK-NEXT: 1 100 1.00 * fprem1 Index: llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-x86_64.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-x86_64.s +++ llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-x86_64.s @@ -747,7 +747,7 @@ # CHECK-NEXT: 4 7 1.00 * * btcq $7, (%rax) # CHECK-NEXT: 4 7 1.00 * * btrq $7, (%rax) # CHECK-NEXT: 4 7 1.00 * * btsq $7, (%rax) -# CHECK-NEXT: 1 1 - * clc +# CHECK-NEXT: 1 1 0.25 * clc # CHECK-NEXT: 1 1 0.33 decb %dil # CHECK-NEXT: 3 7 1.00 * * decb (%rax) # CHECK-NEXT: 1 1 0.33 decw %di @@ -822,13 +822,13 @@ # CHECK-NEXT: 3 7 1.00 * * negl (%rax) # CHECK-NEXT: 1 1 0.33 negq %rcx # CHECK-NEXT: 3 7 1.00 * * negq (%r10) -# CHECK-NEXT: 1 1 - nop -# CHECK-NEXT: 1 1 - nopw %di -# CHECK-NEXT: 1 1 - nopw (%rcx) -# CHECK-NEXT: 1 1 - nopl %esi -# CHECK-NEXT: 1 1 - nopl (%r8) -# CHECK-NEXT: 1 1 - nopq %rdx -# CHECK-NEXT: 1 1 - nopq (%r9) +# CHECK-NEXT: 1 1 0.25 nop +# CHECK-NEXT: 1 1 0.25 nopw %di +# CHECK-NEXT: 1 1 0.25 nopw (%rcx) +# CHECK-NEXT: 1 1 0.25 nopl %esi +# CHECK-NEXT: 1 1 0.25 nopl (%r8) +# CHECK-NEXT: 1 1 0.25 nopq %rdx +# CHECK-NEXT: 1 1 0.25 nopq (%r9) # CHECK-NEXT: 1 1 0.33 notb %dil # CHECK-NEXT: 3 7 1.00 * * notb (%r8) # CHECK-NEXT: 1 1 0.33 notw %si Index: llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-x86_64.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-x86_64.s +++ llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-x86_64.s @@ -747,7 +747,7 @@ # CHECK-NEXT: 4 6 1.00 * * 
btcq $7, (%rax) # CHECK-NEXT: 4 6 1.00 * * btrq $7, (%rax) # CHECK-NEXT: 4 6 1.00 * * btsq $7, (%rax) -# CHECK-NEXT: 1 1 - * clc +# CHECK-NEXT: 1 1 0.17 * clc # CHECK-NEXT: 1 1 0.25 decb %dil # CHECK-NEXT: 3 7 1.00 * * decb (%rax) # CHECK-NEXT: 1 1 0.25 decw %di Index: llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-x86_64.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-x86_64.s +++ llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-x86_64.s @@ -747,7 +747,7 @@ # CHECK-NEXT: 4 6 1.00 * * btcq $7, (%rax) # CHECK-NEXT: 4 6 1.00 * * btrq $7, (%rax) # CHECK-NEXT: 4 6 1.00 * * btsq $7, (%rax) -# CHECK-NEXT: 1 1 - * clc +# CHECK-NEXT: 1 1 0.17 * clc # CHECK-NEXT: 1 1 0.25 decb %dil # CHECK-NEXT: 3 7 1.00 * * decb (%rax) # CHECK-NEXT: 1 1 0.25 decw %di Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx1.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx1.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx1.s @@ -1174,40 +1174,40 @@ # CHECK-NEXT: 1 22 1.00 * vdivsd (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 15 1.00 vdivss %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 22 1.00 * vdivss (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vdppd $22, %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vdppd $22, (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vdpps $22, %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vdpps $22, (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vdpps $22, %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vdpps $22, (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vdppd $22, %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vdppd $22, (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vdpps $22, %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vdpps $22, (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vdpps $22, %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vdpps $22, (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 1 0.33 vextractf128 $1, %ymm0, %xmm2 # CHECK-NEXT: 2 8 0.50 * vextractf128 $1, %ymm0, (%rax) # CHECK-NEXT: 2 2 2.00 vextractps $1, %xmm0, %ecx # CHECK-NEXT: 2 5 2.50 * vextractps $1, %xmm0, (%rax) -# CHECK-NEXT: 1 100 - vhaddpd %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vhaddpd (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vhaddpd %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vhaddpd (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - vhaddps %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vhaddps (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vhaddps %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vhaddps (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - vhsubpd %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vhsubpd (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vhsubpd %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vhsubpd (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - vhsubps %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vhsubps (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vhsubps %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vhsubps (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vhaddpd %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vhaddpd (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vhaddpd %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vhaddpd (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vhaddps %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vhaddps (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vhaddps %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vhaddps (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vhsubpd %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 
100 0.25 * vhsubpd (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vhsubpd %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vhsubpd (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vhsubps %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vhsubps (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vhsubps %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vhsubps (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 2 0.67 vinsertf128 $1, %xmm0, %ymm1, %ymm2 # CHECK-NEXT: 2 9 0.67 * vinsertf128 $1, (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 1 0.50 vinsertps $1, %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 8 0.50 * vinsertps $1, (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 8 0.50 * vlddqu (%rax), %xmm2 # CHECK-NEXT: 1 8 0.50 * vlddqu (%rax), %ymm2 -# CHECK-NEXT: 1 100 - * * * vldmxcsr (%rax) -# CHECK-NEXT: 1 100 - * * * vmaskmovdqu %xmm0, %xmm1 +# CHECK-NEXT: 1 100 0.25 * * * vldmxcsr (%rax) +# CHECK-NEXT: 1 100 0.25 * * * vmaskmovdqu %xmm0, %xmm1 # CHECK-NEXT: 1 8 0.50 * vmaskmovpd (%rax), %xmm0, %xmm2 # CHECK-NEXT: 2 8 1.00 * vmaskmovpd (%rax), %ymm0, %ymm2 # CHECK-NEXT: 1 4 0.50 * * vmaskmovpd %xmm0, %xmm1, (%rax) @@ -1325,8 +1325,8 @@ # CHECK-NEXT: 1 1 0.25 vmovups %ymm0, %ymm2 # CHECK-NEXT: 1 1 0.50 * vmovups %ymm0, (%rax) # CHECK-NEXT: 1 8 0.50 * vmovups (%rax), %ymm2 -# CHECK-NEXT: 1 100 - vmpsadbw $1, %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vmpsadbw $1, (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vmpsadbw $1, %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vmpsadbw $1, (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 3 0.50 vmulpd %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 2 10 0.50 * vmulpd (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 4 0.50 vmulpd %ymm0, %ymm1, %ymm2 @@ -1391,8 +1391,8 @@ # CHECK-NEXT: 1 8 1.00 * vpblendvb %xmm3, (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.33 vpblendw $11, %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 2 8 0.50 * vpblendw $11, (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vpclmulqdq $11, %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vpclmulqdq $11, (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vpclmulqdq $11, %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vpclmulqdq $11, (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpcmpeqb %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 8 0.50 * vpcmpeqb (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpcmpeqd %xmm0, %xmm1, %xmm2 @@ -1409,8 +1409,8 @@ # CHECK-NEXT: 1 8 0.50 * vpcmpgtq (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpcmpgtw %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 8 0.50 * vpcmpgtw (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vperm2f128 $1, %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vperm2f128 $1, (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vperm2f128 $1, %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vperm2f128 $1, (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 1 0.50 vpermilpd $1, %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * vpermilpd $1, (%rax), %xmm2 # CHECK-NEXT: 1 1 0.50 vpermilpd %xmm0, %xmm1, %xmm2 @@ -1435,20 +1435,20 @@ # CHECK-NEXT: 2 5 3.00 * vpextrq $1, %xmm0, (%rax) # CHECK-NEXT: 1 2 2.00 vpextrw $1, %xmm0, %ecx # CHECK-NEXT: 2 5 3.00 * vpextrw $1, %xmm0, (%rax) -# CHECK-NEXT: 1 100 - vphaddd %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vphaddd (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vphaddsw %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vphaddsw (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vphaddw %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vphaddw (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vphaddd %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vphaddd (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vphaddsw %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vphaddsw (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 
100 0.25 vphaddw %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vphaddw (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 4 1.00 vphminposuw %xmm0, %xmm2 # CHECK-NEXT: 1 11 1.00 * vphminposuw (%rax), %xmm2 -# CHECK-NEXT: 1 100 - vphsubd %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vphsubd (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vphsubsw %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vphsubsw (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - vphsubw %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * vphsubw (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vphsubd %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vphsubd (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vphsubsw %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vphsubsw (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 vphsubw %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vphsubw (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpinsrb $1, %eax, %xmm1, %xmm2 # CHECK-NEXT: 1 8 0.50 * vpinsrb $1, (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpinsrd $1, %eax, %xmm1, %xmm2 @@ -1650,7 +1650,7 @@ # CHECK-NEXT: 1 27 20.00 * vsqrtsd (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 20 20.00 vsqrtss %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 27 20.00 * vsqrtss (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 1 100 - * * * vstmxcsr (%rax) +# CHECK-NEXT: 1 100 0.25 * * * vstmxcsr (%rax) # CHECK-NEXT: 1 3 1.00 vsubpd %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 10 1.00 * vsubpd (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 3 1.00 vsubpd %ymm0, %ymm1, %ymm2 @@ -1699,8 +1699,8 @@ # CHECK-NEXT: 1 8 0.50 * vxorps (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vxorps %ymm0, %ymm1, %ymm2 # CHECK-NEXT: 1 8 0.50 * vxorps (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * * * vzeroall -# CHECK-NEXT: 1 100 - * * * vzeroupper +# CHECK-NEXT: 1 100 0.25 * * * vzeroall +# CHECK-NEXT: 1 100 0.25 * * * vzeroupper # CHECK: Resources: # CHECK-NEXT: [0] - ZnAGU0 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx2.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx2.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx2.s @@ -462,19 +462,19 @@ # CHECK-NEXT: 1 100 0.25 vbroadcastss %xmm0, %ymm0 # CHECK-NEXT: 1 2 0.25 vextracti128 $1, %ymm0, %xmm2 # CHECK-NEXT: 1 1 0.50 * vextracti128 $1, %ymm0, (%rax) -# CHECK-NEXT: 1 100 - * vgatherdpd %xmm0, (%rax,%xmm1,2), %xmm2 -# CHECK-NEXT: 1 100 - * vgatherdpd %ymm0, (%rax,%xmm1,2), %ymm2 -# CHECK-NEXT: 1 100 - * vgatherdps %xmm0, (%rax,%xmm1,2), %xmm2 -# CHECK-NEXT: 1 100 - * vgatherdps %ymm0, (%rax,%ymm1,2), %ymm2 -# CHECK-NEXT: 1 100 - * vgatherqpd %xmm0, (%rax,%xmm1,2), %xmm2 -# CHECK-NEXT: 1 100 - * vgatherqpd %ymm0, (%rax,%ymm1,2), %ymm2 -# CHECK-NEXT: 1 100 - * vgatherqps %xmm0, (%rax,%xmm1,2), %xmm2 -# CHECK-NEXT: 1 100 - * vgatherqps %xmm0, (%rax,%ymm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vgatherdpd %xmm0, (%rax,%xmm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vgatherdpd %ymm0, (%rax,%xmm1,2), %ymm2 +# CHECK-NEXT: 1 100 0.25 * vgatherdps %xmm0, (%rax,%xmm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vgatherdps %ymm0, (%rax,%ymm1,2), %ymm2 +# CHECK-NEXT: 1 100 0.25 * vgatherqpd %xmm0, (%rax,%xmm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vgatherqpd %ymm0, (%rax,%ymm1,2), %ymm2 +# CHECK-NEXT: 1 100 0.25 * vgatherqps %xmm0, (%rax,%xmm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vgatherqps %xmm0, (%rax,%ymm1,2), %xmm2 # CHECK-NEXT: 1 2 0.25 vinserti128 $1, %xmm0, %ymm1, %ymm2 # CHECK-NEXT: 1 9 0.50 * vinserti128 $1, (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 8 0.50 * vmovntdqa (%rax), %ymm0 -# CHECK-NEXT: 1 100 - vmpsadbw $1, %ymm0, 
%ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vmpsadbw $1, (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vmpsadbw $1, %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vmpsadbw $1, (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 1 0.25 vpabsb %ymm0, %ymm2 # CHECK-NEXT: 1 8 0.50 * vpabsb (%rax), %ymm2 # CHECK-NEXT: 1 1 0.25 vpabsd %ymm0, %ymm2 @@ -563,38 +563,38 @@ # CHECK-NEXT: 1 107 0.50 * vpermps (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 2 0.25 vpermq $1, %ymm0, %ymm2 # CHECK-NEXT: 1 9 0.50 * vpermq $1, (%rax), %ymm2 -# CHECK-NEXT: 1 100 - * vpgatherdd %xmm0, (%rax,%xmm1,2), %xmm2 -# CHECK-NEXT: 1 100 - * vpgatherdd %ymm0, (%rax,%ymm1,2), %ymm2 -# CHECK-NEXT: 1 100 - * vpgatherdq %xmm0, (%rax,%xmm1,2), %xmm2 -# CHECK-NEXT: 1 100 - * vpgatherdq %ymm0, (%rax,%xmm1,2), %ymm2 -# CHECK-NEXT: 1 100 - * vpgatherqd %xmm0, (%rax,%xmm1,2), %xmm2 -# CHECK-NEXT: 1 100 - * vpgatherqq %ymm0, (%rax,%ymm1,2), %ymm2 -# CHECK-NEXT: 1 100 - * vpgatherqq %xmm0, (%rax,%xmm1,2), %xmm2 -# CHECK-NEXT: 1 100 - * vpgatherqq %ymm0, (%rax,%ymm1,2), %ymm2 -# CHECK-NEXT: 1 100 - vphaddd %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vphaddd (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - vphaddsw %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vphaddsw (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - vphaddw %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vphaddw (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - vphsubd %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vphsubd (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - vphsubsw %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vphsubsw (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - vphsubw %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vphsubw (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vpgatherdd %xmm0, (%rax,%xmm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vpgatherdd %ymm0, (%rax,%ymm1,2), %ymm2 +# CHECK-NEXT: 1 100 0.25 * vpgatherdq %xmm0, (%rax,%xmm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vpgatherdq %ymm0, (%rax,%xmm1,2), %ymm2 +# CHECK-NEXT: 1 100 0.25 * vpgatherqd %xmm0, (%rax,%xmm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vpgatherqq %ymm0, (%rax,%ymm1,2), %ymm2 +# CHECK-NEXT: 1 100 0.25 * vpgatherqq %xmm0, (%rax,%xmm1,2), %xmm2 +# CHECK-NEXT: 1 100 0.25 * vpgatherqq %ymm0, (%rax,%ymm1,2), %ymm2 +# CHECK-NEXT: 1 100 0.25 vphaddd %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vphaddd (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vphaddsw %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vphaddsw (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vphaddw %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vphaddw (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vphsubd %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vphsubd (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vphsubsw %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vphsubsw (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 vphsubw %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vphsubw (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 4 1.00 vpmaddubsw %ymm0, %ymm1, %ymm2 # CHECK-NEXT: 1 11 1.00 * vpmaddubsw (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 4 1.00 vpmaddwd %ymm0, %ymm1, %ymm2 # CHECK-NEXT: 1 11 1.00 * vpmaddwd (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 1 100 - * vpmaskmovd (%rax), %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * vpmaskmovd (%rax), %ymm0, %ymm2 -# CHECK-NEXT: 1 100 - * * vpmaskmovd %xmm0, %xmm1, (%rax) -# CHECK-NEXT: 1 100 - * * vpmaskmovd %ymm0, %ymm1, (%rax) +# CHECK-NEXT: 1 100 0.25 * vpmaskmovd (%rax), %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vpmaskmovd (%rax), %ymm0, %ymm2 +# CHECK-NEXT: 1 100 0.25 * * vpmaskmovd %xmm0, %xmm1, (%rax) +# CHECK-NEXT: 1 100 0.25 * * vpmaskmovd %ymm0, 
%ymm1, (%rax) # CHECK-NEXT: 2 8 1.00 * vpmaskmovq (%rax), %xmm0, %xmm2 # CHECK-NEXT: 2 9 1.50 * vpmaskmovq (%rax), %ymm0, %ymm2 -# CHECK-NEXT: 1 100 - * * vpmaskmovq %xmm0, %xmm1, (%rax) -# CHECK-NEXT: 1 100 - * * vpmaskmovq %ymm0, %ymm1, (%rax) +# CHECK-NEXT: 1 100 0.25 * * vpmaskmovq %xmm0, %xmm1, (%rax) +# CHECK-NEXT: 1 100 0.25 * * vpmaskmovq %ymm0, %ymm1, (%rax) # CHECK-NEXT: 1 1 0.25 vpmaxsb %ymm0, %ymm1, %ymm2 # CHECK-NEXT: 1 8 0.50 * vpmaxsb (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 1 0.25 vpmaxsd %ymm0, %ymm1, %ymm2 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-bmi2.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-bmi2.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-bmi2.s @@ -66,14 +66,14 @@ # CHECK-NEXT: 1 8 2.00 * mulxl (%rax), %ebx, %ecx # CHECK-NEXT: 1 3 1.00 mulxq %rax, %rbx, %rcx # CHECK-NEXT: 1 8 1.00 * mulxq (%rax), %rbx, %rcx -# CHECK-NEXT: 1 100 - pdepl %eax, %ebx, %ecx -# CHECK-NEXT: 1 100 - * pdepl (%rax), %ebx, %ecx -# CHECK-NEXT: 1 100 - pdepq %rax, %rbx, %rcx -# CHECK-NEXT: 1 100 - * pdepq (%rax), %rbx, %rcx -# CHECK-NEXT: 1 100 - pextl %eax, %ebx, %ecx -# CHECK-NEXT: 1 100 - * pextl (%rax), %ebx, %ecx -# CHECK-NEXT: 1 100 - pextq %rax, %rbx, %rcx -# CHECK-NEXT: 1 100 - * pextq (%rax), %rbx, %rcx +# CHECK-NEXT: 1 100 0.25 pdepl %eax, %ebx, %ecx +# CHECK-NEXT: 1 100 0.25 * pdepl (%rax), %ebx, %ecx +# CHECK-NEXT: 1 100 0.25 pdepq %rax, %rbx, %rcx +# CHECK-NEXT: 1 100 0.25 * pdepq (%rax), %rbx, %rcx +# CHECK-NEXT: 1 100 0.25 pextl %eax, %ebx, %ecx +# CHECK-NEXT: 1 100 0.25 * pextl (%rax), %ebx, %ecx +# CHECK-NEXT: 1 100 0.25 pextq %rax, %rbx, %rcx +# CHECK-NEXT: 1 100 0.25 * pextq (%rax), %rbx, %rcx # CHECK-NEXT: 1 1 0.25 rorxl $1, %eax, %ecx # CHECK-NEXT: 2 5 0.50 * rorxl $1, (%rax), %ecx # CHECK-NEXT: 1 1 0.25 rorxq $1, %rax, %rcx Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-f16c.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-f16c.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-f16c.s @@ -22,14 +22,14 @@ # CHECK-NEXT: [6]: HasSideEffects # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 1 100 - vcvtph2ps %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * vcvtph2ps (%rax), %xmm2 -# CHECK-NEXT: 1 100 - vcvtph2ps %xmm0, %ymm2 -# CHECK-NEXT: 1 100 - * vcvtph2ps (%rax), %ymm2 -# CHECK-NEXT: 1 100 - vcvtps2ph $0, %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * vcvtps2ph $0, %xmm0, (%rax) -# CHECK-NEXT: 1 100 - vcvtps2ph $0, %ymm0, %xmm2 -# CHECK-NEXT: 1 100 - * vcvtps2ph $0, %ymm0, (%rax) +# CHECK-NEXT: 1 100 0.25 vcvtph2ps %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vcvtph2ps (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 vcvtph2ps %xmm0, %ymm2 +# CHECK-NEXT: 1 100 0.25 * vcvtph2ps (%rax), %ymm2 +# CHECK-NEXT: 1 100 0.25 vcvtps2ph $0, %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vcvtps2ph $0, %xmm0, (%rax) +# CHECK-NEXT: 1 100 0.25 vcvtps2ph $0, %ymm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * vcvtps2ph $0, %ymm0, (%rax) # CHECK: Resources: # CHECK-NEXT: [0] - ZnAGU0 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse1.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse1.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse1.s @@ -230,8 +230,8 @@ # CHECK-NEXT: 1 22 1.00 * divps (%rax), %xmm2 # CHECK-NEXT: 1 15 1.00 divss %xmm0, %xmm2 # CHECK-NEXT: 1 22 1.00 * divss (%rax), %xmm2 -# CHECK-NEXT: 1 100 - * * * 
ldmxcsr (%rax) -# CHECK-NEXT: 1 100 - * * * maskmovq %mm0, %mm1 +# CHECK-NEXT: 1 100 0.25 * * * ldmxcsr (%rax) +# CHECK-NEXT: 1 100 0.25 * * * maskmovq %mm0, %mm1 # CHECK-NEXT: 1 3 1.00 maxps %xmm0, %xmm2 # CHECK-NEXT: 1 10 1.00 * maxps (%rax), %xmm2 # CHECK-NEXT: 1 3 1.00 maxss %xmm0, %xmm2 @@ -305,7 +305,7 @@ # CHECK-NEXT: 1 27 20.00 * sqrtps (%rax), %xmm2 # CHECK-NEXT: 1 20 20.00 sqrtss %xmm0, %xmm2 # CHECK-NEXT: 1 27 20.00 * sqrtss (%rax), %xmm2 -# CHECK-NEXT: 1 100 - * * * stmxcsr (%rax) +# CHECK-NEXT: 1 100 0.25 * * * stmxcsr (%rax) # CHECK-NEXT: 1 3 1.00 subps %xmm0, %xmm2 # CHECK-NEXT: 1 10 1.00 * subps (%rax), %xmm2 # CHECK-NEXT: 1 3 1.00 subss %xmm0, %xmm2 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse2.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse2.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse2.s @@ -460,7 +460,7 @@ # CHECK-NEXT: 1 15 1.00 divsd %xmm0, %xmm2 # CHECK-NEXT: 1 22 1.00 * divsd (%rax), %xmm2 # CHECK-NEXT: 1 1 0.50 * * * lfence -# CHECK-NEXT: 1 100 - * * * maskmovdqu %xmm0, %xmm1 +# CHECK-NEXT: 1 100 0.25 * * * maskmovdqu %xmm0, %xmm1 # CHECK-NEXT: 1 3 1.00 maxpd %xmm0, %xmm2 # CHECK-NEXT: 1 10 1.00 * maxpd (%rax), %xmm2 # CHECK-NEXT: 1 3 1.00 maxsd %xmm0, %xmm2 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse3.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse3.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse3.s @@ -43,21 +43,21 @@ # CHECK-NEXT: 1 10 1.00 * addsubpd (%rax), %xmm2 # CHECK-NEXT: 1 3 1.00 addsubps %xmm0, %xmm2 # CHECK-NEXT: 1 10 1.00 * addsubps (%rax), %xmm2 -# CHECK-NEXT: 1 100 - haddpd %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * haddpd (%rax), %xmm2 -# CHECK-NEXT: 1 100 - haddps %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * haddps (%rax), %xmm2 -# CHECK-NEXT: 1 100 - hsubpd %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * hsubpd (%rax), %xmm2 -# CHECK-NEXT: 1 100 - hsubps %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * hsubps (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 haddpd %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * haddpd (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 haddps %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * haddps (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 hsubpd %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * hsubpd (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 hsubps %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * hsubps (%rax), %xmm2 # CHECK-NEXT: 1 8 0.50 * lddqu (%rax), %xmm2 # CHECK-NEXT: 1 1 0.50 movddup %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * movddup (%rax), %xmm2 # CHECK-NEXT: 1 1 0.50 movshdup %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * movshdup (%rax), %xmm2 -# CHECK-NEXT: 1 100 - movsldup %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * movsldup (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 movsldup %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * movsldup (%rax), %xmm2 # CHECK: Resources: # CHECK-NEXT: [0] - ZnAGU0 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse41.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse41.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse41.s @@ -163,17 +163,17 @@ # CHECK-NEXT: 1 8 0.50 * blendvpd %xmm0, (%rax), %xmm2 # CHECK-NEXT: 1 1 0.50 blendvps %xmm0, %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * blendvps %xmm0, (%rax), %xmm2 -# CHECK-NEXT: 1 100 - dppd $22, %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * dppd $22, (%rax), %xmm2 -# CHECK-NEXT: 1 100 - dpps $22, %xmm0, %xmm2 -# CHECK-NEXT: 1 100 
- * dpps $22, (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 dppd $22, %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * dppd $22, (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 dpps $22, %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * dpps $22, (%rax), %xmm2 # CHECK-NEXT: 2 2 2.00 extractps $1, %xmm0, %ecx # CHECK-NEXT: 2 5 2.50 * extractps $1, %xmm0, (%rax) # CHECK-NEXT: 1 1 0.50 insertps $1, %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * insertps $1, (%rax), %xmm2 # CHECK-NEXT: 1 8 0.50 * movntdqa (%rax), %xmm2 -# CHECK-NEXT: 1 100 - mpsadbw $1, %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * mpsadbw $1, (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 mpsadbw $1, %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * mpsadbw $1, (%rax), %xmm2 # CHECK-NEXT: 1 1 0.25 packusdw %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * packusdw (%rax), %xmm2 # CHECK-NEXT: 1 1 1.00 pblendvb %xmm0, %xmm0, %xmm2 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse42.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse42.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse42.s @@ -50,14 +50,14 @@ # CHECK-NEXT: 1 10 1.00 * crc32b (%rax), %rcx # CHECK-NEXT: 1 3 1.00 crc32q %rax, %rcx # CHECK-NEXT: 1 10 1.00 * crc32q (%rax), %rcx -# CHECK-NEXT: 1 100 - pcmpestri $1, %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * pcmpestri $1, (%rax), %xmm2 -# CHECK-NEXT: 1 100 - pcmpestrm $1, %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * pcmpestrm $1, (%rax), %xmm2 -# CHECK-NEXT: 1 100 - pcmpistri $1, %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * pcmpistri $1, (%rax), %xmm2 -# CHECK-NEXT: 1 100 - pcmpistrm $1, %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * pcmpistrm $1, (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 pcmpestri $1, %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * pcmpestri $1, (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 pcmpestrm $1, %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * pcmpestrm $1, (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 pcmpistri $1, %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * pcmpistri $1, (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 pcmpistrm $1, %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * pcmpistrm $1, (%rax), %xmm2 # CHECK-NEXT: 1 1 0.50 pcmpgtq %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * pcmpgtq (%rax), %xmm2 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-ssse3.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-ssse3.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-ssse3.s @@ -122,30 +122,30 @@ # CHECK-NEXT: 1 8 0.50 * palignr $1, (%rax), %mm2 # CHECK-NEXT: 1 1 0.25 palignr $1, %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * palignr $1, (%rax), %xmm2 -# CHECK-NEXT: 1 100 - phaddd %mm0, %mm2 -# CHECK-NEXT: 1 100 - * phaddd (%rax), %mm2 -# CHECK-NEXT: 1 100 - phaddd %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * phaddd (%rax), %xmm2 -# CHECK-NEXT: 1 100 - phaddsw %mm0, %mm2 -# CHECK-NEXT: 1 100 - * phaddsw (%rax), %mm2 -# CHECK-NEXT: 1 100 - phaddsw %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * phaddsw (%rax), %xmm2 -# CHECK-NEXT: 1 100 - phaddw %mm0, %mm2 -# CHECK-NEXT: 1 100 - * phaddw (%rax), %mm2 -# CHECK-NEXT: 1 100 - phaddw %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * phaddw (%rax), %xmm2 -# CHECK-NEXT: 1 100 - phsubd %mm0, %mm2 -# CHECK-NEXT: 1 100 - * phsubd (%rax), %mm2 -# CHECK-NEXT: 1 100 - phsubd %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * phsubd (%rax), %xmm2 -# CHECK-NEXT: 1 100 - phsubsw %mm0, %mm2 -# CHECK-NEXT: 1 100 - * phsubsw (%rax), %mm2 -# CHECK-NEXT: 1 100 - phsubsw %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * phsubsw (%rax), %xmm2 -# CHECK-NEXT: 1 100 - phsubw %mm0, %mm2 -# 
CHECK-NEXT: 1 100 - * phsubw (%rax), %mm2 -# CHECK-NEXT: 1 100 - phsubw %xmm0, %xmm2 -# CHECK-NEXT: 1 100 - * phsubw (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 phaddd %mm0, %mm2 +# CHECK-NEXT: 1 100 0.25 * phaddd (%rax), %mm2 +# CHECK-NEXT: 1 100 0.25 phaddd %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * phaddd (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 phaddsw %mm0, %mm2 +# CHECK-NEXT: 1 100 0.25 * phaddsw (%rax), %mm2 +# CHECK-NEXT: 1 100 0.25 phaddsw %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * phaddsw (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 phaddw %mm0, %mm2 +# CHECK-NEXT: 1 100 0.25 * phaddw (%rax), %mm2 +# CHECK-NEXT: 1 100 0.25 phaddw %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * phaddw (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 phsubd %mm0, %mm2 +# CHECK-NEXT: 1 100 0.25 * phsubd (%rax), %mm2 +# CHECK-NEXT: 1 100 0.25 phsubd %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * phsubd (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 phsubsw %mm0, %mm2 +# CHECK-NEXT: 1 100 0.25 * phsubsw (%rax), %mm2 +# CHECK-NEXT: 1 100 0.25 phsubsw %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * phsubsw (%rax), %xmm2 +# CHECK-NEXT: 1 100 0.25 phsubw %mm0, %mm2 +# CHECK-NEXT: 1 100 0.25 * phsubw (%rax), %mm2 +# CHECK-NEXT: 1 100 0.25 phsubw %xmm0, %xmm2 +# CHECK-NEXT: 1 100 0.25 * phsubw (%rax), %xmm2 # CHECK-NEXT: 1 4 1.00 pmaddubsw %mm0, %mm2 # CHECK-NEXT: 1 11 1.00 * pmaddubsw (%rax), %mm2 # CHECK-NEXT: 1 4 1.00 pmaddubsw %xmm0, %xmm2 Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-x86_64.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-x86_64.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-x86_64.s @@ -822,13 +822,13 @@ # CHECK-NEXT: 2 5 0.50 * * negl (%rax) # CHECK-NEXT: 1 1 0.25 negq %rcx # CHECK-NEXT: 2 5 0.50 * * negq (%r10) -# CHECK-NEXT: 1 1 - nop -# CHECK-NEXT: 1 1 - nopw %di -# CHECK-NEXT: 1 1 - nopw (%rcx) -# CHECK-NEXT: 1 1 - nopl %esi -# CHECK-NEXT: 1 1 - nopl (%r8) -# CHECK-NEXT: 1 1 - nopq %rdx -# CHECK-NEXT: 1 1 - nopq (%r9) +# CHECK-NEXT: 1 1 0.25 nop +# CHECK-NEXT: 1 1 0.25 nopw %di +# CHECK-NEXT: 1 1 0.25 nopw (%rcx) +# CHECK-NEXT: 1 1 0.25 nopl %esi +# CHECK-NEXT: 1 1 0.25 nopl (%r8) +# CHECK-NEXT: 1 1 0.25 nopq %rdx +# CHECK-NEXT: 1 1 0.25 nopq (%r9) # CHECK-NEXT: 1 1 0.25 notb %dil # CHECK-NEXT: 2 5 0.50 * * notb (%r8) # CHECK-NEXT: 1 1 0.25 notw %si @@ -869,52 +869,52 @@ # CHECK-NEXT: 2 5 0.50 * orq (%rax), %rdi # CHECK-NEXT: 1 1 0.25 rclb %dil # CHECK-NEXT: 1 1 0.25 rcrb %dil -# CHECK-NEXT: 1 100 - * rclb (%rax) -# CHECK-NEXT: 1 100 - * rcrb (%rax) +# CHECK-NEXT: 1 100 0.25 * rclb (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrb (%rax) # CHECK-NEXT: 1 1 0.25 rclb $7, %dil # CHECK-NEXT: 1 1 0.25 rcrb $7, %dil -# CHECK-NEXT: 1 100 - * rclb $7, (%rax) -# CHECK-NEXT: 1 100 - * rcrb $7, (%rax) +# CHECK-NEXT: 1 100 0.25 * rclb $7, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrb $7, (%rax) # CHECK-NEXT: 1 1 0.25 rclb %cl, %dil # CHECK-NEXT: 1 1 0.25 rcrb %cl, %dil -# CHECK-NEXT: 1 100 - * rclb %cl, (%rax) -# CHECK-NEXT: 1 100 - * rcrb %cl, (%rax) +# CHECK-NEXT: 1 100 0.25 * rclb %cl, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrb %cl, (%rax) # CHECK-NEXT: 1 1 0.25 rclw %di # CHECK-NEXT: 1 1 0.25 rcrw %di -# CHECK-NEXT: 1 100 - * rclw (%rax) -# CHECK-NEXT: 1 100 - * rcrw (%rax) +# CHECK-NEXT: 1 100 0.25 * rclw (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrw (%rax) # CHECK-NEXT: 1 1 0.25 rclw $7, %di # CHECK-NEXT: 1 1 0.25 rcrw $7, %di -# CHECK-NEXT: 1 100 - * rclw $7, (%rax) -# CHECK-NEXT: 1 100 - * rcrw $7, (%rax) +# CHECK-NEXT: 1 100 0.25 * rclw $7, (%rax) +# 
CHECK-NEXT: 1 100 0.25 * rcrw $7, (%rax) # CHECK-NEXT: 1 1 0.25 rclw %cl, %di # CHECK-NEXT: 1 1 0.25 rcrw %cl, %di -# CHECK-NEXT: 1 100 - * rclw %cl, (%rax) -# CHECK-NEXT: 1 100 - * rcrw %cl, (%rax) +# CHECK-NEXT: 1 100 0.25 * rclw %cl, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrw %cl, (%rax) # CHECK-NEXT: 1 1 0.25 rcll %edi # CHECK-NEXT: 1 1 0.25 rcrl %edi -# CHECK-NEXT: 1 100 - * rcll (%rax) -# CHECK-NEXT: 1 100 - * rcrl (%rax) +# CHECK-NEXT: 1 100 0.25 * rcll (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrl (%rax) # CHECK-NEXT: 1 1 0.25 rcll $7, %edi # CHECK-NEXT: 1 1 0.25 rcrl $7, %edi -# CHECK-NEXT: 1 100 - * rcll $7, (%rax) -# CHECK-NEXT: 1 100 - * rcrl $7, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcll $7, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrl $7, (%rax) # CHECK-NEXT: 1 1 0.25 rcll %cl, %edi # CHECK-NEXT: 1 1 0.25 rcrl %cl, %edi -# CHECK-NEXT: 1 100 - * rcll %cl, (%rax) -# CHECK-NEXT: 1 100 - * rcrl %cl, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcll %cl, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrl %cl, (%rax) # CHECK-NEXT: 1 1 0.25 rclq %rdi # CHECK-NEXT: 1 1 0.25 rcrq %rdi -# CHECK-NEXT: 1 100 - * rclq (%rax) -# CHECK-NEXT: 1 100 - * rcrq (%rax) +# CHECK-NEXT: 1 100 0.25 * rclq (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrq (%rax) # CHECK-NEXT: 1 1 0.25 rclq $7, %rdi # CHECK-NEXT: 1 1 0.25 rcrq $7, %rdi -# CHECK-NEXT: 1 100 - * rclq $7, (%rax) -# CHECK-NEXT: 1 100 - * rcrq $7, (%rax) +# CHECK-NEXT: 1 100 0.25 * rclq $7, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrq $7, (%rax) # CHECK-NEXT: 1 1 0.25 rclq %cl, %rdi # CHECK-NEXT: 1 1 0.25 rcrq %cl, %rdi -# CHECK-NEXT: 1 100 - * rclq %cl, (%rax) -# CHECK-NEXT: 1 100 - * rcrq %cl, (%rax) +# CHECK-NEXT: 1 100 0.25 * rclq %cl, (%rax) +# CHECK-NEXT: 1 100 0.25 * rcrq %cl, (%rax) # CHECK-NEXT: 1 1 0.25 rolb %dil # CHECK-NEXT: 1 1 0.25 rorb %dil # CHECK-NEXT: 3 5 1.00 * * rolb (%rax) @@ -1065,26 +1065,26 @@ # CHECK-NEXT: 1 1 0.25 sbbq %rdi, %rdi # CHECK-NEXT: 2 5 0.50 * * sbbq %rdi, (%rax) # CHECK-NEXT: 2 5 0.50 * sbbq (%rax), %rdi -# CHECK-NEXT: 1 100 - shldw %cl, %si, %di -# CHECK-NEXT: 1 100 - shrdw %cl, %si, %di -# CHECK-NEXT: 1 100 - * * shldw %cl, %si, (%rax) -# CHECK-NEXT: 1 100 - * * shrdw %cl, %si, (%rax) +# CHECK-NEXT: 1 100 0.25 shldw %cl, %si, %di +# CHECK-NEXT: 1 100 0.25 shrdw %cl, %si, %di +# CHECK-NEXT: 1 100 0.25 * * shldw %cl, %si, (%rax) +# CHECK-NEXT: 1 100 0.25 * * shrdw %cl, %si, (%rax) # CHECK-NEXT: 1 1 0.25 shldw $7, %si, %di # CHECK-NEXT: 1 1 0.25 shrdw $7, %si, %di # CHECK-NEXT: 2 5 0.50 * * shldw $7, %si, (%rax) # CHECK-NEXT: 2 5 0.50 * * shrdw $7, %si, (%rax) -# CHECK-NEXT: 1 100 - shldl %cl, %esi, %edi -# CHECK-NEXT: 1 100 - shrdl %cl, %esi, %edi -# CHECK-NEXT: 1 100 - * * shldl %cl, %esi, (%rax) -# CHECK-NEXT: 1 100 - * * shrdl %cl, %esi, (%rax) +# CHECK-NEXT: 1 100 0.25 shldl %cl, %esi, %edi +# CHECK-NEXT: 1 100 0.25 shrdl %cl, %esi, %edi +# CHECK-NEXT: 1 100 0.25 * * shldl %cl, %esi, (%rax) +# CHECK-NEXT: 1 100 0.25 * * shrdl %cl, %esi, (%rax) # CHECK-NEXT: 1 1 0.25 shldl $7, %esi, %edi # CHECK-NEXT: 1 1 0.25 shrdl $7, %esi, %edi # CHECK-NEXT: 2 5 0.50 * * shldl $7, %esi, (%rax) # CHECK-NEXT: 2 5 0.50 * * shrdl $7, %esi, (%rax) -# CHECK-NEXT: 1 100 - shldq %cl, %rsi, %rdi -# CHECK-NEXT: 1 100 - shrdq %cl, %rsi, %rdi -# CHECK-NEXT: 1 100 - * * shldq %cl, %rsi, (%rax) -# CHECK-NEXT: 1 100 - * * shrdq %cl, %rsi, (%rax) +# CHECK-NEXT: 1 100 0.25 shldq %cl, %rsi, %rdi +# CHECK-NEXT: 1 100 0.25 shrdq %cl, %rsi, %rdi +# CHECK-NEXT: 1 100 0.25 * * shldq %cl, %rsi, (%rax) +# CHECK-NEXT: 1 100 0.25 * * shrdq %cl, %rsi, (%rax) # CHECK-NEXT: 1 1 0.25 shldq $7, 
%rsi, %rdi # CHECK-NEXT: 1 1 0.25 shrdq $7, %rsi, %rdi # CHECK-NEXT: 2 5 0.50 * * shldq $7, %rsi, (%rax) Index: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-x87.s =================================================================== --- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-x87.s +++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-x87.s @@ -206,7 +206,7 @@ # CHECK-NEXT: [6]: HasSideEffects # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 1 100 - * f2xm1 +# CHECK-NEXT: 1 100 0.25 * f2xm1 # CHECK-NEXT: 1 2 1.00 * fabs # CHECK-NEXT: 1 3 1.00 * fadd %st(0), %st(1) # CHECK-NEXT: 1 3 1.00 * fadd %st(2) @@ -216,18 +216,18 @@ # CHECK-NEXT: 1 3 1.00 * faddp %st(2) # CHECK-NEXT: 1 10 1.00 * * fiadds (%ecx) # CHECK-NEXT: 1 10 1.00 * * fiaddl (%ecx) -# CHECK-NEXT: 1 100 - * fbld (%ecx) -# CHECK-NEXT: 1 100 - * fbstp (%eax) +# CHECK-NEXT: 1 100 0.25 * fbld (%ecx) +# CHECK-NEXT: 1 100 0.25 * fbstp (%eax) # CHECK-NEXT: 1 1 1.00 * fchs -# CHECK-NEXT: 1 100 - * fnclex -# CHECK-NEXT: 1 100 - * fcmovb %st(1), %st(0) -# CHECK-NEXT: 1 100 - * fcmovbe %st(1), %st(0) -# CHECK-NEXT: 1 100 - * fcmove %st(1), %st(0) -# CHECK-NEXT: 1 100 - * fcmovnb %st(1), %st(0) -# CHECK-NEXT: 1 100 - * fcmovnbe %st(1), %st(0) -# CHECK-NEXT: 1 100 - * fcmovne %st(1), %st(0) -# CHECK-NEXT: 1 100 - * fcmovnu %st(1), %st(0) -# CHECK-NEXT: 1 100 - * fcmovu %st(1), %st(0) +# CHECK-NEXT: 1 100 0.25 * fnclex +# CHECK-NEXT: 1 100 0.25 * fcmovb %st(1), %st(0) +# CHECK-NEXT: 1 100 0.25 * fcmovbe %st(1), %st(0) +# CHECK-NEXT: 1 100 0.25 * fcmove %st(1), %st(0) +# CHECK-NEXT: 1 100 0.25 * fcmovnb %st(1), %st(0) +# CHECK-NEXT: 1 100 0.25 * fcmovnbe %st(1), %st(0) +# CHECK-NEXT: 1 100 0.25 * fcmovne %st(1), %st(0) +# CHECK-NEXT: 1 100 0.25 * fcmovnu %st(1), %st(0) +# CHECK-NEXT: 1 100 0.25 * fcmovu %st(1), %st(0) # CHECK-NEXT: 1 1 1.00 * fcom %st(1) # CHECK-NEXT: 1 1 1.00 * fcom %st(3) # CHECK-NEXT: 1 8 1.00 * fcoms (%ecx) @@ -239,7 +239,7 @@ # CHECK-NEXT: 1 1 1.00 * fcompp # CHECK-NEXT: 1 9 0.50 * fcomi %st(3) # CHECK-NEXT: 1 9 0.50 * fcompi %st(3) -# CHECK-NEXT: 1 100 - * fcos +# CHECK-NEXT: 1 100 0.25 * fcos # CHECK-NEXT: 1 11 1.00 * fdecstp # CHECK-NEXT: 1 15 1.00 * fdiv %st(0), %st(1) # CHECK-NEXT: 1 15 1.00 * fdiv %st(2) @@ -266,7 +266,7 @@ # CHECK-NEXT: 2 11 1.00 * * fildl (%ecx) # CHECK-NEXT: 2 11 1.00 * * fildll (%eax) # CHECK-NEXT: 1 11 1.00 * fincstp -# CHECK-NEXT: 1 100 - * fninit +# CHECK-NEXT: 1 100 0.25 * fninit # CHECK-NEXT: 1 12 0.50 * * fists (%edx) # CHECK-NEXT: 1 12 0.50 * * fistl (%ecx) # CHECK-NEXT: 1 12 0.50 * * fistps (%edx) @@ -279,8 +279,8 @@ # CHECK-NEXT: 1 8 0.50 * * flds (%edx) # CHECK-NEXT: 1 8 0.50 * * fldl (%ecx) # CHECK-NEXT: 2 1 0.50 * * fldt (%eax) -# CHECK-NEXT: 1 100 - * * fldcw (%eax) -# CHECK-NEXT: 1 100 - * fldenv (%eax) +# CHECK-NEXT: 1 100 0.25 * * fldcw (%eax) +# CHECK-NEXT: 1 100 0.25 * fldenv (%eax) # CHECK-NEXT: 1 11 1.00 * fld1 # CHECK-NEXT: 1 11 1.00 * fldl2e # CHECK-NEXT: 1 11 1.00 * fldl2t @@ -297,16 +297,16 @@ # CHECK-NEXT: 2 10 0.50 * * fimuls (%ecx) # CHECK-NEXT: 2 10 0.50 * * fimull (%eax) # CHECK-NEXT: 1 1 1.00 * fnop -# CHECK-NEXT: 1 100 - * fpatan -# CHECK-NEXT: 1 100 - * fprem -# CHECK-NEXT: 1 100 - * fprem1 -# CHECK-NEXT: 1 100 - * fptan -# CHECK-NEXT: 1 100 - * frndint -# CHECK-NEXT: 1 100 - * frstor (%eax) -# CHECK-NEXT: 1 100 - * fnsave (%eax) -# CHECK-NEXT: 1 100 - * fscale -# CHECK-NEXT: 1 100 - * fsin -# CHECK-NEXT: 1 100 - * fsincos +# CHECK-NEXT: 1 100 0.25 * fpatan +# CHECK-NEXT: 1 100 0.25 * fprem +# CHECK-NEXT: 1 100 0.25 * fprem1 +# 
CHECK-NEXT: 1 100 0.25 * fptan +# CHECK-NEXT: 1 100 0.25 * frndint +# CHECK-NEXT: 1 100 0.25 * frstor (%eax) +# CHECK-NEXT: 1 100 0.25 * fnsave (%eax) +# CHECK-NEXT: 1 100 0.25 * fscale +# CHECK-NEXT: 1 100 0.25 * fsin +# CHECK-NEXT: 1 100 0.25 * fsincos # CHECK-NEXT: 1 20 20.00 * fsqrt # CHECK-NEXT: 2 5 0.50 * fst %st(0) # CHECK-NEXT: 1 1 0.50 * * fsts (%edx) @@ -315,12 +315,12 @@ # CHECK-NEXT: 1 1 0.50 * * fstpl (%edx) # CHECK-NEXT: 1 1 0.50 * * fstpl (%ecx) # CHECK-NEXT: 1 5 0.50 * * fstpt (%eax) -# CHECK-NEXT: 1 100 - * * fnstcw (%eax) -# CHECK-NEXT: 1 100 - * fnstenv (%eax) -# CHECK-NEXT: 1 100 - * fnstsw (%eax) -# CHECK-NEXT: 1 100 - * frstor (%eax) +# CHECK-NEXT: 1 100 0.25 * * fnstcw (%eax) +# CHECK-NEXT: 1 100 0.25 * fnstenv (%eax) +# CHECK-NEXT: 1 100 0.25 * fnstsw (%eax) +# CHECK-NEXT: 1 100 0.25 * frstor (%eax) # CHECK-NEXT: 1 1 1.00 * wait -# CHECK-NEXT: 1 100 - * fnsave (%eax) +# CHECK-NEXT: 1 100 0.25 * fnsave (%eax) # CHECK-NEXT: 1 3 1.00 * fsub %st(0), %st(1) # CHECK-NEXT: 1 3 1.00 * fsub %st(2) # CHECK-NEXT: 1 10 1.00 * * fsubs (%ecx) @@ -349,11 +349,11 @@ # CHECK-NEXT: 1 1 1.00 * fxam # CHECK-NEXT: 1 1 0.25 * fxch %st(1) # CHECK-NEXT: 1 1 0.25 * fxch %st(3) -# CHECK-NEXT: 1 100 - * * * fxrstor (%eax) -# CHECK-NEXT: 1 100 - * * * fxsave (%eax) -# CHECK-NEXT: 1 100 - * fxtract -# CHECK-NEXT: 1 100 - * fyl2x -# CHECK-NEXT: 1 100 - * fyl2xp1 +# CHECK-NEXT: 1 100 0.25 * * * fxrstor (%eax) +# CHECK-NEXT: 1 100 0.25 * * * fxsave (%eax) +# CHECK-NEXT: 1 100 0.25 * fxtract +# CHECK-NEXT: 1 100 0.25 * fyl2x +# CHECK-NEXT: 1 100 0.25 * fyl2xp1 # CHECK: Resources: # CHECK-NEXT: [0] - ZnAGU0