VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp@ 43667

Last change on this file since 43667 was 41965, checked in by vboxsync, 12 years ago

VMM: ran scm. Mostly svn:keywords changes (adding Revision).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 14.1 KB
RevLine 
[23]1/* $Id: TMAllCpu.cpp 41965 2012-06-29 02:52:49Z vboxsync $ */
[1]2/** @file
3 * TM - Timeout Manager, CPU Time, All Contexts.
4 */
5
6/*
[28800]7 * Copyright (C) 2006-2007 Oracle Corporation
[1]8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
[5999]12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
[1]16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
[35346]23#include <VBox/vmm/tm.h>
[29250]24#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
[35333]25#include "TMInternal.h"
[35346]26#include <VBox/vmm/vm.h>
[1057]27#include <VBox/sup.h>
[1]28
29#include <VBox/param.h>
30#include <VBox/err.h>
[29250]31#include <iprt/asm-math.h>
[1]32#include <iprt/assert.h>
[9083]33#include <VBox/log.h>
[1]34
35
36/**
[1057]37 * Gets the raw cpu tick from current virtual time.
38 */
[2081]39DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
[1057]40{
[32419]41 uint64_t u64;
42 if (fCheckTimers)
43 u64 = TMVirtualSyncGet(pVM);
44 else
45 u64 = TMVirtualSyncGetNoCheck(pVM);
46 if (u64 != TMCLOCK_FREQ_VIRTUAL) /* what's the use of this test, document! */
[1956]47 u64 = ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
[1057]48 return u64;
49}
50
51
52/**
[1]53 * Resumes the CPU timestamp counter ticking.
54 *
55 * @returns VBox status code.
[41801]56 * @param pVM Pointer to the VM.
57 * @param pVCpu Pointer to the VMCPU.
[12549]58 * @internal
[1]59 */
[19032]60int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
[1]61{
[19032]62 if (!pVCpu->tm.s.fTSCTicking)
[1]63 {
[19032]64 pVCpu->tm.s.fTSCTicking = true;
[1057]65 if (pVM->tm.s.fTSCVirtualized)
66 {
[20675]67 /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
68 * unpaused before the virtual time and stopped after it. */
[1057]69 if (pVM->tm.s.fTSCUseRealTSC)
[20689]70 pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVCpu->tm.s.u64TSC;
[1057]71 else
[20689]72 pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
[19032]73 - pVCpu->tm.s.u64TSC;
[1057]74 }
[1]75 return VINF_SUCCESS;
76 }
77 AssertFailed();
[39402]78 return VERR_TM_TSC_ALREADY_TICKING;
[1]79}
80
81
82/**
83 * Pauses the CPU timestamp counter ticking.
84 *
85 * @returns VBox status code.
[41801]86 * @param pVCpu Pointer to the VMCPU.
[12549]87 * @internal
[1]88 */
[39078]89int tmCpuTickPause(PVMCPU pVCpu)
[1]90{
[19032]91 if (pVCpu->tm.s.fTSCTicking)
[1]92 {
[19747]93 pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
[19032]94 pVCpu->tm.s.fTSCTicking = false;
[1]95 return VINF_SUCCESS;
96 }
97 AssertFailed();
[39402]98 return VERR_TM_TSC_ALREADY_PAUSED;
[1]99}
100
[32419]101/**
102 * Record why we refused to use offsetted TSC.
103 *
104 * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
105 *
[41783]106 * @param pVM Pointer to the VM.
[32419]107 * @param pVCpu The current CPU.
108 */
109DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
110{
[1]111
[32419]112 /* Sample the reason for refusing. */
113 if (!pVM->tm.s.fMaybeUseOffsettedHostTSC)
114 STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
115 else if (!pVCpu->tm.s.fTSCTicking)
116 STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
117 else if (!pVM->tm.s.fTSCUseRealTSC)
118 {
119 if (pVM->tm.s.fVirtualSyncCatchUp)
120 {
121 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
122 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
123 else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
124 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
125 else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
126 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
127 else
128 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
129 }
130 else if (!pVM->tm.s.fVirtualSyncTicking)
131 STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
132 else if (pVM->tm.s.fVirtualWarpDrive)
133 STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
134 }
135}
136
137
/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   poffRealTSC The offset against the TSC of the current CPU.
 *                      Can be NULL.  On success this is the value to add to
 *                      the host TSC to get the guest TSC.
 * @thread EMT.
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
        &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
        &&  (   pVM->tm.s.fTSCUseRealTSC
             || (   !pVM->tm.s.fVirtualSyncCatchUp
                 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                 && !pVM->tm.s.fVirtualWarpDrive))
       )
    {
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            if (poffRealTSC)
            {
                /* Guest TSC right now, derived from the virtual sync clock
                   minus the per-VCPU raw-source offset. */
                uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                - pVCpu->tm.s.offTSCRawSrc;
                /** @todo When we start collecting statistics on how much time we spend executing
                 * guest code before exiting, we should check this against the next virtual sync
                 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
                 * the chance that we'll get interrupted right after the timer expired. */
                *poffRealTSC = u64Now - ASMReadTSC();
            }
        }
        else if (poffRealTSC)
        {
            /* The source is the real TSC; only an offset is needed when virtualized. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
        }
        /** @todo count this? */
        return true;
    }

#ifdef VBOX_WITH_STATISTICS
    /* Record why the offsetted TSC could not be used (statistics builds only). */
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}
201
202
203/**
204 * Calculates the number of host CPU ticks till the next virtual sync deadline.
205 *
206 * @note To save work, this function will not bother calculating the accurate
207 * tick count for deadlines that are more than a second ahead.
208 *
209 * @returns The number of host cpu ticks to the next deadline. Max one second.
210 * @param cNsToDeadline The number of nano seconds to the next virtual
211 * sync deadline.
212 */
213DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(uint64_t cNsToDeadline)
214{
215 AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
216 if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
217 return SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
218 uint64_t cTicks = ASMMultU64ByU32DivByU32(SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage),
219 cNsToDeadline,
220 TMCLOCK_FREQ_VIRTUAL);
221 if (cTicks > 4000)
222 cTicks -= 4000; /* fudge to account for overhead */
223 else
224 cTicks >>= 1;
225 return cTicks;
226}
227
228
/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVCpu           The current CPU.
 * @param   pfOffsettedTsc  Where to return whether it is safe to let the guest
 *                          read the raw TSC plus *poffRealTSC.
 * @param   poffRealTSC     The offset against the TSC of the current CPU.
 *                          Set to 0 when *pfOffsettedTsc is false.
 * @thread  EMT(pVCpu).
 * @remarks Superset of TMCpuTickCanUseRealTSC.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    uint64_t cTicksToDeadline;

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
        &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
        &&  (   pVM->tm.s.fTSCUseRealTSC
             || (   !pVM->tm.s.fVirtualSyncCatchUp
                 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                 && !pVM->tm.s.fVirtualWarpDrive))
       )
    {
        *pfOffsettedTsc = true;
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            /* Read the clock and the deadline in one call, then scale to TSC
               ticks.  NOTE(review): the equality test skipping the scaling
               was already questioned in the original ("what's the use of
               this?"); behaviour kept as-is. */
            uint64_t cNsToDeadline;
            uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
            uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                            ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                            : u64NowVirtSync;
            u64Now -= pVCpu->tm.s.offTSCRawSrc;
            *poffRealTSC = u64Now - ASMReadTSC();
            cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
        }
        else
        {
            /* The source is the real TSC; only an offset is needed when virtualized. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
            cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
        }
    }
    else
    {
#ifdef VBOX_WITH_STATISTICS
        /* Record why the offsetted TSC could not be used (statistics builds only). */
        tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
        *pfOffsettedTsc = false;
        *poffRealTSC = 0;
        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    }
    return cTicksToDeadline;
}
297
298
/**
 * Read the current CPU timestamp counter.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   fCheckTimers    Whether reading the virtual sync clock should also
 *                          check for (and signal) expired timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.fTSCVirtualized)
        {
            /* Virtualized TSC: raw source (real TSC or virtual sync clock)
               adjusted by the per-VCPU offset. */
            if (pVM->tm.s.fTSCUseRealTSC)
                u64 = ASMReadTSC();
            else
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
            u64 -= pVCpu->tm.s.offTSCRawSrc;
        }
        else
            u64 = ASMReadTSC();

        /* Never return a value lower than what the guest has already seen. */
        if (u64 < pVCpu->tm.s.u64TSCLastSeen)
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64; /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        /* Paused: report the value latched by tmCpuTickPause. */
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
335
336
337/**
[33540]338 * Read the current CPU timestamp counter.
[19747]339 *
340 * @returns Gets the CPU tsc.
[41801]341 * @param pVCpu Pointer to the VMCPU.
[19747]342 */
343VMMDECL(uint64_t) TMCpuTickGet(PVMCPU pVCpu)
344{
345 return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
346}
347
348
349/**
[33540]350 * Read the current CPU timestamp counter, don't check for expired timers.
[19747]351 *
352 * @returns Gets the CPU tsc.
[41801]353 * @param pVCpu Pointer to the VMCPU.
[19747]354 */
[22808]355VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu)
[19747]356{
357 return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
358}
359
360
361/**
[1]362 * Sets the current CPU timestamp counter.
363 *
364 * @returns VBox status code.
[41783]365 * @param pVM Pointer to the VM.
[41836]366 * @param pVCpu Pointer to the VMCPU.
[1]367 * @param u64Tick The new timestamp value.
[20678]368 *
369 * @thread EMT which TSC is to be set.
[1]370 */
[22808]371VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
[1]372{
[20678]373 VMCPU_ASSERT_EMT(pVCpu);
374 STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);
375
[20675]376 /*
377 * This is easier to do when the TSC is paused since resume will
[33540]378 * do all the calculations for us. Actually, we don't need to
[20688]379 * call tmCpuTickPause here since we overwrite u64TSC anyway.
[20675]380 */
[22299]381 bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
382 pVCpu->tm.s.fTSCTicking = false;
383 pVCpu->tm.s.u64TSC = u64Tick;
384 pVCpu->tm.s.u64TSCLastSeen = u64Tick;
[20675]385 if (fTSCTicking)
[20688]386 tmCpuTickResume(pVM, pVCpu);
[20678]387 /** @todo Try help synchronizing it better among the virtual CPUs? */
[20675]388
[1]389 return VINF_SUCCESS;
390}
391
[22242]392/**
393 * Sets the last seen CPU timestamp counter.
394 *
395 * @returns VBox status code.
[41836]396 * @param pVCpu Pointer to the VMCPU.
[22242]397 * @param u64LastSeenTick The last seen timestamp value.
398 *
399 * @thread EMT which TSC is to be set.
400 */
[22808]401VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
[22242]402{
403 VMCPU_ASSERT_EMT(pVCpu);
[1]404
[22245]405 LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
406 if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
407 pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
[22242]408 return VINF_SUCCESS;
409}
410
[1]411/**
[22242]412 * Gets the last seen CPU timestamp counter.
413 *
414 * @returns last seen TSC
[41836]415 * @param pVCpu Pointer to the VMCPU.
[22242]416 *
417 * @thread EMT which TSC is to be set.
418 */
[22808]419VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
[22242]420{
421 VMCPU_ASSERT_EMT(pVCpu);
422
423 return pVCpu->tm.s.u64TSCLastSeen;
424}
425
426
427/**
[1]428 * Get the timestamp frequency.
429 *
430 * @returns Number of ticks per second.
431 * @param pVM The VM.
432 */
[12989]433VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
[1]434{
[1057]435 if (pVM->tm.s.fTSCUseRealTSC)
436 {
437 uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
438 if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
439 return cTSCTicksPerSecond;
440 }
[1]441 return pVM->tm.s.cTSCTicksPerSecond;
442}
443
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use