VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@43667

Last change on this file since 43667 was 41965, checked in by vboxsync, 12 years ago

VMM: ran scm. Mostly svn:keywords changes (adding Revision).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 38.3 KB
1/* $Id: TMAllVirtual.cpp 41965 2012-06-29 02:52:49Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/dbgftrace.h>
25#ifdef IN_RING3
26# ifdef VBOX_WITH_REM
27# include <VBox/vmm/rem.h>
28# endif
29# include <iprt/thread.h>
30#endif
31#include "TMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/vmm/vmm.h>
34#include <VBox/err.h>
35#include <VBox/log.h>
36#include <VBox/sup.h>
37
38#include <iprt/time.h>
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/asm-math.h>
42
43
44
45/**
46 * Helper function that's used by the assembly routines when something goes bust.
47 *
48 * @param pData Pointer to the data structure.
49 * @param u64NanoTS The calculated nano ts.
50 * @param u64DeltaPrev The delta relative to the previously returned timestamp.
52 * @param u64PrevNanoTS The previously returned timestamp (as it was read).
52 */
53DECLEXPORT(void) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev, uint64_t u64PrevNanoTS)
54{
55 //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
56 pData->cBadPrev++;
57 if ((int64_t)u64DeltaPrev < 0)
58 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64\n",
59 u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
60 else
61 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 (debugging?)\n",
62 u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
63}
64
65
66/**
67 * Called the first time somebody asks for the time or when the GIP
68 * is mapped/unmapped.
69 *
70 * This should never ever happen.
71 */
72DECLEXPORT(uint64_t) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
73{
74 NOREF(pData);
75 //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
76 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
77 AssertFatalMsgFailed(("pGip=%p u32Magic=%#x\n", pGip, VALID_PTR(pGip) ? pGip->u32Magic : 0));
78#ifndef _MSC_VER
79 return 0; /* gcc false positive warning */
80#endif
81}
82
83
84#if 1
85
86/**
87 * Wrapper around the IPRT GIP time methods.
88 */
89DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVM pVM)
90{
91# ifdef IN_RING3
92 uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
93# else /* !IN_RING3 */
94 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
95 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
96 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
97 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
98# endif /* !IN_RING3 */
99 /*DBGFTRACE_POS_U64(pVM, u64);*/
100 return u64;
101}
102
103#else
104
105/**
106 * This is (mostly) the same as rtTimeNanoTSInternal() except
107 * for the two globals which live in TM.
108 *
109 * @returns Nanosecond timestamp.
110 * @param pVM Pointer to the VM.
111 */
112static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
113{
114 uint64_t u64Delta;
115 uint32_t u32NanoTSFactor0;
116 uint64_t u64TSC;
117 uint64_t u64NanoTS;
118 uint32_t u32UpdateIntervalTSC;
119 uint64_t u64PrevNanoTS;
120
121 /*
122 * Read the GIP data and the previous value.
123 */
124 for (;;)
125 {
126 uint32_t u32TransactionId;
127 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
128#ifdef IN_RING3
129 if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
130 return RTTimeSystemNanoTS();
131#endif
132
133 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
134 {
135 u32TransactionId = pGip->aCPUs[0].u32TransactionId;
136#ifdef RT_OS_L4
137 Assert((u32TransactionId & 1) == 0);
138#endif
139 u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
140 u64NanoTS = pGip->aCPUs[0].u64NanoTS;
141 u64TSC = pGip->aCPUs[0].u64TSC;
142 u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
143 u64Delta = ASMReadTSC();
144 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
145 if (RT_UNLIKELY( pGip->aCPUs[0].u32TransactionId != u32TransactionId
146 || (u32TransactionId & 1)))
147 continue;
148 }
149 else
150 {
151 /* SUPGIPMODE_ASYNC_TSC */
152 PSUPGIPCPU pGipCpu;
153
154 uint8_t u8ApicId = ASMGetApicId();
155 if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
156 pGipCpu = &pGip->aCPUs[u8ApicId];
157 else
158 {
159 AssertMsgFailed(("%x\n", u8ApicId));
160 pGipCpu = &pGip->aCPUs[0];
161 }
162
163 u32TransactionId = pGipCpu->u32TransactionId;
164#ifdef RT_OS_L4
165 Assert((u32TransactionId & 1) == 0);
166#endif
167 u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
168 u64NanoTS = pGipCpu->u64NanoTS;
169 u64TSC = pGipCpu->u64TSC;
170 u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
171 u64Delta = ASMReadTSC();
172 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
173#ifdef IN_RC
174 Assert(!(ASMGetFlags() & X86_EFL_IF));
175#else
176 if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
177 continue;
178 if (RT_UNLIKELY( pGipCpu->u32TransactionId != u32TransactionId
179 || (u32TransactionId & 1)))
180 continue;
181#endif
182 }
183 break;
184 }
185
186 /*
187 * Calc NanoTS delta.
188 */
189 u64Delta -= u64TSC;
190 if (u64Delta > u32UpdateIntervalTSC)
191 {
192 /*
193 * We've expired the interval, cap it. If we're here for the 2nd
194 * time without any GIP update in-between, the checks against
195 * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
196 */
197 u64Delta = u32UpdateIntervalTSC;
198 }
199#if !defined(_MSC_VER) || defined(RT_ARCH_AMD64) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
200 u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
201 u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
202#else
203 __asm
204 {
205 mov eax, dword ptr [u64Delta]
206 mul dword ptr [u32NanoTSFactor0]
207 div dword ptr [u32UpdateIntervalTSC]
208 mov dword ptr [u64Delta], eax
209 xor edx, edx
210 mov dword ptr [u64Delta + 4], edx
211 }
212#endif
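    /*
     * Either branch computes the same thing: cNsDelta = cTscDelta * u32NanoTSFactor0 / u32UpdateIntervalTSC.
     * With hypothetical GIP values (u32UpdateIntervalTSC = 2 600 000, i.e. a 2.6 GHz TSC sampled every
     * millisecond, and u32NanoTSFactor0 = 1 000 000 ns), a raw TSC delta of 1 300 000 ticks maps to
     * 1 300 000 * 1 000 000 / 2 600 000 = 500 000 ns.
     */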
213
214 /*
215 * Calculate the time and compare it with the previously returned value.
216 *
217 * Since this function is called *very* frequently while the VM is running,
218 * and then mostly on the EMT, we can restrict the valid range of the delta
219 * (-1s to 2*GipUpdates) and simplify/optimize the default path.
220 */
221 u64NanoTS += u64Delta;
222 uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
223 if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
224 /* frequent - less than 1s since last call. */;
225 else if ( (int64_t)u64DeltaPrev < 0
226 && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
227 {
228 /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
229 ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps);
230 u64NanoTS = u64PrevNanoTS + 1;
231#ifndef IN_RING3
232 VM_FF_SET(pVM, VM_FF_TO_R3); /* S10 hack */
233#endif
234 }
235 else if (u64PrevNanoTS)
236 {
237 /* Something has gone bust; if the offset is negative it's really bad. */
238 ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).cBadPrev);
239 if ((int64_t)u64DeltaPrev < 0)
240 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
241 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
242 else
243 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
244 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
245#ifdef DEBUG_bird
246 /** @todo there are some hickups during boot and reset that can cause 2-5 seconds delays. Investigate... */
247 AssertMsg(u64PrevNanoTS > UINT64_C(100000000000) /* 100s */,
248 ("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
249 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
250#endif
251 }
252 /* else: We're resuming (see TMVirtualResume). */
253 if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
254 return u64NanoTS;
255
256 /*
257 * Attempt updating the previous value, provided we're still ahead of it.
258 *
259 * There is no point in recalculating u64NanoTS just because we got preempted or
260 * raced somebody while the GIP was updated, since these are events
261 * that might occur at any point in the return path as well.
262 */
263 for (int cTries = 50;;)
264 {
265 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
266 if (u64PrevNanoTS >= u64NanoTS)
267 break;
268 if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
269 break;
270 AssertBreak(--cTries <= 0);
271 if (cTries < 25 && !VM_IS_EMT(pVM)) /* give up early */
272 break;
273 }
274
275 return u64NanoTS;
276}
277
278#endif
279
280
281/**
282 * Get the time when we're not running at 100% (i.e. warp drive is active).
283 *
284 * @returns The timestamp.
285 * @param pVM Pointer to the VM.
286 */
287static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
288{
289 /*
290 * Recalculate the RTTimeNanoTS() value for the period where
291 * warp drive has been enabled.
292 */
293 uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
294 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
295 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
296 u64 /= 100;
297 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
298
299 /*
300 * Now we apply the virtual time offset.
301 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
302 * machine started if it had been running continuously without any suspends.)
303 */
304 u64 -= pVM->tm.s.u64VirtualOffset;
305 return u64;
306}
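To make the warp-drive arithmetic above easier to follow, here is a minimal standalone sketch of the same scaling. The helper name and the sample values are hypothetical and not part of the VMM sources; it only mirrors the math: nanoseconds that elapsed after the warp start are scaled by the percentage, earlier time is untouched.

#include <stdint.h>

/* Hypothetical illustration of tmVirtualGetRawNonNormal(): time after
 * uWarpStart is scaled by uWarpPct/100 and re-anchored at the warp start. */
uint64_t tmExampleWarpScale(uint64_t uRawNanoTS, uint64_t uWarpStart, uint32_t uWarpPct)
{
    uint64_t u64 = uRawNanoTS - uWarpStart;   /* nanoseconds since warp drive was enabled */
    u64 *= uWarpPct;                          /* e.g. 50 = half speed, 200 = double speed */
    u64 /= 100;
    return u64 + uWarpStart;                  /* re-anchor at the warp start point */
}

With uWarpPct = 50, ten real seconds after the warp start come out as five virtual seconds; the caller still subtracts u64VirtualOffset afterwards, exactly as tmVirtualGetRaw() does.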
307
308
309/**
310 * Get the raw virtual time.
311 *
312 * @returns The current time stamp.
313 * @param pVM Pointer to the VM.
314 */
315DECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
316{
317 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
318 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
319 return tmVirtualGetRawNonNormal(pVM);
320}
321
322
323/**
324 * Inlined version of tmVirtualGetEx.
325 */
326DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
327{
328 uint64_t u64;
329 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
330 {
331 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
332 u64 = tmVirtualGetRaw(pVM);
333
334 /*
335 * Use the chance to check for expired timers.
336 */
337 if (fCheckTimers)
338 {
339 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
340 if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
341 && !pVM->tm.s.fRunningQueues
342 && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
343 || ( pVM->tm.s.fVirtualSyncTicking
344 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
345 )
346 )
347 && !pVM->tm.s.fRunningQueues
348 )
349 {
350 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
351 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
352 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
353#ifdef IN_RING3
354# ifdef VBOX_WITH_REM
355 REMR3NotifyTimerPending(pVM, pVCpuDst);
356# endif
357 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
358#endif
359 }
360 }
361 }
362 else
363 u64 = pVM->tm.s.u64Virtual;
364 return u64;
365}
366
367
368/**
369 * Gets the current TMCLOCK_VIRTUAL time
370 *
371 * @returns The timestamp.
372 * @param pVM Pointer to the VM.
373 *
374 * @remark While the flow of time will never go backwards, the speed of its
375 * progress varies due to inaccuracies in RTTimeNanoTS and the TSC. The latter can be
376 * influenced by power saving (SpeedStep, PowerNow!), while the former
377 * makes use of the TSC and kernel timers.
378 */
379VMM_INT_DECL(uint64_t) TMVirtualGet(PVM pVM)
380{
381 return tmVirtualGet(pVM, true /*fCheckTimers*/);
382}
383
384
385/**
386 * Gets the current TMCLOCK_VIRTUAL time without checking
387 * timers or anything.
388 *
389 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
390 *
391 * @returns The timestamp.
392 * @param pVM Pointer to the VM.
393 *
394 * @remarks See TMVirtualGet.
395 */
396VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
397{
398 return tmVirtualGet(pVM, false /*fCheckTimers*/);
399}
400
401
402/**
403 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nanoseconds.
404 *
405 * @returns Host nanosecond count.
406 * @param pVM Pointer to the VM.
407 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
408 */
409DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
410{
411 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
412 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
413 return cVirtTicksToDeadline;
414}
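/*
 * For example (hypothetical numbers): at a warp-drive percentage of 50, a
 * deadline 1 000 000 TMCLOCK_VIRTUAL ticks away converts to
 * 1 000 000 * 100 / 50 = 2 000 000 host nanoseconds, i.e. the host must wait
 * twice as long as the slowed-down virtual interval.
 */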
415
416
417/**
418 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
419 *
420 * @returns The timestamp.
421 * @param pVM Pointer to the VM.
422 * @param u64 raw virtual time.
423 * @param off offVirtualSync.
424 * @param pcNsToDeadline Where to return the number of nano seconds to
425 * the next virtual sync timer deadline. Can be
426 * NULL.
427 */
428DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
429{
430 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
431
432 /*
433 * Don't make updates until we've checked the timer queue.
434 */
435 bool fUpdatePrev = true;
436 bool fUpdateOff = true;
437 bool fStop = false;
438 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
439 uint64_t u64Delta = u64 - u64Prev;
440 if (RT_LIKELY(!(u64Delta >> 32)))
441 {
442 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
443 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
444 {
445 off -= u64Sub;
446 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
447 }
448 else
449 {
450 /* we've completely caught up. */
451 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
452 off = pVM->tm.s.offVirtualSyncGivenUp;
453 fStop = true;
454 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
455 }
456 }
457 else
458 {
459 /* More than 4 seconds since last time (or negative), ignore it. */
460 fUpdateOff = false;
461 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
462 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
463 }
464
465 /*
466 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
467 * approach is to never pass the head timer. So, when we do, we stop the clock and
468 * set the timer pending flag.
469 */
470 u64 -= off;
471
472 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
473 if (u64Last > u64)
474 {
475 u64 = u64Last + 1;
476 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
477 }
478
479 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
480 if (u64 < u64Expire)
481 {
482 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
483 if (fUpdateOff)
484 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
485 if (fStop)
486 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
487 if (fUpdatePrev)
488 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
489 if (pcNsToDeadline)
490 {
491 uint64_t cNsToDeadline = u64Expire - u64;
492 if (pVM->tm.s.fVirtualSyncCatchUp)
493 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
494 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
495 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
496 }
497 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
498 }
499 else
500 {
501 u64 = u64Expire;
502 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
503 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
504
505 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
506 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
507 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
508 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
509 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
510 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
511
512 if (pcNsToDeadline)
513 *pcNsToDeadline = 0;
514#ifdef IN_RING3
515# ifdef VBOX_WITH_REM
516 REMR3NotifyTimerPending(pVM, pVCpuDst);
517# endif
518 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
519#endif
520 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
521 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
522 }
523 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
524
525 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
526 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
527 return u64;
528}
529
530
531/**
532 * tmVirtualSyncGetEx worker for when we get the lock.
533 *
534 * @returns The timestamp.
535 * @param pVM Pointer to the VM.
536 * @param u64 The virtual clock timestamp.
537 * @param pcNsToDeadline Where to return the number of nano seconds to
538 * the next virtual sync timer deadline. Can be
539 * NULL.
540 */
541DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64, uint64_t *pcNsToDeadline)
542{
543 /*
544 * Not ticking?
545 */
546 if (!pVM->tm.s.fVirtualSyncTicking)
547 {
548 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
549 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
550 if (pcNsToDeadline)
551 *pcNsToDeadline = 0;
552 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
553 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
554 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
555 return u64;
556 }
557
558 /*
559 * Handle catch up in a separate function.
560 */
561 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
562 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
563 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline);
564
565 /*
566 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
567 * approach is to never pass the head timer. So, when we do, we stop the clock and
568 * set the timer pending flag.
569 */
570 u64 -= off;
571
572 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
573 if (u64Last > u64)
574 {
575 u64 = u64Last + 1;
576 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
577 }
578
579 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
580 if (u64 < u64Expire)
581 {
582 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
583 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
584 if (pcNsToDeadline)
585 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
586 }
587 else
588 {
589 u64 = u64Expire;
590 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
591 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
592
593 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
594 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
595 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
596 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
597 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
598 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
599
600#ifdef IN_RING3
601# ifdef VBOX_WITH_REM
602 REMR3NotifyTimerPending(pVM, pVCpuDst);
603# endif
604 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
605#endif
606 if (pcNsToDeadline)
607 *pcNsToDeadline = 0;
608 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
609 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
610 }
611 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
612 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
613 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
614 return u64;
615}
616
617
618/**
619 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
620 *
621 * @returns The timestamp.
622 * @param pVM Pointer to the VM.
623 * @param fCheckTimers Check timers or not
624 * @param pcNsToDeadline Where to return the number of nano seconds to
625 * the next virtual sync timer deadline. Can be
626 * NULL.
627 * @thread EMT.
628 */
629DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers, uint64_t *pcNsToDeadline)
630{
631 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
632
633 uint64_t u64;
634 if (!pVM->tm.s.fVirtualSyncTicking)
635 {
636 if (pcNsToDeadline)
637 *pcNsToDeadline = 0;
638 u64 = pVM->tm.s.u64VirtualSync;
639 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
640 return u64;
641 }
642
643 /*
644 * Query the virtual clock and do the usual expired timer check.
645 */
646 Assert(pVM->tm.s.cVirtualTicking);
647 u64 = tmVirtualGetRaw(pVM);
648 if (fCheckTimers)
649 {
650 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
651 if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
652 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
653 {
654 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
655 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
656#ifdef IN_RING3
657# ifdef VBOX_WITH_REM
658 REMR3NotifyTimerPending(pVM, pVCpuDst);
659# endif
660 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
661#endif
662 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
663 }
664 }
665
666 /*
667 * If we can get the lock, get it. The result is much more reliable.
668 *
669 * Note! This is where all clock source devices branch off because they
670 * will already be holding the lock. The 'else' is taken by code
671 * which is less picky or hasn't been adjusted yet.
672 */
673 if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
674 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
675
676 /*
677 * When the clock is ticking, not doing catch ups and not running into an
678 * expired time, we can get away without locking. Try this first.
679 */
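    /*
     * The lockless attempt below is an optimistic read: sample the offset, then
     * re-check the ticking and catch-up flags together with the offset. The
     * (u64 - off) value is only used if nothing changed in between and it has
     * not yet reached the next timer expiry; otherwise we fall through to the
     * locking/retry logic further down.
     */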
680 uint64_t off;
681 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
682 {
683 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
684 {
685 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
686 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
687 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
688 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
689 {
690 off = u64 - off;
691 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
692 if (off < u64Expire)
693 {
694 if (pcNsToDeadline)
695 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
696 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
697 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
698 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
699 return off;
700 }
701 }
702 }
703 }
704 else
705 {
706 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
707 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
708 {
709 if (pcNsToDeadline)
710 *pcNsToDeadline = 0;
711 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
712 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
713 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
714 return off;
715 }
716 }
717
718 /*
719 * Read the offset and adjust if we're playing catch-up.
720 *
721 * The catch-up adjusting works by decrementing the offset by a percentage of
722 * the time elapsed since the previous TMVirtualGetSync call.
723 *
724 * It's possible to get a very long or even negative interval between two reads
725 * for the following reasons:
726 * - Someone might have suspended the process execution, frequently the case when
727 * debugging the process.
728 * - We might be on a different CPU whose TSC isn't quite in sync with the
729 * other CPUs in the system.
730 * - Another thread is racing us and we might have been preempted while inside
731 * this function.
732 *
733 * Assuming nanosecond virtual time, we can simply ignore any interval which has
734 * any of the upper 32 bits set.
735 */
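    /*
     * A worked example with hypothetical numbers: with
     * u32VirtualSyncCatchUpPercentage = 25 and 40 000 000 ns (40 ms) of virtual
     * time since the previous read, the offset is reduced by
     * 40 000 000 * 25 / 100 = 10 000 000 ns, i.e. the virtual sync clock runs at
     * 125% of the virtual clock until the offset is down to offVirtualSyncGivenUp.
     */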
736 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
737 int cOuterTries = 42;
738 for (;; cOuterTries--)
739 {
740 /* Try grab the lock, things get simpler when owning the lock. */
741 int rcLock = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
742 if (RT_SUCCESS_NP(rcLock))
743 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
744
745 /* Re-check the ticking flag. */
746 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
747 {
748 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
749 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
750 && cOuterTries > 0)
751 continue;
752 if (pcNsToDeadline)
753 *pcNsToDeadline = 0;
754 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
755 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
756 return off;
757 }
758
759 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
760 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
761 {
762 /* No changes allowed, try to get a consistent set of parameters. */
763 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
764 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
765 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
766 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
767 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
768 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
769 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
770 || cOuterTries <= 0)
771 {
772 uint64_t u64Delta = u64 - u64Prev;
773 if (RT_LIKELY(!(u64Delta >> 32)))
774 {
775 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
776 if (off > u64Sub + offGivenUp)
777 {
778 off -= u64Sub;
779 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
780 }
781 else
782 {
783 /* we've completely caught up. */
784 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
785 off = offGivenUp;
786 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
787 }
788 }
789 else
790 /* More than 4 seconds since last time (or negative), ignore it. */
791 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
792
793 /* Check that we're still running and in catch up. */
794 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
795 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
796 break;
797 if (cOuterTries <= 0)
798 break; /* enough */
799 }
800 }
801 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
802 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
803 break; /* Got a consistent offset */
804 else if (cOuterTries <= 0)
805 break; /* enough */
806 }
807 if (cOuterTries <= 0)
808 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
809
810 /*
811 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
812 * approach is to never pass the head timer. So, when we do, we stop the clock and
813 * set the timer pending flag.
814 */
815 u64 -= off;
816/** @todo u64VirtualSyncLast */
817 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
818 if (u64 >= u64Expire)
819 {
820 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
821 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
822 {
823 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
824 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
825 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
826#ifdef IN_RING3
827# ifdef VBOX_WITH_REM
828 REMR3NotifyTimerPending(pVM, pVCpuDst);
829# endif
830 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
831#endif
832 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
833 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
834 }
835 else
836 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
837 if (pcNsToDeadline)
838 *pcNsToDeadline = 0;
839 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
840 }
841 else if (pcNsToDeadline)
842 {
843 uint64_t cNsToDeadline = u64Expire - u64;
844 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
845 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
846 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
847 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
848 }
849
850 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
851 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
852 return u64;
853}
854
855
856/**
857 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
858 *
859 * @returns The timestamp.
860 * @param pVM Pointer to the VM.
861 * @thread EMT.
862 * @remarks May set the timer and virtual sync FFs.
863 */
864VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVM pVM)
865{
866 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
867}
868
869
870/**
871 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
872 * TMCLOCK_VIRTUAL.
873 *
874 * @returns The timestamp.
875 * @param pVM Pointer to the VM.
876 * @thread EMT.
877 * @remarks May set the timer and virtual sync FFs.
878 */
879VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
880{
881 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
882}
883
884
885/**
886 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
887 *
888 * @returns The timestamp.
889 * @param pVM Pointer to the VM.
890 * @param fCheckTimers Check timers on the virtual clock or not.
891 * @thread EMT.
892 * @remarks May set the timer and virtual sync FFs.
893 */
894VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
895{
896 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/);
897}
898
899
900/**
901 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
902 * without checking timers running on TMCLOCK_VIRTUAL.
903 *
904 * @returns The timestamp.
905 * @param pVM Pointer to the VM.
906 * @param pcNsToDeadline Where to return the number of nano seconds to
907 * the next virtual sync timer deadline.
908 * @thread EMT.
909 * @remarks May set the timer and virtual sync FFs.
910 */
911VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVM pVM, uint64_t *pcNsToDeadline)
912{
913 uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
914 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp);
915 *pcNsToDeadline = cNsToDeadlineTmp;
916 return u64Now;
917}
918
919
920/**
921 * Gets the number of nanoseconds to the next virtual sync deadline.
922 *
923 * @returns The number of host nanoseconds to the next virtual sync timer deadline.
924 * @param pVM Pointer to the VM.
925 * @thread EMT.
926 * @remarks May set the timer and virtual sync FFs.
927 */
928VMM_INT_DECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVM pVM)
929{
930 uint64_t cNsToDeadline;
931 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline);
932 return cNsToDeadline;
933}
934
935
936/**
937 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
938 *
939 * @return The current lag.
940 * @param pVM Pointer to the VM.
941 */
942VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
943{
944 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
945}
946
947
948/**
949 * Get the current catch-up percent.
950 *
951 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
952 * @param pVM Pointer to the VM.
953 */
954VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
955{
956 if (pVM->tm.s.fVirtualSyncCatchUp)
957 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
958 return 0;
959}
960
961
962/**
963 * Gets the current TMCLOCK_VIRTUAL frequency.
964 *
965 * @returns The frequency.
966 * @param pVM Pointer to the VM.
967 */
968VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
969{
970 NOREF(pVM);
971 return TMCLOCK_FREQ_VIRTUAL;
972}
973
974
975/**
976 * Worker for TMR3PauseClocks.
977 *
978 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
979 * @param pVM Pointer to the VM.
980 */
981int tmVirtualPauseLocked(PVM pVM)
982{
983 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
984 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
985 if (c == 0)
986 {
987 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
988 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
989 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
990 }
991 return VINF_SUCCESS;
992}
993
994
995/**
996 * Worker for TMR3ResumeClocks.
997 *
998 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
999 * @param pVM Pointer to the VM.
1000 */
1001int tmVirtualResumeLocked(PVM pVM)
1002{
1003 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
1004 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1005 if (c == 1)
1006 {
1007 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
1008 pVM->tm.s.u64VirtualRawPrev = 0;
1009 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
1010 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
1011 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
1012 }
1013 return VINF_SUCCESS;
1014}
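/*
 * A worked resume example with hypothetical numbers: if the clock was paused at
 * u64Virtual = 30 000 000 000 ns (30 s of virtual time) and the raw nanosecond
 * clock reads 100 000 000 000 ns on resume, u64VirtualOffset becomes
 * 70 000 000 000 ns, so tmVirtualGetRaw() continues from 30 s without a jump.
 */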
1015
1016
1017/**
1018 * Converts from virtual ticks to nanoseconds.
1019 *
1020 * @returns nanoseconds.
1021 * @param pVM Pointer to the VM.
1022 * @param u64VirtualTicks The virtual ticks to convert.
1023 * @remark There could be rounding errors here. We just do a simple integer divide
1024 * without any adjustments.
1025 */
1026VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
1027{
1028 NOREF(pVM);
1029 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1030 return u64VirtualTicks;
1031}
1032
1033
1034/**
1035 * Converts from virtual ticks to microseconds.
1036 *
1037 * @returns microseconds.
1038 * @param pVM Pointer to the VM.
1039 * @param u64VirtualTicks The virtual ticks to convert.
1040 * @remark There could be rounding errors here. We just do a simple integer divide
1041 * without any adjustments.
1042 */
1043VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
1044{
1045 NOREF(pVM);
1046 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1047 return u64VirtualTicks / 1000;
1048}
1049
1050
1051/**
1052 * Converts from virtual ticks to milliseconds.
1053 *
1054 * @returns milliseconds.
1055 * @param pVM Pointer to the VM.
1056 * @param u64VirtualTicks The virtual ticks to convert.
1057 * @remark There could be rounding errors here. We just do a simple integer divide
1058 * without any adjustments.
1059 */
1060VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
1061{
1062 NOREF(pVM);
1063 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1064 return u64VirtualTicks / 1000000;
1065}
1066
1067
1068/**
1069 * Converts from nanoseconds to virtual ticks.
1070 *
1071 * @returns virtual ticks.
1072 * @param pVM Pointer to the VM.
1073 * @param u64NanoTS The nanosecond value to convert.
1074 * @remark There could be rounding and overflow errors here.
1075 */
1076VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
1077{
1078 NOREF(pVM);
1079 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1080 return u64NanoTS;
1081}
1082
1083
1084/**
1085 * Converts from microseconds to virtual ticks.
1086 *
1087 * @returns virtual ticks.
1088 * @param pVM Pointer to the VM.
1089 * @param u64MicroTS The microsecond value to convert.
1090 * @remark There could be rounding and overflow errors here.
1091 */
1092VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
1093{
1094 NOREF(pVM);
1095 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1096 return u64MicroTS * 1000;
1097}
1098
1099
1100/**
1101 * Converts from milliseconds to virtual ticks.
1102 *
1103 * @returns virtual ticks.
1104 * @param pVM Pointer to the VM.
1105 * @param u64MilliTS The millisecond value to convert.
1106 * @remark There could be rounding and overflow errors here.
1107 */
1108VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1109{
1110 NOREF(pVM);
1111 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1112 return u64MilliTS * 1000000;
1113}
1114