VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@93744

Last change on this file since 93744 was 93744, checked in by vboxsync, 2 years ago

VMM: More arm64 adjustments. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 43.5 KB
1/* $Id: TMAllVirtual.cpp 93744 2022-02-14 21:00:26Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/dbgftrace.h>
25#ifdef IN_RING3
26# include <iprt/thread.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vmm/vmcc.h>
30#include <VBox/vmm/vmm.h>
31#include <VBox/err.h>
32#include <VBox/log.h>
33#include <VBox/sup.h>
34
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39
40
41
42/**
43 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
44 */
45DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
46 uint64_t u64PrevNanoTS)
47{
48 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
49 pData->cBadPrev++;
50 if ((int64_t)u64DeltaPrev < 0)
51 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
52 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
53 else
54 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
55 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
56}
57
58
59#ifdef IN_RING3
60/**
61 * @callback_method_impl{FNTIMENANOTSINTERNAL, For driverless mode.}
62 */
63static DECLCALLBACK(uint64_t) tmR3VirtualNanoTSDriverless(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
64{
65 RT_NOREF(pData);
66 if (pExtra)
67 pExtra->uTSCValue = ASMReadTSC();
68 return RTTimeSystemNanoTS();
69}
70#endif
71
72
73/**
74 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
75 *
76 * This is the initial worker, so the first call in each context ends up here.
77 * It is also used should the delta rating of the host CPUs change or should the
78 * fGetGipCpu feature the current worker relies upon become unavailable. Both
79 * events may occur as CPUs are taken online.
80 */
81DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
82{
83 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
84 PFNTIMENANOTSINTERNAL pfnWorker;
85
86 /*
87 * We require a valid GIP for the selection below.
88 * Invalid GIP is fatal, though we have to allow no GIP in driverless mode (ring-3 only).
89 */
90 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
91#ifdef IN_RING3
92 if (pGip)
93#endif
94 {
95 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
96 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
97 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
98 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
99
100 /*
101 * Determine the new worker.
102 */
103#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
104 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
105#endif
106 switch (pGip->u32Mode)
107 {
108#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
109 case SUPGIPMODE_SYNC_TSC:
110 case SUPGIPMODE_INVARIANT_TSC:
111# ifdef IN_RING0
112 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
113 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
114 else
115 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
116# else
117 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
118 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
119 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
120 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
121 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
122 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
123 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
124 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
125 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
126 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
127 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
128 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
129 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
130 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
131 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
132 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
133 else
134 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
135 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
136 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
137# endif
138 break;
139
140 case SUPGIPMODE_ASYNC_TSC:
141# ifdef IN_RING0
142 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
143# else
144 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
145 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
146 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
147 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
148 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
149 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
150 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
151 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
152 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
153 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
154 else
155 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
156# endif
157 break;
158#endif
159 default:
160 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
161 }
162 }
163#ifdef IN_RING3
164 else
165 pfnWorker = tmR3VirtualNanoTSDriverless;
166#endif
167
168 /*
169 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
170 */
171 ASMAtomicWritePtr((void * volatile *)&pVM->VMCC_CTX(tm).s.pfnVirtualGetRaw, (void *)(uintptr_t)pfnWorker);
172 return pfnWorker(pData, pExtra);
173}
174
175
176/**
177 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
178 */
179DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra,
180 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
181{
182 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
183 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
184#ifndef _MSC_VER
185 return UINT64_MAX;
186#endif
187}
188
189
190/**
191 * Wrapper around the IPRT GIP time methods.
192 */
193DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
194{
195#ifdef IN_RING3
196 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, NULL /*pExtra*/);
197#elif defined(IN_RING0)
198 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
199 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, NULL /*pExtra*/);
200 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
201 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
202#else
203# error "unsupported context"
204#endif
205 /*DBGFTRACE_POS_U64(pVM, u64);*/
206 return u64;
207}
208
209
210/**
211 * Wrapper around the IPRT GIP time methods, extended version.
212 */
213DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
214{
215 RTITMENANOTSEXTRA Extra;
216#ifdef IN_RING3
217 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, &Extra);
218#elif defined(IN_RING0)
219 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
220 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, &Extra);
221 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
222 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
223#else
224# error "unsupported context"
225#endif
226 if (puTscNow)
227 *puTscNow = Extra.uTSCValue;
228 /*DBGFTRACE_POS_U64(pVM, u64);*/
229 return u64;
230}
231
232
233/**
234 * Gets the time when we're not running at 100%.
235 *
236 * @returns The timestamp.
237 * @param pVM The cross context VM structure.
238 * @param puTscNow Where to return the TSC corresponding to the returned
239 * timestamp (delta adjusted). Optional.
240 */
241static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
242{
243 /*
244 * Recalculate the RTTimeNanoTS() value for the period where
245 * warp drive has been enabled.
246 */
247 uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
248 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
249 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
250 u64 /= 100;
251 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
252
253 /*
254 * Now we apply the virtual time offset.
255 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
256 * machine started if it had been running continuously without any suspends.)
257 */
258 u64 -= pVM->tm.s.u64VirtualOffset;
259 return u64;
260}
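
/*
 * Illustrative sketch (not part of the original file): the warp-drive scaling
 * performed above, restated as a standalone helper over plain integers so the
 * arithmetic is easier to follow.  The parameter names are invented for the
 * example and plain 64-bit multiply/divide stands in for the VM state members.
 */
DECLINLINE(uint64_t) tmExampleWarpScaleNanoTS(uint64_t u64RawNanoTS, uint64_t u64WarpStart,
                                              uint32_t u32WarpPct, uint64_t u64VirtualOffset)
{
    /* Only the time elapsed since warp drive was engaged gets scaled. */
    uint64_t u64 = u64RawNanoTS - u64WarpStart;
    u64 *= u32WarpPct;              /* 200 runs the guest clock at double speed, 50 at half. */
    u64 /= 100;
    u64 += u64WarpStart;            /* Re-anchor at the warp drive start time. */
    return u64 - u64VirtualOffset;  /* Finally apply the normal virtual time offset. */
}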
261
262
263/**
264 * Get the raw virtual time.
265 *
266 * @returns The current time stamp.
267 * @param pVM The cross context VM structure.
268 */
269DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
270{
271 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
272 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
273 return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
274}
275
276
277/**
278 * Get the raw virtual time, extended version.
279 *
280 * @returns The current time stamp.
281 * @param pVM The cross context VM structure.
282 * @param puTscNow Where to return the TSC corresponding to the returned
283 * timestamp (delta adjusted). Optional.
284 */
285DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
286{
287 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
288 return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
289 return tmVirtualGetRawNonNormal(pVM, puTscNow);
290}
291
292
293/**
294 * Inlined version of tmVirtualGetEx.
295 */
296DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
297{
298 uint64_t u64;
299 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
300 {
301 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
302 u64 = tmVirtualGetRaw(pVM);
303
304 /*
305 * Use the chance to check for expired timers.
306 */
307 if (fCheckTimers)
308 {
309 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
310 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
311 && !pVM->tm.s.fRunningQueues
312 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64
313 || ( pVM->tm.s.fVirtualSyncTicking
314 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
315 )
316 )
317 && !pVM->tm.s.fRunningQueues
318 )
319 {
320 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
321 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
322 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
323#ifdef IN_RING3
324 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
325#endif
326 }
327 }
328 }
329 else
330 u64 = pVM->tm.s.u64Virtual;
331 return u64;
332}
333
334
335/**
336 * Gets the current TMCLOCK_VIRTUAL time
337 *
338 * @returns The timestamp.
339 * @param pVM The cross context VM structure.
340 *
341 * @remark While the flow of time will never go backwards, the speed of its
342 * progress varies due to inaccuracies in RTTimeNanoTS and the TSC. The latter can be
343 * influenced by power saving (SpeedStep, PowerNow!), while the former
344 * makes use of the TSC and kernel timers.
345 */
346VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
347{
348 return tmVirtualGet(pVM, true /*fCheckTimers*/);
349}
350
351
352/**
353 * Gets the current TMCLOCK_VIRTUAL time without checking
354 * timers or anything.
355 *
356 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
357 *
358 * @returns The timestamp.
359 * @param pVM The cross context VM structure.
360 *
361 * @remarks See TMVirtualGet.
362 */
363VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
364{
365 return tmVirtualGet(pVM, false /*fCheckTimers*/);
366}
367
368
369/**
370 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
371 *
372 * @returns Host nano second count.
373 * @param pVM The cross context VM structure.
374 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
375 */
376DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
377{
378 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
379 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
380 return cVirtTicksToDeadline;
381}
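
/*
 * Worked example (added for illustration, not part of the original file): with
 * u32VirtualWarpDrivePercentage = 50 the guest clock runs at half speed, so a
 * deadline that is 1000000 virtual ticks away is twice as far off in host time:
 *
 *     ASMMultU64ByU32DivByU32(1000000, 100, 50) == 2000000   (host nanoseconds)
 *
 * while at 200 percent the same interval shrinks to 500000 host nanoseconds.
 */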
382
383
384/**
385 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
386 *
387 * @returns The timestamp.
388 * @param pVM The cross context VM structure.
389 * @param u64 raw virtual time.
390 * @param off offVirtualSync.
391 * @param pcNsToDeadline Where to return the number of nano seconds to
392 * the next virtual sync timer deadline. Can be
393 * NULL.
394 * @param pnsAbsDeadline Where to return the absolute deadline.
395 * Optional.
396 */
397DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
398 uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
399{
400 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
401
402 /*
403 * Don't make updates until we've checked the timer queue.
404 */
405 bool fUpdatePrev = true;
406 bool fUpdateOff = true;
407 bool fStop = false;
408 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
409 uint64_t u64Delta = u64 - u64Prev;
410 if (RT_LIKELY(!(u64Delta >> 32)))
411 {
412 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
413 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
414 {
415 off -= u64Sub;
416 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
417 }
418 else
419 {
420 /* we've completely caught up. */
421 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
422 off = pVM->tm.s.offVirtualSyncGivenUp;
423 fStop = true;
424 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
425 }
426 }
427 else
428 {
429 /* More than 4 seconds since last time (or negative), ignore it. */
430 fUpdateOff = false;
431 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
432 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
433 }
434
435 /*
436 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
437 * approach is to never pass the head timer. So, when we would, we stop the clock
438 * and set the timer pending flag instead.
439 */
440 u64 -= off;
441
442 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
443 if (u64Last > u64)
444 {
445 u64 = u64Last + 1;
446 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
447 }
448
449 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
450 if (pnsAbsDeadline)
451 *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
452 through this code over and over again even if there aren't any timer changes. */
453 if (u64 < u64Expire)
454 {
455 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
456 if (fUpdateOff)
457 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
458 if (fStop)
459 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
460 if (fUpdatePrev)
461 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
462 if (pcNsToDeadline)
463 {
464 uint64_t cNsToDeadline = u64Expire - u64;
465 if (pVM->tm.s.fVirtualSyncCatchUp)
466 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
467 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
468 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
469 }
470 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
471 }
472 else
473 {
474 u64 = u64Expire;
475 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
476 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
477
478 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
479 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
480 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
481 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
482 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
483 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
484
485 if (pcNsToDeadline)
486 *pcNsToDeadline = 0;
487#ifdef IN_RING3
488 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
489#endif
490 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
491 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
492 }
493 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
494
495 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
496 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
497 return u64;
498}
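
/*
 * Illustrative sketch (not part of the original file): the two catch-up
 * formulas used above, restated over plain integers.  The names are invented
 * for the example, plain 64-bit arithmetic stands in for
 * ASMMultU64ByU32DivByU32, and the locking, statistics and stop/give-up
 * transitions of the real code are left out.
 */
DECLINLINE(uint64_t) tmExampleCatchUpReduceOffset(uint64_t offVirtualSync, uint64_t cNsElapsed,
                                                  uint32_t uCatchUpPct, uint64_t offGivenUp)
{
    /* Eat into the lag by a percentage of the virtual time elapsed since the
       previous query... */
    uint64_t const cNsSub = cNsElapsed * uCatchUpPct / 100;
    if (offVirtualSync > cNsSub + offGivenUp)
        return offVirtualSync - cNsSub;
    return offGivenUp;              /* ...but never below the given-up part: we've caught up. */
}

DECLINLINE(uint64_t) tmExampleCatchUpStretchDeadline(uint64_t cNsToDeadline, uint32_t uCatchUpPct)
{
    /* While catching up, the virtual sync clock runs (100 + pct) percent as
       fast as normal, so the host-time distance to the deadline shrinks. */
    return cNsToDeadline * 100 / (uCatchUpPct + 100);
}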
499
500
501/**
502 * tmVirtualSyncGetEx worker for when we get the lock.
503 *
504 * @returns The timestamp.
505 * @param pVM The cross context VM structure.
506 * @param u64 The virtual clock timestamp.
507 * @param pcNsToDeadline Where to return the number of nano seconds to
508 * the next virtual sync timer deadline. Can be
509 * NULL.
510 * @param pnsAbsDeadline Where to return the absolute deadline.
511 * Optional.
512 */
513DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
514{
515 /*
516 * Not ticking?
517 */
518 if (!pVM->tm.s.fVirtualSyncTicking)
519 {
520 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
521 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
522 if (pcNsToDeadline)
523 *pcNsToDeadline = 0;
524 if (pnsAbsDeadline)
525 *pnsAbsDeadline = u64;
526 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
527 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
528 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
529 return u64;
530 }
531
532 /*
533 * Handle catch up in a separate function.
534 */
535 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
536 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
537 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
538
539 /*
540 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
541 * approach is to never pass the head timer. So, when we would, we stop the clock
542 * and set the timer pending flag instead.
543 */
544 u64 -= off;
545
546 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
547 if (u64Last > u64)
548 {
549 u64 = u64Last + 1;
550 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
551 }
552
553 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
554 if (pnsAbsDeadline)
555 *pnsAbsDeadline = u64Expire;
556 if (u64 < u64Expire)
557 {
558 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
559 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
560 if (pcNsToDeadline)
561 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
562 }
563 else
564 {
565 u64 = u64Expire;
566 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
567 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
568
569 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
570 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
571 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
572 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
573 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
574 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
575
576#ifdef IN_RING3
577 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
578#endif
579 if (pcNsToDeadline)
580 *pcNsToDeadline = 0;
581 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
582 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
583 }
584 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
585 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
586 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
587 return u64;
588}
589
590
591/**
592 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
593 *
594 * @returns The timestamp.
595 * @param pVM The cross context VM structure.
596 * @param fCheckTimers Check timers or not
597 * @param pcNsToDeadline Where to return the number of nano seconds to
598 * the next virtual sync timer deadline. Can be
599 * NULL.
600 * @param pnsAbsDeadline Where to return the absolute deadline.
601 * Optional.
602 * @param puTscNow Where to return the TSC corresponding to the
603 * returned timestamp (delta adjusted). Optional.
604 * @thread EMT.
605 */
606DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
607 uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
608{
609 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
610
611 uint64_t u64;
612 if (!pVM->tm.s.fVirtualSyncTicking)
613 {
614 if (pcNsToDeadline)
615 *pcNsToDeadline = 0;
616 u64 = pVM->tm.s.u64VirtualSync;
617 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
618 return u64;
619 }
620
621 /*
622 * Query the virtual clock and do the usual expired timer check.
623 */
624 Assert(pVM->tm.s.cVirtualTicking);
625 u64 = tmVirtualGetRawEx(pVM, puTscNow);
626 if (fCheckTimers)
627 {
628 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
629 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
630 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64)
631 {
632 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
633 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
634#ifdef IN_RING3
635 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
636#endif
637 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
638 }
639 }
640
641 /*
642 * If we can get the lock, get it. The result is much more reliable.
643 *
644 * Note! This is where all clock source devices branch off because they
645 * will be owning the lock already. The 'else' is taken by code
646 * which is less picky or hasn't been adjusted yet.
647 */
648 /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
649 * here and the remainder of this function in a static worker. */
650 if (PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
651 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
652
653 /*
654 * When the clock is ticking, not doing catch ups and not running into an
655 * expired time, we can get away without locking. Try this first.
656 */
657 uint64_t off;
658 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
659 {
660 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
661 {
662 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
663 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
664 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
665 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
666 {
667 off = u64 - off;
668 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
669 if (off < u64Expire)
670 {
671 if (pnsAbsDeadline)
672 *pnsAbsDeadline = u64Expire;
673 if (pcNsToDeadline)
674 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
675 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
676 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
677 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
678 return off;
679 }
680 }
681 }
682 }
683 else
684 {
685 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
686 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
687 {
688 if (pcNsToDeadline)
689 *pcNsToDeadline = 0;
690 if (pnsAbsDeadline)
691 *pnsAbsDeadline = off;
692 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
693 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
694 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
695 return off;
696 }
697 }
698
699 /*
700 * Read the offset and adjust if we're playing catch-up.
701 *
702 * The catch-up adjusting works by decrementing the offset by a percentage of
703 * the time elapsed since the previous TMVirtualGetSync call.
704 *
705 * It's possible to get a very long or even negative interval between two reads
706 * for the following reasons:
707 * - Someone might have suspended the process execution, frequently the case when
708 * debugging the process.
709 * - We might be on a different CPU whose TSC isn't quite in sync with the
710 * other CPUs in the system.
711 * - Another thread is racing us and we might have been preempted while inside
712 * this function.
713 *
714 * Assuming nano second virtual time, we can simply ignore any intervals which have
715 * any of the upper 32 bits set.
716 */
717 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
718 int cOuterTries = 42;
719 for (;; cOuterTries--)
720 {
721 /* Try grab the lock, things get simpler when owning the lock. */
722 int rcLock = PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock);
723 if (RT_SUCCESS_NP(rcLock))
724 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
725
726 /* Re-check the ticking flag. */
727 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
728 {
729 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
730 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
731 && cOuterTries > 0)
732 continue;
733 if (pcNsToDeadline)
734 *pcNsToDeadline = 0;
735 if (pnsAbsDeadline)
736 *pnsAbsDeadline = off;
737 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
738 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
739 return off;
740 }
741
742 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
743 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
744 {
745 /* No changes allowed, try to get a consistent set of parameters. */
746 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
747 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
748 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
749 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
750 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
751 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
752 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
753 || cOuterTries <= 0)
754 {
755 uint64_t u64Delta = u64 - u64Prev;
756 if (RT_LIKELY(!(u64Delta >> 32)))
757 {
758 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
759 if (off > u64Sub + offGivenUp)
760 {
761 off -= u64Sub;
762 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
763 }
764 else
765 {
766 /* we've completely caught up. */
767 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
768 off = offGivenUp;
769 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
770 }
771 }
772 else
773 /* More than 4 seconds since last time (or negative), ignore it. */
774 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
775
776 /* Check that we're still running and in catch up. */
777 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
778 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
779 break;
780 if (cOuterTries <= 0)
781 break; /* enough */
782 }
783 }
784 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
785 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
786 break; /* Got a consistent offset */
787 else if (cOuterTries <= 0)
788 break; /* enough */
789 }
790 if (cOuterTries <= 0)
791 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
792
793 /*
794 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
795 * approach is to never pass the head timer. So, when we would, we stop the clock
796 * and set the timer pending flag instead.
797 */
798 u64 -= off;
799/** @todo u64VirtualSyncLast */
800 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
801 if (pnsAbsDeadline)
802 *pnsAbsDeadline = u64Expire;
803 if (u64 >= u64Expire)
804 {
805 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
806 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
807 {
808 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
809 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
810 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
811#ifdef IN_RING3
812 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
813#endif
814 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
815 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
816 }
817 else
818 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
819 if (pcNsToDeadline)
820 *pcNsToDeadline = 0;
821 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
822 }
823 else if (pcNsToDeadline)
824 {
825 uint64_t cNsToDeadline = u64Expire - u64;
826 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
827 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
828 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
829 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
830 }
831
832 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
833 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
834 return u64;
835}
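
/*
 * Illustrative sketch (not part of the original file): the lockless read
 * pattern tmVirtualSyncGetEx falls back to when VirtualSyncLock cannot be
 * taken.  Read the ticking flag, read the offset, then re-check both; only if
 * nothing changed in between is the unlocked snapshot trusted, otherwise the
 * caller retries or takes the lock.  This helper is a simplified stand-in
 * with made-up parameters; the real code also re-checks the catch-up flag.
 */
DECLINLINE(bool) tmExampleLocklessSnapshot(bool volatile *pfTicking, uint64_t volatile *poffVirtualSync,
                                           uint64_t *poffSnapshot)
{
    if (ASMAtomicUoReadBool(pfTicking))
    {
        uint64_t const off = ASMAtomicReadU64(poffVirtualSync);
        if (   ASMAtomicUoReadBool(pfTicking)            /* still ticking?    */
            && off == ASMAtomicReadU64(poffVirtualSync)) /* offset unchanged? */
        {
            *poffSnapshot = off;
            return true;                                 /* consistent snapshot */
        }
    }
    return false;                                        /* retry or take the lock */
}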
836
837
838/**
839 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
840 *
841 * @returns The timestamp.
842 * @param pVM The cross context VM structure.
843 * @thread EMT.
844 * @remarks May set the timer and virtual sync FFs.
845 */
846VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
847{
848 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
849}
850
851
852/**
853 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
854 * TMCLOCK_VIRTUAL.
855 *
856 * @returns The timestamp.
857 * @param pVM The cross context VM structure.
858 * @thread EMT.
859 * @remarks May set the timer and virtual sync FFs.
860 */
861VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
862{
863 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
864}
865
866
867/**
868 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
869 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
870 *
871 * @returns The timestamp.
872 * @param pVM The cross context VM structure.
873 * @param puTscNow Where to return the TSC value that the return
874 * value is relative to. This is delta adjusted.
875 * @thread EMT.
876 * @remarks May set the timer and virtual sync FFs.
877 */
878VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
879{
880 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
881}
882
883
884/**
885 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
886 *
887 * @returns The timestamp.
888 * @param pVM The cross context VM structure.
889 * @param fCheckTimers Check timers on the virtual clock or not.
890 * @thread EMT.
891 * @remarks May set the timer and virtual sync FFs.
892 */
893VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
894{
895 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
896}
897
898
899/**
900 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
901 * without checking timers running on TMCLOCK_VIRTUAL.
902 *
903 * @returns The timestamp.
904 * @param pVM The cross context VM structure.
905 * @param pcNsToDeadline Where to return the number of nano seconds to
906 * the next virtual sync timer deadline.
907 * @param puTscNow Where to return the TSC value that the return
908 * value is relative to. This is delta adjusted.
909 * @param puDeadlineVersion Where to return the deadline "version" number.
910 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
911 * to check if the absolute deadline is still up to
912 * date and the caller can skip calling this
913 * function.
914 * @thread EMT.
915 * @remarks May set the timer and virtual sync FFs.
916 */
917VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
918 uint64_t *puDeadlineVersion, uint64_t *puTscNow)
919{
920 uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
921 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
922 *pcNsToDeadline = cNsToDeadlineTmp;
923 return u64Now;
924}
925
926
927/**
928 * Gets the number of nano seconds to the next virtual sync deadline.
929 *
930 * @returns The number of TMCLOCK_VIRTUAL ticks.
931 * @param pVM The cross context VM structure.
932 * @param puTscNow Where to return the TSC value that the return
933 * value is relative to. This is delta adjusted.
934 * @param puDeadlineVersion Where to return the deadline "version" number.
935 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
936 * to check if the absolute deadline is still up to
937 * date and the caller can skip calling this
938 * function.
939 * @thread EMT.
940 * @remarks May set the timer and virtual sync FFs.
941 */
942VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
943{
944 uint64_t cNsToDeadline;
945 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
946 return cNsToDeadline;
947}
948
949
950/**
951 * Checks if the given deadline is still current.
952 *
953 * @retval true if the deadline is still current.
954 * @retval false if the deadline is outdated.
955 * @param pVM The cross context VM structure.
956 * @param uDeadlineVersion The deadline version to check.
957 */
958VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
959{
960 /** @todo Try use ASMAtomicUoReadU64 instead. */
961 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
962 return u64Expire == uDeadlineVersion;
963}
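
/*
 * Usage sketch (added for illustration, not part of the original file): how a
 * caller on the EMT might combine TMVirtualSyncGetNsToDeadline with the
 * deadline version check above to avoid requerying the clock when the
 * absolute deadline has not moved.  The function and parameter names are
 * invented for the example.
 */
DECLINLINE(uint64_t) tmExamplePollDeadline(PVMCC pVM, uint64_t *puCachedDeadlineVersion, uint64_t cNsProgrammed)
{
    if (TMVirtualSyncIsCurrentDeadlineVersion(pVM, *puCachedDeadlineVersion))
        return cNsProgrammed;   /* Deadline unchanged, keep whatever was programmed. */
    return TMVirtualSyncGetNsToDeadline(pVM, puCachedDeadlineVersion, NULL /*puTscNow*/);
}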
964
965
966/**
967 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
968 *
969 * @return The current lag.
970 * @param pVM The cross context VM structure.
971 */
972VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
973{
974 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
975}
976
977
978/**
979 * Get the current catch-up percent.
980 *
981 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
982 * @param pVM The cross context VM structure.
983 */
984VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
985{
986 if (pVM->tm.s.fVirtualSyncCatchUp)
987 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
988 return 0;
989}
990
991
992/**
993 * Gets the current TMCLOCK_VIRTUAL frequency.
994 *
995 * @returns The frequency.
996 * @param pVM The cross context VM structure.
997 */
998VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
999{
1000 NOREF(pVM);
1001 return TMCLOCK_FREQ_VIRTUAL;
1002}
1003
1004
1005/**
1006 * Worker for TMR3PauseClocks.
1007 *
1008 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1009 * @param pVM The cross context VM structure.
1010 */
1011int tmVirtualPauseLocked(PVMCC pVM)
1012{
1013 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
1014 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1015 if (c == 0)
1016 {
1017 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
1018 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
1019 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
1020 }
1021 return VINF_SUCCESS;
1022}
1023
1024
1025/**
1026 * Worker for TMR3ResumeClocks.
1027 *
1028 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1029 * @param pVM The cross context VM structure.
1030 */
1031int tmVirtualResumeLocked(PVMCC pVM)
1032{
1033 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
1034 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1035 if (c == 1)
1036 {
1037 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
1038 pVM->tm.s.u64VirtualRawPrev = 0;
1039 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
1040 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
1041 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
1042 }
1043 return VINF_SUCCESS;
1044}
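
/*
 * Worked example (added for illustration, not part of the original file): how
 * the offset recomputed on resume keeps the virtual clock continuous.  Say the
 * clock was paused at u64Virtual = 5000000000 ns and the raw nanotimestamp is
 * 9000000000 ns when the last VCPU resumes.  Then
 *
 *     u64VirtualOffset = 9000000000 - 5000000000 = 4000000000
 *
 * and the very next tmVirtualGetRaw() call returns 9000000000 - 4000000000 =
 * 5000000000, i.e. the clock picks up exactly where it was paused and the
 * suspended period is never seen by the guest.
 */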
1045
1046
1047/**
1048 * Converts from virtual ticks to nanoseconds.
1049 *
1050 * @returns nanoseconds.
1051 * @param pVM The cross context VM structure.
1052 * @param u64VirtualTicks The virtual ticks to convert.
1053 * @remark There could be rounding errors here. We just do a simple integer divide
1054 * without any adjustments.
1055 */
1056VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
1057{
1058 NOREF(pVM);
1059 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1060 return u64VirtualTicks;
1061}
1062
1063
1064/**
1065 * Converts from virtual ticks to microseconds.
1066 *
1067 * @returns microseconds.
1068 * @param pVM The cross context VM structure.
1069 * @param u64VirtualTicks The virtual ticks to convert.
1070 * @remark There could be rounding errors here. We just do a simple integer divide
1071 * without any adjustments.
1072 */
1073VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
1074{
1075 NOREF(pVM);
1076 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1077 return u64VirtualTicks / 1000;
1078}
1079
1080
1081/**
1082 * Converts from virtual ticks to milliseconds.
1083 *
1084 * @returns milliseconds.
1085 * @param pVM The cross context VM structure.
1086 * @param u64VirtualTicks The virtual ticks to convert.
1087 * @remark There could be rounding errors here. We just do a simple integer divide
1088 * without any adjustments.
1089 */
1090VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
1091{
1092 NOREF(pVM);
1093 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1094 return u64VirtualTicks / 1000000;
1095}
1096
1097
1098/**
1099 * Converts from nanoseconds to virtual ticks.
1100 *
1101 * @returns virtual ticks.
1102 * @param pVM The cross context VM structure.
1103 * @param u64NanoTS The nanosecond value to convert.
1104 * @remark There could be rounding and overflow errors here.
1105 */
1106VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
1107{
1108 NOREF(pVM);
1109 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1110 return u64NanoTS;
1111}
1112
1113
1114/**
1115 * Converts from microseconds to virtual ticks.
1116 *
1117 * @returns virtual ticks.
1118 * @param pVM The cross context VM structure.
1119 * @param u64MicroTS The microsecond value to convert.
1120 * @remark There could be rounding and overflow errors here.
1121 */
1122VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
1123{
1124 NOREF(pVM);
1125 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1126 return u64MicroTS * 1000;
1127}
1128
1129
1130/**
1131 * Converts from milliseconds to virtual ticks.
1132 *
1133 * @returns virtual ticks.
1134 * @param pVM The cross context VM structure.
1135 * @param u64MilliTS The millisecond value to convert.
1136 * @remark There could be rounding and overflow errors here.
1137 */
1138VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1139{
1140 NOREF(pVM);
1141 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1142 return u64MilliTS * 1000000;
1143}
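
/*
 * Worked example (added for illustration, not part of the original file):
 * since TMCLOCK_VIRTUAL runs at 1 GHz these converters are plain scalings and
 * the divisions truncate.  1234567 virtual ticks is reported as 1234
 * microseconds by TMVirtualToMicro() and as 1 millisecond by
 * TMVirtualToMilli(), while TMVirtualFromMilli(1) gives back 1000000 ticks
 * rather than the original count.
 */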
1144