VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

Last change on this file was 101025, checked in by vboxsync, 9 months ago

VMM/VMMAll: Compilation fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 43.9 KB
RevLine 
[23]1/* $Id: TMAllVirtual.cpp 101025 2023-09-06 08:29:11Z vboxsync $ */
[1]2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
[98103]7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
[1]8 *
[96407]9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
[1]26 */
27
28
[57358]29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
[1]32#define LOG_GROUP LOG_GROUP_TM
[35346]33#include <VBox/vmm/tm.h>
[37439]34#include <VBox/vmm/dbgftrace.h>
[1]35#ifdef IN_RING3
[2248]36# include <iprt/thread.h>
[1]37#endif
[101025]38#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
39# include <iprt/x86.h>
40#endif
[1]41#include "TMInternal.h"
[80268]42#include <VBox/vmm/vmcc.h>
[35346]43#include <VBox/vmm/vmm.h>
[1]44#include <VBox/err.h>
45#include <VBox/log.h>
46#include <VBox/sup.h>
47
48#include <iprt/time.h>
49#include <iprt/assert.h>
50#include <iprt/asm.h>
[29250]51#include <iprt/asm-math.h>
[1]52
53
54
[2869]55/**
[58116]56 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
[5505]57 */
[58116]58DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
59 uint64_t u64PrevNanoTS)
[5505]60{
[93657]61 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
[5505]62 pData->cBadPrev++;
63 if ((int64_t)u64DeltaPrev < 0)
[54270]64 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
65 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
[5505]66 else
[54270]67 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
68 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
[5505]69}
70
71
[92709]72#ifdef IN_RING3
[5505]73/**
[92709]74 * @callback_method_impl{FNTIMENANOTSINTERNAL, For driverless mode.}
75 */
76static DECLCALLBACK(uint64_t) tmR3VirtualNanoTSDriverless(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
77{
78 RT_NOREF(pData);
79 if (pExtra)
80 pExtra->uTSCValue = ASMReadTSC();
81 return RTTimeSystemNanoTS();
82}
83#endif
84
85
86/**
[58116]87 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
[5505]88 *
[54270]89 * This is the initial worker, so the first call in each context ends up here.
90 * It is also used should the delta rating of the host CPUs change or if the
91 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
92 * last two events may occur as CPUs are taken online.
[5505]93 */
[87626]94DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
[5505]95{
[93657]96 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
[92709]97 PFNTIMENANOTSINTERNAL pfnWorker;
[54270]98
99 /*
[92709]100 * We require a valid GIP for the selection below.
101 * Invalid GIP is fatal, though we have to allow no GIP in driverless mode (ring-3 only).
[54270]102 */
[5505]103 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
[92709]104#ifdef IN_RING3
105 if (pGip)
106#endif
107 {
108 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
109 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
110 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
111 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
[54270]112
[92709]113 /*
114 * Determine the new worker.
115 */
[93744]116#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
[92709]117 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
[93744]118#endif
[92709]119 switch (pGip->u32Mode)
120 {
[93744]121#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
[92709]122 case SUPGIPMODE_SYNC_TSC:
123 case SUPGIPMODE_INVARIANT_TSC:
[93744]124# ifdef IN_RING0
[92709]125 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
126 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
127 else
128 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
[93744]129# else
[92709]130 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
131 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
132 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
133 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
134 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
135 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
136 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
137 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
138 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
139 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
140 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
141 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
142 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
143 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
144 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
145 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
146 else
147 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
148 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
149 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
[93744]150# endif
[92709]151 break;
[54270]152
[92709]153 case SUPGIPMODE_ASYNC_TSC:
[93744]154# ifdef IN_RING0
[92709]155 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
[93744]156# else
[92709]157 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
158 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
159 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
160 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
161 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
162 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
163 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
164 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
165 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
166 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
167 else
168 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
[93744]169# endif
170 break;
[54270]171#endif
[92709]172 default:
173 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
174 }
[54270]175 }
[92709]176#ifdef IN_RING3
177 else
178 pfnWorker = tmR3VirtualNanoTSDriverless;
179#endif
[54270]180
181 /*
182 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
183 */
[93655]184 ASMAtomicWritePtr((void * volatile *)&pVM->VMCC_CTX(tm).s.pfnVirtualGetRaw, (void *)(uintptr_t)pfnWorker);
[87626]185 return pfnWorker(pData, pExtra);
[54270]186}
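/*
 * Simplified sketch of the self-replacing worker pattern above (the helper
 * name SelectBestWorker is hypothetical):
 *
 *      PFNTIMENANOTSINTERNAL pfn = SelectBestWorker(pGip);  /- GIP mode + CPU features -/
 *      ASMAtomicWritePtr((void * volatile *)&pVM->VMCC_CTX(tm).s.pfnVirtualGetRaw, (void *)(uintptr_t)pfn);
 *      return pfn(pData, pExtra);
 *
 * The TM data initially points pfnVirtualGetRaw at this rediscover function,
 * so the first tmVirtualGetRawNanoTS() call in each context performs the
 * selection once and later calls go straight to the chosen IPRT worker.
 */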
187
188
189/**
[58116]190 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
[54270]191 */
[87626]192DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra,
193 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
[54270]194{
[93657]195 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
[87626]196 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
[39038]197#ifndef _MSC_VER
[54270]198 return UINT64_MAX;
[39038]199#endif
[5505]200}
201
202
203/**
204 * Wrapper around the IPRT GIP time methods.
205 */
[80281]206DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
[5505]207{
[93655]208#ifdef IN_RING3
209 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, NULL /*pExtra*/);
210#elif defined(IN_RING0)
211 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
212 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, NULL /*pExtra*/);
213 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
[19141]214 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
[93655]215#else
216# error "unsupported context"
217#endif
[37439]218 /*DBGFTRACE_POS_U64(pVM, u64);*/
[13380]219 return u64;
[5505]220}
221
222
223/**
[87633]224 * Wrapper around the IPRT GIP time methods, extended version.
225 */
226DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
227{
228 RTITMENANOTSEXTRA Extra;
[93655]229#ifdef IN_RING3
230 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, &Extra);
231#elif defined(IN_RING0)
232 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
233 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, &Extra);
234 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
[87633]235 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
[93655]236#else
237# error "unsupported context"
238#endif
[87633]239 if (puTscNow)
240 *puTscNow = Extra.uTSCValue;
241 /*DBGFTRACE_POS_U64(pVM, u64);*/
242 return u64;
243}
244
245
246/**
[443]247 * Get the time when we're not running at 100%.
[2611]248 *
[443]249 * @returns The timestamp.
[87633]250 * @param pVM The cross context VM structure.
251 * @param puTscNow Where to return the TSC corresponding to the returned
252 * timestamp (delta adjusted). Optional.
[443]253 */
[87633]254static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
[443]255{
[2611]256 /*
257 * Recalculate the RTTimeNanoTS() value for the period where
[443]258 * warp drive has been enabled.
259 */
[87633]260 uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
[443]261 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
262 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
263 u64 /= 100;
264 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
265
[2611]266 /*
267 * Now we apply the virtual time offset.
[2869]268 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
269 * machine started if it had been running continuously without any suspends.)
[443]270 */
271 u64 -= pVM->tm.s.u64VirtualOffset;
272 return u64;
273}
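/*
 * Example with hypothetical figures: with a 50% warp drive engaged at
 * u64VirtualWarpDriveStart = 1'000'000'000 ns and a raw reading of
 * 3'000'000'000 ns, the scaling above yields
 *
 *      (3'000'000'000 - 1'000'000'000) * 50 / 100 + 1'000'000'000 = 2'000'000'000 ns
 *
 * i.e. only half of the host time elapsed since the warp drive was engaged is
 * credited to the guest before u64VirtualOffset is subtracted.
 */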
274
275
276/**
277 * Get the raw virtual time.
[2611]278 *
279 * @returns The current time stamp.
[87633]280 * @param pVM The cross context VM structure.
[443]281 */
[80281]282DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
[443]283{
284 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
[2869]285 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
[87633]286 return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
[443]287}
288
289
290/**
[87633]291 * Get the raw virtual time, extended version.
292 *
293 * @returns The current time stamp.
294 * @param pVM The cross context VM structure.
295 * @param puTscNow Where to return the TSC corresponding to the returned
296 * timestamp (delta adjusted). Optional.
297 */
298DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
299{
300 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
301 return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
302 return tmVirtualGetRawNonNormal(pVM, puTscNow);
303}
304
305
306/**
[2248]307 * Inlined version of tmVirtualGetEx.
[1]308 */
[80268]309DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
[1]310{
311 uint64_t u64;
[19324]312 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
[1]313 {
314 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
[443]315 u64 = tmVirtualGetRaw(pVM);
[1]316
317 /*
318 * Use the chance to check for expired timers.
319 */
[19660]320 if (fCheckTimers)
321 {
[80268]322 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
[46420]323 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
[19660]324 && !pVM->tm.s.fRunningQueues
[87792]325 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64
[19660]326 || ( pVM->tm.s.fVirtualSyncTicking
[87792]327 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
[19660]328 )
[1]329 )
[19660]330 && !pVM->tm.s.fRunningQueues
331 )
332 {
333 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
[74785]334 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
[19660]335 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
[1]336#ifdef IN_RING3
[19660]337 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
[1]338#endif
[19660]339 }
[1]340 }
341 }
342 else
343 u64 = pVM->tm.s.u64Virtual;
344 return u64;
345}
346
[2082]347
[1]348/**
[2248]349 * Gets the current TMCLOCK_VIRTUAL time.
350 *
351 * @returns The timestamp.
[58122]352 * @param pVM The cross context VM structure.
[2248]353 *
[2248]354 * @remark While the flow of time will never go backwards, the speed at which
355 * it progresses varies due to inaccuracies in RTTimeNanoTS and the TSC. The
356 * latter can be influenced by power saving (SpeedStep, PowerNow!), while the
357 * former makes use of the TSC and kernel timers.
358 */
[80268]359VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
[2248]360{
[32419]361 return tmVirtualGet(pVM, true /*fCheckTimers*/);
[2248]362}
363
364
365/**
[19660]366 * Gets the current TMCLOCK_VIRTUAL time without checking
367 * timers or anything.
[2248]368 *
[19660]369 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
370 *
[2248]371 * @returns The timestamp.
[58122]372 * @param pVM The cross context VM structure.
[2248]373 *
[19660]374 * @remarks See TMVirtualGet.
[2248]375 */
[80268]376VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
[2248]377{
[19660]378 return tmVirtualGet(pVM, false /*fCheckTimers*/);
[2248]379}
380
381
382/**
[32419]383 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
384 *
385 * @returns Host nano second count.
[58122]386 * @param pVM The cross context VM structure.
[32419]387 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
388 */
389DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
390{
391 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
392 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
393 return cVirtTicksToDeadline;
394}
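/*
 * Example with hypothetical figures: with the warp drive running at 200%
 * (u32VirtualWarpDrivePercentage = 200), a deadline 10'000'000 virtual ticks
 * away converts to
 *
 *      ASMMultU64ByU32DivByU32(10'000'000, 100, 200) = 5'000'000 host ns
 *
 * because guest time advances twice as fast as host time in that mode.
 */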
395
396
397/**
[19803]398 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
399 *
400 * @returns The timestamp.
[58122]401 * @param pVM The cross context VM structure.
[19803]402 * @param u64 raw virtual time.
403 * @param off offVirtualSync.
[32419]404 * @param pcNsToDeadline Where to return the number of nano seconds to
405 * the next virtual sync timer deadline. Can be
406 * NULL.
[87633]407 * @param pnsAbsDeadline Where to return the absolute deadline.
408 * Optional.
[19803]409 */
[87633]410DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
411 uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
[19803]412{
413 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
414
415 /*
[33540]416 * Don't make updates until we've checked the timer queue.
[19803]417 */
418 bool fUpdatePrev = true;
419 bool fUpdateOff = true;
420 bool fStop = false;
421 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
422 uint64_t u64Delta = u64 - u64Prev;
423 if (RT_LIKELY(!(u64Delta >> 32)))
424 {
425 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
426 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
427 {
428 off -= u64Sub;
[20050]429 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
[19803]430 }
431 else
432 {
433 /* we've completely caught up. */
434 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
435 off = pVM->tm.s.offVirtualSyncGivenUp;
436 fStop = true;
[20050]437 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
[19803]438 }
439 }
440 else
441 {
442 /* More than 4 seconds since last time (or negative), ignore it. */
443 fUpdateOff = false;
444 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
445 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
446 }
447
448 /*
449 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
450 * approach is to never pass the head timer. So, when we would pass it, we stop
451 * the clock and set the timer pending flag instead.
452 */
453 u64 -= off;
[37517]454
455 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
456 if (u64Last > u64)
457 {
458 u64 = u64Last + 1;
459 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
460 }
461
[87792]462 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
[87633]463 if (pnsAbsDeadline)
464 *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
465 thru this code over and over again even if there aren't any timer changes. */
[19803]466 if (u64 < u64Expire)
467 {
[37517]468 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
[19803]469 if (fUpdateOff)
470 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
471 if (fStop)
472 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
473 if (fUpdatePrev)
474 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
[32419]475 if (pcNsToDeadline)
476 {
477 uint64_t cNsToDeadline = u64Expire - u64;
478 if (pVM->tm.s.fVirtualSyncCatchUp)
479 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
480 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
481 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
482 }
[90346]483 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
[19803]484 }
485 else
486 {
487 u64 = u64Expire;
488 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
489 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
490
491 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
[80268]492 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
[19803]493 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
[74785]494 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
[20050]495 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
[90346]496 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
[19803]497
[32419]498 if (pcNsToDeadline)
499 *pcNsToDeadline = 0;
[19803]500#ifdef IN_RING3
501 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
502#endif
503 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
504 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
505 }
506 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
507
[20050]508 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
[37439]509 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
[19803]510 return u64;
511}
512
513
514/**
515 * tmVirtualSyncGetEx worker for when we get the lock.
516 *
517 * @returns The timestamp.
[58122]518 * @param pVM The cross context VM structure.
[19803]519 * @param u64 The virtual clock timestamp.
[32419]520 * @param pcNsToDeadline Where to return the number of nano seconds to
521 * the next virtual sync timer deadline. Can be
522 * NULL.
[87633]523 * @param pnsAbsDeadline Where to return the absolute deadline.
524 * Optional.
[19803]525 */
[87633]526DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
[19803]527{
528 /*
529 * Not ticking?
530 */
531 if (!pVM->tm.s.fVirtualSyncTicking)
532 {
533 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
[90346]534 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
[32419]535 if (pcNsToDeadline)
536 *pcNsToDeadline = 0;
[87633]537 if (pnsAbsDeadline)
538 *pnsAbsDeadline = u64;
[19803]539 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
[20050]540 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
[37439]541 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
[19803]542 return u64;
543 }
544
545 /*
546 * Handle catch up in a separate function.
547 */
548 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
549 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
[87633]550 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
[19803]551
552 /*
553 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
554 * approach is to never pass the head timer. So, when we would pass it, we stop
555 * the clock and set the timer pending flag instead.
556 */
557 u64 -= off;
[37517]558
559 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
560 if (u64Last > u64)
561 {
562 u64 = u64Last + 1;
563 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
564 }
565
[87792]566 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
[87633]567 if (pnsAbsDeadline)
568 *pnsAbsDeadline = u64Expire;
[19803]569 if (u64 < u64Expire)
[32419]570 {
[37517]571 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
[90346]572 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
[32419]573 if (pcNsToDeadline)
574 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
575 }
[19803]576 else
577 {
578 u64 = u64Expire;
579 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
580 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
581
582 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
[80268]583 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
[19803]584 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
[74785]585 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
[20050]586 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
[90346]587 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
[19803]588
589#ifdef IN_RING3
590 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
591#endif
[32419]592 if (pcNsToDeadline)
593 *pcNsToDeadline = 0;
[19803]594 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
595 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
596 }
597 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
[20050]598 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
[37439]599 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
[19803]600 return u64;
601}
602
603
604/**
[1]605 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
606 *
607 * @returns The timestamp.
[58122]608 * @param pVM The cross context VM structure.
[32419]609 * @param fCheckTimers Check timers or not
610 * @param pcNsToDeadline Where to return the number of nano seconds to
611 * the next virtual sync timer deadline. Can be
612 * NULL.
[87633]613 * @param pnsAbsDeadline Where to return the absolute deadline.
614 * Optional.
615 * @param puTscNow Where to return the TSC corresponding to the
616 * returned timestamp (delta adjusted). Optional.
[2248]617 * @thread EMT.
[1]618 */
[87633]619DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
620 uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
[1]621{
[19752]622 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
[19660]623
[37439]624 uint64_t u64;
[19752]625 if (!pVM->tm.s.fVirtualSyncTicking)
[32419]626 {
627 if (pcNsToDeadline)
628 *pcNsToDeadline = 0;
[37439]629 u64 = pVM->tm.s.u64VirtualSync;
630 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
631 return u64;
[32419]632 }
[19752]633
634 /*
635 * Query the virtual clock and do the usual expired timer check.
636 */
637 Assert(pVM->tm.s.cVirtualTicking);
[87633]638 u64 = tmVirtualGetRawEx(pVM, puTscNow);
[19752]639 if (fCheckTimers)
[1]640 {
[80268]641 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
[46420]642 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
[87792]643 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64)
[1]644 {
[19752]645 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
646 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
[1]647#ifdef IN_RING3
[19752]648 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
[1]649#endif
[19752]650 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
[1]651 }
[19752]652 }
[1]653
[19752]654 /*
[37517]655 * If we can get the lock, get it. The result is much more reliable.
656 *
657 * Note! This is where all clock source devices branch off because they
658 * will already own the lock. The 'else' is taken by code which is
659 * less picky or hasn't been adjusted yet.
660 */
[87633]661 /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
662 * here and the remainder of this function in a static worker. */
[90346]663 if (PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
[87633]664 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
[37517]665
666 /*
[19803]667 * When the clock is ticking, not doing catch ups and not running into an
668 * expired time, we can get away without locking. Try this first.
669 */
670 uint64_t off;
[19810]671 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
[19803]672 {
[19810]673 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
[19803]674 {
[19810]675 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
676 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
677 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
678 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
[19803]679 {
[19810]680 off = u64 - off;
[87792]681 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
[32419]682 if (off < u64Expire)
[19810]683 {
[87633]684 if (pnsAbsDeadline)
685 *pnsAbsDeadline = u64Expire;
[32419]686 if (pcNsToDeadline)
687 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
[19810]688 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
[20050]689 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
[37439]690 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
[19810]691 return off;
692 }
[19803]693 }
694 }
695 }
[19810]696 else
697 {
698 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
699 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
700 {
[32419]701 if (pcNsToDeadline)
702 *pcNsToDeadline = 0;
[87633]703 if (pnsAbsDeadline)
704 *pnsAbsDeadline = off;
[19810]705 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
[20050]706 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
[37439]707 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
[19810]708 return off;
709 }
710 }
[19803]711
712 /*
[19752]713 * Read the offset and adjust if we're playing catch-up.
714 *
715 * The catch-up adjustment works by decrementing the offset by a percentage of
716 * the time elapsed since the previous TMVirtualGetSync call.
717 *
718 * It's possible to get a very long or even negative interval between two reads
719 * for the following reasons:
720 * - Someone might have suspended the process execution, frequently the case when
721 * debugging the process.
722 * - We might be on a different CPU whose TSC isn't quite in sync with the
723 * other CPUs in the system.
[33540]724 * - Another thread is racing us and we might have been preempted while inside
[19752]725 * this function.
726 *
727 * Assuming nano second virtual time, we can simply ignore any intervals which have
728 * any of the upper 32 bits set.
729 */
730 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
[19803]731 int cOuterTries = 42;
[19752]732 for (;; cOuterTries--)
733 {
[19803]734 /* Try to grab the lock; things get simpler when owning it. */
[90346]735 int rcLock = PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock);
[19803]736 if (RT_SUCCESS_NP(rcLock))
[87633]737 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
[19803]738
[19752]739 /* Re-check the ticking flag. */
740 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
[1]741 {
[19803]742 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
743 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
744 && cOuterTries > 0)
745 continue;
[32419]746 if (pcNsToDeadline)
747 *pcNsToDeadline = 0;
[87633]748 if (pnsAbsDeadline)
749 *pnsAbsDeadline = off;
[20050]750 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
[37439]751 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
[19803]752 return off;
[19752]753 }
[19444]754
[19803]755 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
756 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
[19752]757 {
[19752]758 /* No changes allowed, try to get a consistent set of parameters. */
[19803]759 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
760 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
[19752]761 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
[19803]762 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
763 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
764 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
[19752]765 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
766 || cOuterTries <= 0)
[1]767 {
[19752]768 uint64_t u64Delta = u64 - u64Prev;
769 if (RT_LIKELY(!(u64Delta >> 32)))
770 {
771 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
772 if (off > u64Sub + offGivenUp)
773 {
774 off -= u64Sub;
[20050]775 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
[19752]776 }
777 else
778 {
779 /* we've completely caught up. */
780 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
781 off = offGivenUp;
[20050]782 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
[19752]783 }
784 }
785 else
786 /* More than 4 seconds since last time (or negative), ignore it. */
787 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
788
789 /* Check that we're still running and in catch up. */
[19803]790 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
791 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
[19752]792 break;
793 if (cOuterTries <= 0)
[19803]794 break; /* enough */
[1]795 }
796 }
[19803]797 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
798 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
[19752]799 break; /* Got a consistent offset */
[19803]800 else if (cOuterTries <= 0)
801 break; /* enough */
[19752]802 }
803 if (cOuterTries <= 0)
804 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
[1]805
[19752]806 /*
807 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
808 * approach is to never pass the head timer. So, when we would pass it, we stop
809 * the clock and set the timer pending flag instead.
810 */
811 u64 -= off;
[37517]812/** @todo u64VirtualSyncLast */
[87792]813 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
[87633]814 if (pnsAbsDeadline)
815 *pnsAbsDeadline = u64Expire;
[19752]816 if (u64 >= u64Expire)
817 {
[80268]818 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
[46420]819 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
[1]820 {
[74785]821 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
[19803]822 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
[19752]823 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
824#ifdef IN_RING3
825 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
826#endif
827 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
[20050]828 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
[19752]829 }
830 else
[20050]831 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
[32419]832 if (pcNsToDeadline)
833 *pcNsToDeadline = 0;
[19803]834 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
[1]835 }
[32419]836 else if (pcNsToDeadline)
837 {
838 uint64_t cNsToDeadline = u64Expire - u64;
839 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
840 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
841 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
842 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
843 }
[14253]844
[20050]845 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
[37439]846 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
[1]847 return u64;
848}
849
850
851/**
[2551]852 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
853 *
854 * @returns The timestamp.
[58122]855 * @param pVM The cross context VM structure.
[2551]856 * @thread EMT.
[19660]857 * @remarks May set the timer and virtual sync FFs.
[2551]858 */
[80268]859VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
[2551]860{
[87633]861 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
[2551]862}
863
864
865/**
[19660]866 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
867 * TMCLOCK_VIRTUAL.
868 *
869 * @returns The timestamp.
[58122]870 * @param pVM The cross context VM structure.
[19660]871 * @thread EMT.
872 * @remarks May set the timer and virtual sync FFs.
873 */
[80268]874VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
[19660]875{
[87633]876 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
[19660]877}
878
879
880/**
[87633]881 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
882 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
883 *
884 * @returns The timestamp.
885 * @param pVM The cross context VM structure.
886 * @param puTscNow Where to return the TSC value that the return
887 * value is relative to. This is delta adjusted.
888 * @thread EMT.
889 * @remarks May set the timer and virtual sync FFs.
890 */
891VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
892{
893 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
894}
895
896
897/**
[19660]898 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
899 *
900 * @returns The timestamp.
[58122]901 * @param pVM The cross context VM structure.
[19660]902 * @param fCheckTimers Check timers on the virtual clock or not.
903 * @thread EMT.
904 * @remarks May set the timer and virtual sync FFs.
905 */
[80268]906VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
[19660]907{
[87633]908 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
[19660]909}
910
911
912/**
[32419]913 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
914 * without checking timers running on TMCLOCK_VIRTUAL.
915 *
916 * @returns The timestamp.
[58122]917 * @param pVM The cross context VM structure.
[32419]918 * @param pcNsToDeadline Where to return the number of nano seconds to
919 * the next virtual sync timer deadline.
[87633]920 * @param puTscNow Where to return the TSC value that the return
921 * value is relative to. This is delta adjusted.
922 * @param puDeadlineVersion Where to return the deadline "version" number.
923 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
924 * to check if the absolute deadline is still up to
925 * date and the caller can skip calling this
926 * function.
[32419]927 * @thread EMT.
928 * @remarks May set the timer and virtual sync FFs.
929 */
[87633]930VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
931 uint64_t *puDeadlineVersion, uint64_t *puTscNow)
[32419]932{
933 uint64_t cNsToDeadlineTmp; /* try convince the compiler to skip the if tests. */
[87633]934 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
[32419]935 *pcNsToDeadline = cNsToDeadlineTmp;
936 return u64Now;
937}
938
939
940/**
941 * Gets the number of nano seconds to the next virtual sync deadline.
942 *
943 * @returns The number of TMCLOCK_VIRTUAL ticks.
[58122]944 * @param pVM The cross context VM structure.
[87633]945 * @param puTscNow Where to return the TSC value that the return
946 * value is relative to. This is delta adjusted.
947 * @param puDeadlineVersion Where to return the deadline "version" number.
948 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
949 * to check if the absolute deadline is still up to
950 * date and the caller can skip calling this
951 * function.
[32419]952 * @thread EMT.
953 * @remarks May set the timer and virtual sync FFs.
954 */
[87633]955VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
[32419]956{
957 uint64_t cNsToDeadline;
[87633]958 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
[32419]959 return cNsToDeadline;
960}
961
962
963/**
[87633]964 * Checks if the given deadline is still current.
965 *
966 * @retval true if the deadline is still current.
967 * @retval false if the deadline is outdated.
968 * @param pVM The cross context VM structure.
969 * @param uDeadlineVersion The deadline version to check.
970 */
971VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
972{
973 /** @todo Try use ASMAtomicUoReadU64 instead. */
[87792]974 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
[87633]975 return u64Expire == uDeadlineVersion;
976}
977
978
979/**
[2248]980 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
[2611]981 *
[2248]982 * @return The current lag.
[58122]983 * @param pVM The cross context VM structure.
[2248]984 */
[80268]985VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
[2248]986{
[2283]987 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
[2248]988}
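/*
 * Example with hypothetical figures: if the virtual sync clock currently
 * trails the virtual clock by offVirtualSync = 500'000'000 ns, of which
 * offVirtualSyncGivenUp = 200'000'000 ns has been written off as
 * unrecoverable, the reported lag is 300'000'000 ns (300 ms).
 */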
989
990
991/**
992 * Get the current catch-up percent.
[2611]993 *
[2248]994 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
[58122]995 * @param pVM The cross context VM structure.
[2248]996 */
[80268]997VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
[2248]998{
999 if (pVM->tm.s.fVirtualSyncCatchUp)
1000 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
1001 return 0;
1002}
1003
1004
1005/**
[1]1006 * Gets the current TMCLOCK_VIRTUAL frequency.
1007 *
[33540]1008 * @returns The frequency.
[58122]1009 * @param pVM The cross context VM structure.
[1]1010 */
[22808]1011VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
[1]1012{
[39078]1013 NOREF(pVM);
[1]1014 return TMCLOCK_FREQ_VIRTUAL;
1015}
1016
1017
1018/**
[19747]1019 * Worker for TMR3PauseClocks.
[1]1020 *
[39402]1021 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
[58122]1022 * @param pVM The cross context VM structure.
[1]1023 */
[80281]1024int tmVirtualPauseLocked(PVMCC pVM)
[1]1025{
[19747]1026 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
[39402]1027 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
[19747]1028 if (c == 0)
[1]1029 {
[19747]1030 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
1031 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
1032 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
[1]1033 }
[19324]1034 return VINF_SUCCESS;
[1]1035}
1036
1037
1038/**
[19747]1039 * Worker for TMR3ResumeClocks.
[1]1040 *
[39402]1041 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
[58122]1042 * @param pVM The cross context VM structure.
[1]1043 */
[80281]1044int tmVirtualResumeLocked(PVMCC pVM)
[1]1045{
[19747]1046 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
[39402]1047 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
[19747]1048 if (c == 1)
[1]1049 {
[19747]1050 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
1051 pVM->tm.s.u64VirtualRawPrev = 0;
1052 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
1053 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
1054 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
[1]1055 }
[19324]1056 return VINF_SUCCESS;
[1]1057}
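/*
 * Example with hypothetical figures (no warp drive): if the clocks are resumed
 * when the raw nano TS reads 7'000'000'000 ns and the virtual clock was paused
 * at u64Virtual = 2'000'000'000 ns, the code above records
 *
 *      u64VirtualOffset = 7'000'000'000 - 2'000'000'000 = 5'000'000'000 ns
 *
 * so tmVirtualGetRaw() (raw minus offset) resumes ticking from exactly
 * 2'000'000'000 ns, i.e. the time spent paused is not visible to the guest.
 */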
1058
1059
1060/**
1061 * Converts from virtual ticks to nanoseconds.
1062 *
1063 * @returns nanoseconds.
[58122]1064 * @param pVM The cross context VM structure.
[1]1065 * @param u64VirtualTicks The virtual ticks to convert.
[33540]1066 * @remark There could be rounding errors here. We just do a simple integer divide
[1]1067 * without any adjustments.
1068 */
[22808]1069VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
[1]1070{
[39078]1071 NOREF(pVM);
[1]1072 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1073 return u64VirtualTicks;
1074}
1075
1076
1077/**
1078 * Converts from virtual ticks to microseconds.
1079 *
1080 * @returns microseconds.
[58122]1081 * @param pVM The cross context VM structure.
[1]1082 * @param u64VirtualTicks The virtual ticks to convert.
[33540]1083 * @remark There could be rounding errors here. We just do a simple integer divide
[1]1084 * without any adjustments.
1085 */
[22808]1086VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
[1]1087{
[39078]1088 NOREF(pVM);
[1]1089 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1090 return u64VirtualTicks / 1000;
1091}
1092
1093
1094/**
1095 * Converts from virtual ticks to milliseconds.
1096 *
1097 * @returns milliseconds.
[58122]1098 * @param pVM The cross context VM structure.
[1]1099 * @param u64VirtualTicks The virtual ticks to convert.
[33540]1100 * @remark There could be rounding errors here. We just do a simple integer divide
[1]1101 * without any adjustments.
1102 */
[22808]1103VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
[1]1104{
[39078]1105 NOREF(pVM);
[32819]1106 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
[1]1107 return u64VirtualTicks / 1000000;
1108}
1109
1110
1111/**
1112 * Converts from nanoseconds to virtual ticks.
1113 *
1114 * @returns virtual ticks.
[58122]1115 * @param pVM The cross context VM structure.
[1]1116 * @param u64NanoTS The nanosecond value ticks to convert.
1117 * @remark There could be rounding and overflow errors here.
1118 */
[22808]1119VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
[1]1120{
[39078]1121 NOREF(pVM);
[1]1122 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1123 return u64NanoTS;
1124}
1125
1126
1127/**
1128 * Converts from microseconds to virtual ticks.
1129 *
1130 * @returns virtual ticks.
[58122]1131 * @param pVM The cross context VM structure.
[1]1132 * @param u64MicroTS The microsecond value ticks to convert.
1133 * @remark There could be rounding and overflow errors here.
1134 */
[22808]1135VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
[1]1136{
[39078]1137 NOREF(pVM);
[1]1138 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1139 return u64MicroTS * 1000;
1140}
1141
1142
1143/**
1144 * Converts from milliseconds to virtual ticks.
1145 *
1146 * @returns virtual ticks.
[58122]1147 * @param pVM The cross context VM structure.
[1]1148 * @param u64MilliTS The millisecond value ticks to convert.
1149 * @remark There could be rounding and overflow errors here.
1150 */
[22808]1151VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
[1]1152{
[39078]1153 NOREF(pVM);
[1]1154 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1155 return u64MilliTS * 1000000;
1156}
1157