VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMEmt.cpp@ 76553

Last change on this file since 76553 was 76553, checked in by vboxsync, 5 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 50.9 KB
Rev  Line 
[23]1/* $Id: VMEmt.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
[1]2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
[76553]7 * Copyright (C) 2006-2019 Oracle Corporation
[1]8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
[5999]12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
[1]16 */
17
18
[57358]19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
[1]22#define LOG_GROUP LOG_GROUP_VM
[35346]23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/em.h>
[71040]26#include <VBox/vmm/nem.h>
[35346]27#include <VBox/vmm/pdmapi.h>
[40274]28#ifdef VBOX_WITH_REM
29# include <VBox/vmm/rem.h>
30#endif
[35346]31#include <VBox/vmm/tm.h>
[1]32#include "VMInternal.h"
[35346]33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/uvm.h>
[1]35
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <iprt/assert.h>
39#include <iprt/asm.h>
[29250]40#include <iprt/asm-math.h>
[1]41#include <iprt/semaphore.h>
[2984]42#include <iprt/string.h>
[1]43#include <iprt/thread.h>
[2283]44#include <iprt/time.h>
[1]45
[19400]46
[57358]47/*********************************************************************************************************************************
48* Internal Functions *
49*********************************************************************************************************************************/
[62650]50int vmR3EmulationThreadWithId(RTTHREAD hThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);
[1]51
[19395]52
[1]53/**
[19395]54 * The emulation thread main function.
[1]55 *
56 * @returns Thread exit code.
[62650]57 * @param hThreadSelf The handle to the executing thread.
[19395]58 * @param pvArgs Pointer to the user mode per-VCpu structure (UVMCPU).
[1]59 */
[62650]60DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD hThreadSelf, void *pvArgs)
[1]61{
[19217]62 PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
[62650]63 return vmR3EmulationThreadWithId(hThreadSelf, pUVCpu, pUVCpu->idCpu);
[19395]64}
65
66
67/**
68 * The emulation thread main function, with Virtual CPU ID for debugging.
69 *
70 * @returns Thread exit code.
[62647]71 * @param hThreadSelf The handle to the executing thread.
[19395]72 * @param pUVCpu Pointer to the user mode per-VCpu structure.
73 * @param idCpu The virtual CPU ID, for backtrace purposes.
74 */
[62647]75int vmR3EmulationThreadWithId(RTTHREAD hThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
[19395]76{
77 PUVM pUVM = pUVCpu->pUVM;
[13796]78 int rc;
[62647]79 RT_NOREF_PV(hThreadSelf);
[13782]80
[6796]81 AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
82 ("Invalid arguments to the emulation thread!\n"));
[1]83
[19217]84 rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
[13796]85 AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);
[13782]86
[36437]87 if ( pUVM->pVmm2UserMethods
88 && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
89 pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);
90
[1]91 /*
92 * The request loop.
93 */
[13796]94 rc = VINF_SUCCESS;
[62650]95 Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", hThreadSelf, pUVM));
[19435]96 VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
[68853]97 ASMAtomicIncU32(&pUVM->vm.s.cActiveEmts);
[1]98 for (;;)
99 {
100 /*
[66155]101 * During early init there is no pVM and/or pVCpu, so make a special path
[6796]102 * for that to keep things clearly separate.
[1]103 */
[66155]104 PVM pVM = pUVM->pVM;
105 PVMCPU pVCpu = pUVCpu->pVCpu;
106 if (!pVCpu || !pVM)
[1]107 {
108 /*
[6796]109 * Check for termination first.
[1]110 */
[6796]111 if (pUVM->vm.s.fTerminateEMT)
112 {
113 rc = VINF_EM_TERMINATE;
114 break;
115 }
[19217]116
[19395]117 /*
118 * Only the first VCPU may initialize the VM during early init
119 * and must therefore service all VMCPUID_ANY requests.
120 * See also VMR3Create
121 */
[38838]122 if ( (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
[19395]123 && pUVCpu->idCpu == 0)
[6796]124 {
125 /*
[19395]126 * Service execute in any EMT request.
[6796]127 */
[38838]128 rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
[23190]129 Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
[6796]130 }
[38838]131 else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
[6796]132 {
133 /*
[19395]134 * Service execute in specific EMT request.
[19217]135 */
[38838]136 rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
[23190]137 Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
[19217]138 }
139 else
140 {
141 /*
[6796]142 * Nothing important is pending, so wait for something.
143 */
[19217]144 rc = VMR3WaitU(pUVCpu);
[13816]145 if (RT_FAILURE(rc))
[20855]146 {
[20901]147 AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
[6796]148 break;
[20855]149 }
[6796]150 }
[1]151 }
[6796]152 else
[1]153 {
154 /*
[6796]155 * Pending requests which need servicing?
156 *
157 * We check for state changes in addition to status codes when
158 * servicing requests. (Look after the ifs.)
[1]159 */
[6796]160 enmBefore = pVM->enmVMState;
[30473]161 if (pUVM->vm.s.fTerminateEMT)
[6796]162 {
163 rc = VINF_EM_TERMINATE;
164 break;
165 }
[23145]166
[74790]167 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
[6796]168 {
[23145]169 rc = VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
[23190]170 Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
[23145]171 }
[38838]172 else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
[23145]173 {
[6796]174 /*
[19395]175 * Service execute in any EMT request.
[6796]176 */
[38838]177 rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
[23190]178 Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
[6796]179 }
[38838]180 else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
[19217]181 {
182 /*
[19395]183 * Service execute in specific EMT request.
[19217]184 */
[38838]185 rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
[23190]186 Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
[19217]187 }
[61628]188 else if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
189 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
[6796]190 {
191 /*
192 * Service the debugger request.
193 */
[61628]194 rc = DBGFR3VMMForcedAction(pVM, pVCpu);
[23190]195 Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
[6796]196 }
[46420]197 else if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
[6796]198 {
199 /*
200 * Service a delayed reset request.
201 */
[60404]202 rc = VBOXSTRICTRC_VAL(VMR3ResetFF(pVM));
[6796]203 VM_FF_CLEAR(pVM, VM_FF_RESET);
[23190]204 Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
[6796]205 }
206 else
207 {
208 /*
209 * Nothing important is pending, so wait for something.
210 */
[19217]211 rc = VMR3WaitU(pUVCpu);
[13816]212 if (RT_FAILURE(rc))
[20856]213 {
[20901]214 AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
[6796]215 break;
[20856]216 }
[6796]217 }
218
[1]219 /*
[6796]220 * Check for termination requests; these have extremely high priority.
[1]221 */
[6796]222 if ( rc == VINF_EM_TERMINATE
[30473]223 || pUVM->vm.s.fTerminateEMT)
[6796]224 break;
[6804]225 }
[6796]226
[6804]227 /*
[19435]228 * Some requests (both VMR3Req* and the DBGF) can potentially resume
229 * or start the VM, in which case we'll get a change in VM status
230 * indicating that we're now running.
[6804]231 */
[66156]232 if (RT_SUCCESS(rc))
[6804]233 {
[66156]234 pVM = pUVM->pVM;
235 if (pVM)
[19435]236 {
[66156]237 pVCpu = &pVM->aCpus[idCpu];
238 if ( pVM->enmVMState == VMSTATE_RUNNING
239 && VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
240 {
241 rc = EMR3ExecuteVM(pVM, pVCpu);
242 Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
243 }
[19435]244 }
[1]245 }
[6804]246
[1]247 } /* forever */
248
249
250 /*
[68853]251 * Decrement the active EMT count if we haven't done it yet in vmR3Destroy.
252 */
253 if (!pUVCpu->vm.s.fBeenThruVmDestroy)
254 ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);
255
256
257 /*
[23009]258 * Cleanup and exit.
[67987]259 * EMT0 does the VM destruction after all other EMTs have deregistered and terminated.
[1]260 */
[13818]261 Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
[62650]262 hThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
[66096]263 PVM pVM;
[30473]264 if ( idCpu == 0
[66096]265 && (pVM = pUVM->pVM) != NULL)
[1]266 {
[66096]267 /* Wait for any other EMTs to terminate before we destroy the VM (see vmR3DestroyVM). */
268 for (VMCPUID iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
269 {
270 RTTHREAD hThread;
271 ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
272 if (hThread != NIL_RTTHREAD)
273 {
[66097]274 int rc2 = RTThreadWait(hThread, 5 * RT_MS_1SEC, NULL);
[66096]275 AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
276 if (RT_FAILURE(rc2))
277 pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
278 }
279 }
280
281 /* Switch to the terminated state, clearing the VM pointer and finally destroy the VM. */
[30473]282 vmR3SetTerminated(pVM);
[66096]283
[30473]284 pUVM->pVM = NULL;
[75731]285 for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
286 {
287 pUVM->aCpus[iCpu].pVM = NULL;
288 pUVM->aCpus[iCpu].pVCpu = NULL;
289 }
[30473]290
291 int rc2 = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
292 AssertLogRelRC(rc2);
[1]293 }
[67987]294 /* Deregister the EMT with VMMR0. */
295 else if ( idCpu != 0
296 && (pVM = pUVM->pVM) != NULL)
297 {
298 int rc2 = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_DEREGISTER_VMCPU, 0, NULL);
299 AssertLogRelRC(rc2);
300 }
[30473]301
[36437]302 if ( pUVM->pVmm2UserMethods
303 && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
304 pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);
305
[30473]306 pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
[6796]307 Log(("vmR3EmulationThread: EMT is terminated.\n"));
[1]308 return rc;
309}
310
[2984]311
[247]312/**
[5167]313 * Gets the name of a halt method.
314 *
315 * @returns Pointer to a read only string.
316 * @param enmMethod The method.
317 */
318static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
319{
320 switch (enmMethod)
321 {
[6796]322 case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
323 case VMHALTMETHOD_DEFAULT: return "default";
324 case VMHALTMETHOD_OLD: return "old";
325 case VMHALTMETHOD_1: return "method1";
326 //case VMHALTMETHOD_2: return "method2";
327 case VMHALTMETHOD_GLOBAL_1: return "global1";
328 default: return "unknown";
[5167]329 }
330}
331
332
333/**
[30473]334 * Signal a fatal wait error.
335 *
336 * @returns Fatal error code to be propagated up the call stack.
337 * @param pUVCpu The user mode per CPU structure of the calling
338 * EMT.
339 * @param pszFmt The error format with a single %Rrc in it.
340 * @param rcFmt The status code to format.
341 */
342static int vmR3FatalWaitError(PUVMCPU pUVCpu, const char *pszFmt, int rcFmt)
343{
344 /** @todo This is wrong ... raise a fatal error / guru meditation
345 * instead. */
346 AssertLogRelMsgFailed((pszFmt, rcFmt));
347 ASMAtomicUoWriteBool(&pUVCpu->pUVM->vm.s.fTerminateEMT, true);
348 if (pUVCpu->pVM)
349 VM_FF_SET(pUVCpu->pVM, VM_FF_CHECK_VM_STATE);
[39402]350 return VERR_VM_FATAL_WAIT_ERROR;
[30473]351}
352
353
354/**
[2830]355 * The old halt loop.
[1]356 */
[19217]357static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
[1]358{
359 /*
360 * Halt loop.
361 */
[19217]362 PVM pVM = pUVCpu->pVM;
363 PVMCPU pVCpu = pUVCpu->pVCpu;
364
[1]365 int rc = VINF_SUCCESS;
[19217]366 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
[2284]367 //unsigned cLoops = 0;
[1]368 for (;;)
369 {
370 /*
371 * Work the timers and check if we can exit.
372 * The poll call gives us the ticks left to the next event in
373 * addition to perhaps set an FF.
374 */
[33170]375 uint64_t const u64StartTimers = RTTimeNanoTS();
[1]376 TMR3TimerQueuesDo(pVM);
[33170]377 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
[33172]378 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
[74791]379 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
[74789]380 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
[1]381 break;
[19821]382 uint64_t u64NanoTS;
383 TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
[74791]384 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
[74789]385 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
[1]386 break;
387
388 /*
389 * Wait for a while. Someone will wake us up or interrupt the call if
390 * anything needs our attention.
391 */
392 if (u64NanoTS < 50000)
393 {
[2284]394 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
[1]395 /* spin */;
396 }
397 else
398 {
399 VMMR3YieldStop(pVM);
[2284]400 //uint64_t u64Start = RTTimeNanoTS();
[1]401 if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
402 {
[2284]403 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
[33170]404 uint64_t const u64StartSchedYield = RTTimeNanoTS();
[1]405 RTThreadYield(); /* this is the best we can do here */
[33170]406 uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
407 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
[1]408 }
409 else if (u64NanoTS < 2000000)
410 {
[2284]411 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
[33170]412 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
[19217]413 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
[33170]414 uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
415 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
[1]416 }
417 else
418 {
[2284]419 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
[33170]420 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
[19217]421 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
[33170]422 uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
423 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
[1]424 }
[2284]425 //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
[13818]426 //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
[1]427 }
428 if (rc == VERR_TIMEOUT)
429 rc = VINF_SUCCESS;
[13816]430 else if (RT_FAILURE(rc))
[1]431 {
[30473]432 rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
[1]433 break;
434 }
435 }
436
[19217]437 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
[2830]438 return rc;
439}
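
To make the tiers in the old halt loop above concrete, here is a small worked example of how the wait time is picked; the 5 ms input is an invented figure, not something taken from the file:

    u64NanoTS = 5000000   (5 ms until the next timer event; example value only)
        not < 50000    -> no pure spinning
        not < 870000   -> no RTThreadYield()
        not < 2000000  -> no fixed 1 ms RTSemEventWait
        otherwise      -> RTSemEventWait(..., RT_MIN((5000000 - 1000000) / 1000000, 15)) = 4 ms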
440
441
442/**
[2984]443 * Initialize the configuration of halt method 1 & 2.
444 *
445 * @return VBox status code. Failure on invalid CFGM data.
[58126]446 * @param pUVM The user mode VM structure.
[2984]447 */
[6796]448static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
[2984]449{
450 /*
451 * The defaults.
452 */
[5080]453#if 1 /* DEBUGGING STUFF - REMOVE LATER */
[6796]454 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
455 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 2*1000000;
456 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 75*1000000;
457 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 30*1000000;
458 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 20*1000000;
[5080]459#else
[6796]460 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
461 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
462 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
463 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
464 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
[5080]465#endif
[2984]466
467 /*
468 * Query overrides.
[5080]469 *
[33540]470 * I don't have time to bother with niceties such as invalid value checks
[5080]471 * here right now. sorry.
[2984]472 */
[6796]473 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
[2984]474 if (pCfg)
475 {
[5080]476 uint32_t u32;
477 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
[6796]478 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
[5080]479 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
[6796]480 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
[5080]481 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
[6796]482 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
[5080]483 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
[6796]484 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
[5080]485 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
[6796]486 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
[57212]487 LogRel(("VMEmt: HaltedMethod1 config: %d/%d/%d/%d/%d\n",
[6796]488 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
489 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
490 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
491 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
492 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
[2984]493 }
494
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Initialize halt method 1.
501 *
502 * @return VBox status code.
[6796]503 * @param pUVM Pointer to the user mode VM structure.
[2984]504 */
[6796]505static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
[2984]506{
[6796]507 return vmR3HaltMethod12ReadConfigU(pUVM);
[2984]508}
509
510
511/**
[2965]512 * Method 1 - Block whenever possible, and when lagging behind
[33540]513 * switch to spinning for 10-30ms with occasional blocking until
[2830]514 * the lag has been eliminated.
515 */
[19217]516static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
[2830]517{
[19217]518 PUVM pUVM = pUVCpu->pUVM;
519 PVMCPU pVCpu = pUVCpu->pVCpu;
520 PVM pVM = pUVCpu->pVM;
[6796]521
[1]522 /*
[2984]523 * To simplify things, we decide up-front whether we should switch to spinning or
524 * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
525 * and that it will generate interrupts or other events that will cause us to exit
526 * the halt loop.
[1]527 */
[2830]528 bool fBlockOnce = false;
529 bool fSpinning = false;
[2965]530 uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
531 if (u32CatchUpPct /* non-zero if catching up */)
[2830]532 {
[19217]533 if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
[2830]534 {
[6796]535 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
[2965]536 if (fSpinning)
537 {
538 uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
[19217]539 fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
[6796]540 > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
541 RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
542 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
[2965]543 }
544 else
545 {
[19217]546 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
547 pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
[2965]548 }
549 }
550 else
551 {
[6796]552 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
[2965]553 if (fSpinning)
[19217]554 pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
[2830]555 }
556 }
[19217]557 else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
[2965]558 {
[19217]559 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
560 pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
[2965]561 }
[1]562
[2830]563 /*
564 * Halt loop.
565 */
566 int rc = VINF_SUCCESS;
[19217]567 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
[2830]568 unsigned cLoops = 0;
569 for (;; cLoops++)
570 {
571 /*
[2965]572 * Work the timers and check if we can exit.
[2830]573 */
[33170]574 uint64_t const u64StartTimers = RTTimeNanoTS();
[2830]575 TMR3TimerQueuesDo(pVM);
[33170]576 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
[33172]577 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
[74791]578 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
[74789]579 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
[2830]580 break;
[1]581
[2830]582 /*
583 * Estimate time left to the next event.
584 */
[19821]585 uint64_t u64NanoTS;
586 TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
[74791]587 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
[74789]588 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
[2830]589 break;
590
591 /*
592 * Block if we're not spinning and the interval isn't all that small.
593 */
[2965]594 if ( ( !fSpinning
[2830]595 || fBlockOnce)
[5080]596#if 1 /* DEBUGGING STUFF - REMOVE LATER */
597 && u64NanoTS >= 100000) /* 0.100 ms */
598#else
[2830]599 && u64NanoTS >= 250000) /* 0.250 ms */
[5080]600#endif
[2830]601 {
[19217]602 const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
[2830]603 VMMR3YieldStop(pVM);
604
605 uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
[19217]606 if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
[2830]607 cMilliSecs = 1;
[2965]608 else
[19217]609 cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
[33170]610
[2984]611 //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
[33170]612 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
[19217]613 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
[33170]614 uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
615 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
616
[2830]617 if (rc == VERR_TIMEOUT)
618 rc = VINF_SUCCESS;
[13816]619 else if (RT_FAILURE(rc))
[2830]620 {
[30473]621 rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
[2830]622 break;
623 }
624
[2965]625 /*
626 * Calc the statistics.
[2830]627 * Update averages every 16th time, and flush parts of the history every 64th time.
628 */
629 const uint64_t Elapsed = RTTimeNanoTS() - Start;
[19217]630 pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
[2830]631 if (Elapsed > u64NanoTS)
[19217]632 pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
633 pUVCpu->vm.s.Halt.Method12.cBlocks++;
634 if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
[2830]635 {
[19217]636 pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
637 if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
[2830]638 {
[19217]639 pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
640 pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
[2830]641 }
642 }
[2984]643 //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");
[2830]644
645 /*
646 * Clear the block once flag if we actually blocked.
647 */
648 if ( fBlockOnce
649 && Elapsed > 100000 /* 0.1 ms */)
650 fBlockOnce = false;
651 }
652 }
[2984]653 //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
[2830]654
[19217]655 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
[1]656 return rc;
657}
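
As a rough sketch of the block-once interval logic in vmR3HaltMethod1Halt, using the debug-build defaults from vmR3HaltMethod12ReadConfigU and an invented 100 ms virtual sync lag (the lag value is an example, not from the file):

    u64Lag = 100000000   (100 ms behind; example value only)
    minimum time between blocks
        = RT_MAX(u32MinBlockIntervalCfg, RT_MIN(u64Lag / u32LagBlockIntervalDivisorCfg, u32MaxBlockIntervalCfg))
        = RT_MAX(2 ms, RT_MIN(100 ms / 4, 75 ms))
        = 25 ms
    so fBlockOnce is only set once at least 25 ms have passed since Method12.u64LastBlockTS.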
658
659
660/**
[5167]661 * Initialize the global 1 halt method.
662 *
663 * @return VBox status code.
[6796]664 * @param pUVM Pointer to the user mode VM structure.
[5167]665 */
[6796]666static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
[5167]667{
[33170]668 /*
669 * The defaults.
670 */
671 uint32_t cNsResolution = SUPSemEventMultiGetResolution(pUVM->vm.s.pSession);
[33172]672 if (cNsResolution > 5*RT_NS_100US)
673 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 50000;
674 else if (cNsResolution > RT_NS_100US)
675 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = cNsResolution / 4;
676 else
677 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 2000;
[33170]678
679 /*
680 * Query overrides.
681 *
[33540]682 * I don't have time to bother with niceties such as invalid value checks
[33170]683 * here right now. sorry.
684 */
685 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedGlobal1");
686 if (pCfg)
687 {
688 uint32_t u32;
689 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "SpinBlockThreshold", &u32)))
690 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = u32;
691 }
[57212]692 LogRel(("VMEmt: HaltedGlobal1 config: cNsSpinBlockThresholdCfg=%u\n",
[33170]693 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg));
[5167]694 return VINF_SUCCESS;
695}
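
A worked example of the threshold selection above, assuming (purely for illustration) that SUPSemEventMultiGetResolution reports a 1 ms wait granularity on the host:

    cNsResolution = 1000000   (1 ms; assumed example value)
        1000000 > 5*RT_NS_100US (500000)  ->  cNsSpinBlockThresholdCfg = 50000 ns
    so vmR3HaltGlobal1Halt below only blocks in ring-0 when u64Delta >= 50000 ns (50 us) and spins otherwise.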
696
697
698/**
699 * The global 1 halt method - Block in GMM (ring-0) and let it
700 * try to take care of the global scheduling of EMT threads.
701 */
[19217]702static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
[5167]703{
[19217]704 PUVM pUVM = pUVCpu->pUVM;
705 PVMCPU pVCpu = pUVCpu->pVCpu;
706 PVM pVM = pUVCpu->pVM;
[19395]707 Assert(VMMGetCpu(pVM) == pVCpu);
[39078]708 NOREF(u64Now);
[6796]709
[5167]710 /*
711 * Halt loop.
712 */
[33170]713 //uint64_t u64NowLog, u64Start;
714 //u64Start = u64NowLog = RTTimeNanoTS();
[5167]715 int rc = VINF_SUCCESS;
[19217]716 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
[5167]717 unsigned cLoops = 0;
718 for (;; cLoops++)
719 {
720 /*
721 * Work the timers and check if we can exit.
722 */
[33170]723 uint64_t const u64StartTimers = RTTimeNanoTS();
[5167]724 TMR3TimerQueuesDo(pVM);
[33170]725 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
[33172]726 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
[74791]727 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
[74789]728 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
[5167]729 break;
730
731 /*
732 * Estimate time left to the next event.
733 */
[33170]734 //u64NowLog = RTTimeNanoTS();
[5167]735 uint64_t u64Delta;
[19660]736 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
[74791]737 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
[74789]738 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
[5167]739 break;
740
741 /*
742 * Block if we're not spinning and the interval isn't all that small.
743 */
[33170]744 if (u64Delta >= pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg)
[5167]745 {
746 VMMR3YieldStop(pVM);
[74791]747 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
[74789]748 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
[33170]749 break;
[5167]750
[33170]751 //RTLogPrintf("loop=%-3d u64GipTime=%'llu / %'llu now=%'llu / %'llu\n", cLoops, u64GipTime, u64Delta, u64NowLog, u64GipTime - u64NowLog);
752 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
[20864]753 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
[33170]754 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
755 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
756 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
757
[5167]758 if (rc == VERR_INTERRUPTED)
759 rc = VINF_SUCCESS;
[13816]760 else if (RT_FAILURE(rc))
[5167]761 {
[51052]762 rc = vmR3FatalWaitError(pUVCpu, "vmR3HaltGlobal1Halt: VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
[5167]763 break;
764 }
[33170]765 else
766 {
767 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
768 if (cNsOverslept > 50000)
[75646]769 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept);
[33170]770 else if (cNsOverslept < -50000)
[75646]771 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia, cNsElapsedSchedHalt);
[33170]772 else
[75646]773 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime, cNsElapsedSchedHalt);
[33170]774 }
[5167]775 }
776 /*
777 * When spinning, call upon the GVMM and do some wakeups once
778 * in a while; it's not like we're actually busy or anything.
779 */
780 else if (!(cLoops & 0x1fff))
781 {
[33170]782 uint64_t const u64StartSchedYield = RTTimeNanoTS();
[20864]783 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
[33170]784 uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
785 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
[5167]786 }
787 }
[33170]788 //RTLogPrintf("*** %u loops %'llu; lag=%RU64\n", cLoops, u64NowLog - u64Start, TMVirtualSyncGetLag(pVM));
[5167]789
[19217]790 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
[5167]791 return rc;
792}
793
794
795/**
796 * The global 1 halt method - VMR3Wait() worker.
797 *
798 * @returns VBox status code.
[19217]799 * @param pUVCpu Pointer to the user mode VMCPU structure.
[5167]800 */
[19217]801static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
[5167]802{
[19217]803 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
[6796]804
[19217]805 PVM pVM = pUVCpu->pUVM->pVM;
[19141]806 PVMCPU pVCpu = VMMGetCpu(pVM);
[19395]807 Assert(pVCpu->idCpu == pUVCpu->idCpu);
[19141]808
[5167]809 int rc = VINF_SUCCESS;
810 for (;;)
811 {
812 /*
813 * Check Relevant FFs.
814 */
[74791]815 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
[74789]816 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
[5167]817 break;
818
819 /*
820 * Wait for a while. Someone will wake us up or interrupt the call if
821 * anything needs our attention.
822 */
[20864]823 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
[5167]824 if (rc == VERR_INTERRUPTED)
825 rc = VINF_SUCCESS;
[13816]826 else if (RT_FAILURE(rc))
[5167]827 {
[51052]828 rc = vmR3FatalWaitError(pUVCpu, "vmR3HaltGlobal1Wait: VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
[5167]829 break;
830 }
831 }
[6796]832
[19217]833 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
[5167]834 return rc;
835}
836
837
838/**
839 * The global 1 halt method - VMR3NotifyFF() worker.
840 *
[19400]841 * @param pUVCpu Pointer to the user mode VMCPU structure.
842 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
[5167]843 */
[19400]844static DECLCALLBACK(void) vmR3HaltGlobal1NotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
[5167]845{
[75646]846 /*
847 * With ring-0 halting, the fWait flag isn't set, so we have to check the
848 * CPU state to figure out whether to do a wakeup call.
849 */
850 PVMCPU pVCpu = pUVCpu->pVCpu;
851 if (pVCpu)
[5167]852 {
[75646]853 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
854 if (enmState == VMCPUSTATE_STARTED_HALTED || pUVCpu->vm.s.fWait)
[19461]855 {
[75646]856 int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
857 AssertRC(rc);
858
859 }
860 else if ( (fFlags & VMNOTIFYFF_FLAGS_POKE)
861 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
862 {
[71040]863 if (enmState == VMCPUSTATE_STARTED_EXEC)
[19461]864 {
[71040]865 if (fFlags & VMNOTIFYFF_FLAGS_POKE)
866 {
867 int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
868 AssertRC(rc);
869 }
[19461]870 }
[71129]871 else if ( enmState == VMCPUSTATE_STARTED_EXEC_NEM
872 || enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
[71040]873 NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
[40274]874#ifdef VBOX_WITH_REM
[71040]875 else if (enmState == VMCPUSTATE_STARTED_EXEC_REM)
876 {
877 if (!(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
878 REMR3NotifyFF(pUVCpu->pVM);
879 }
880#endif
[19461]881 }
[19407]882 }
[75646]883 /* This probably makes little sense: */
884 else if (pUVCpu->vm.s.fWait)
885 {
886 int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
887 AssertRC(rc);
888 }
[5167]889}
890
891
892/**
[6796]893 * Bootstrap VMR3Wait() worker.
894 *
895 * @returns VBox status code.
[58126]896 * @param pUVCpu Pointer to the user mode VMCPU structure.
[6796]897 */
[19217]898static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
[6796]899{
[19217]900 PUVM pUVM = pUVCpu->pUVM;
[6796]901
[19217]902 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
903
[6796]904 int rc = VINF_SUCCESS;
905 for (;;)
906 {
907 /*
908 * Check Relevant FFs.
909 */
[38838]910 if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs) /* global requests pending? */
[6796]911 break;
[38838]912 if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs) /* local requests pending? */
[19217]913 break;
914
915 if ( pUVCpu->pVM
[74791]916 && ( VM_FF_IS_ANY_SET(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
[74789]917 || VMCPU_FF_IS_ANY_SET(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
[19141]918 )
919 )
[6796]920 break;
[30473]921 if (pUVM->vm.s.fTerminateEMT)
[6799]922 break;
[6796]923
924 /*
925 * Wait for a while. Someone will wake us up or interrupt the call if
926 * anything needs our attention.
927 */
[19217]928 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
[6796]929 if (rc == VERR_TIMEOUT)
930 rc = VINF_SUCCESS;
[13816]931 else if (RT_FAILURE(rc))
[6796]932 {
[30473]933 rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
[6796]934 break;
935 }
936 }
937
[19217]938 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
[6796]939 return rc;
940}
941
942
943/**
944 * Bootstrap VMR3NotifyFF() worker.
945 *
[19400]946 * @param pUVCpu Pointer to the user mode VMCPU structure.
947 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
[6796]948 */
[19400]949static DECLCALLBACK(void) vmR3BootstrapNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
[6796]950{
[19217]951 if (pUVCpu->vm.s.fWait)
[6796]952 {
[19217]953 int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
[6796]954 AssertRC(rc);
955 }
[19400]956 NOREF(fFlags);
[6796]957}
958
959
960/**
[2984]961 * Default VMR3Wait() worker.
962 *
963 * @returns VBox status code.
[58126]964 * @param pUVCpu Pointer to the user mode VMCPU structure.
[2984]965 */
[19217]966static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
[2984]967{
[19217]968 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
[6796]969
[19217]970 PVM pVM = pUVCpu->pVM;
971 PVMCPU pVCpu = pUVCpu->pVCpu;
[19141]972 int rc = VINF_SUCCESS;
[2984]973 for (;;)
974 {
975 /*
976 * Check Relevant FFs.
977 */
[74791]978 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
[74789]979 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
[2984]980 break;
981
982 /*
983 * Wait for a while. Someone will wake us up or interrupt the call if
984 * anything needs our attention.
985 */
[19217]986 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
[2984]987 if (rc == VERR_TIMEOUT)
988 rc = VINF_SUCCESS;
[13816]989 else if (RT_FAILURE(rc))
[2984]990 {
[30473]991 rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc", rc);
[2984]992 break;
993 }
994 }
[6796]995
[19217]996 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
[2984]997 return rc;
998}
999
1000
1001/**
1002 * Default VMR3NotifyFF() worker.
1003 *
[19400]1004 * @param pUVCpu Pointer to the user mode VMCPU structure.
1005 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
[2984]1006 */
[19400]1007static DECLCALLBACK(void) vmR3DefaultNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
[2984]1008{
[19217]1009 if (pUVCpu->vm.s.fWait)
[2984]1010 {
[19217]1011 int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
[2984]1012 AssertRC(rc);
1013 }
[71040]1014 else
1015 {
1016 PVMCPU pVCpu = pUVCpu->pVCpu;
1017 if (pVCpu)
1018 {
1019 VMCPUSTATE enmState = pVCpu->enmState;
[71129]1020 if ( enmState == VMCPUSTATE_STARTED_EXEC_NEM
1021 || enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
[71040]1022 NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
[40274]1023#ifdef VBOX_WITH_REM
[71040]1024 else if ( !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)
1025 && enmState == VMCPUSTATE_STARTED_EXEC_REM)
1026 REMR3NotifyFF(pUVCpu->pVM);
[40274]1027#endif
[71040]1028 }
1029 }
[2984]1030}
1031
1032
1033/**
1034 * Array with halt method descriptors.
1035 * VMINT::iHaltMethod contains an index into this array.
1036 */
1037static const struct VMHALTMETHODDESC
1038{
[75646]1039 /** The halt method ID. */
1040 VMHALTMETHOD enmHaltMethod;
1041 /** Set if the method support halting directly in ring-0. */
1042 bool fMayHaltInRing0;
[2984]1043 /** The init function for loading config and initialize variables. */
[75646]1044 DECLR3CALLBACKMEMBER(int, pfnInit,(PUVM pUVM));
[2984]1045 /** The term function. */
[75646]1046 DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
[19401]1047 /** The VMR3WaitHaltedU function. */
[75646]1048 DECLR3CALLBACKMEMBER(int, pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
[19401]1049 /** The VMR3WaitU function. */
[75646]1050 DECLR3CALLBACKMEMBER(int, pfnWait,(PUVMCPU pUVCpu));
[19401]1051 /** The VMR3NotifyCpuFFU function. */
[75646]1052 DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags));
[19401]1053 /** The VMR3NotifyGlobalFFU function. */
[75646]1054 DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags));
[2984]1055} g_aHaltMethods[] =
1056{
[75646]1057 { VMHALTMETHOD_BOOTSTRAP, false, NULL, NULL, NULL, vmR3BootstrapWait, vmR3BootstrapNotifyCpuFF, NULL },
1058 { VMHALTMETHOD_OLD, false, NULL, NULL, vmR3HaltOldDoHalt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL },
1059 { VMHALTMETHOD_1, false, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL },
1060 { VMHALTMETHOD_GLOBAL_1, true, vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL },
[2984]1061};
1062
1063
1064/**
1065 * Notify the emulation thread (EMT) about pending Forced Action (FF).
1066 *
1067 * This function is called by threads other than EMT to make
1068 * sure EMT wakes up and promptly services an FF request.
1069 *
[19400]1070 * @param pUVM Pointer to the user mode VM structure.
1071 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
[44340]1072 * @internal
[2984]1073 */
[44340]1074VMMR3_INT_DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
[2984]1075{
[19400]1076 LogFlow(("VMR3NotifyGlobalFFU:\n"));
[75646]1077 uint32_t iHaltMethod = pUVM->vm.s.iHaltMethod;
[19217]1078
[75646]1079 if (g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */
1080 g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF(pUVM, fFlags);
[19400]1081 else
1082 for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
[75646]1083 g_aHaltMethods[iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags);
[2984]1084}
1085
1086
1087/**
[6796]1088 * Notify the emulation thread (EMT) about pending Forced Action (FF).
1089 *
1090 * This function is called by threads other than EMT to make
1091 * sure EMT wakes up and promptly services an FF request.
1092 *
[53795]1093 * @param pUVCpu Pointer to the user mode per CPU VM structure.
[19400]1094 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
[44340]1095 * @internal
[6796]1096 */
[44340]1097VMMR3_INT_DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
[6796]1098{
[19217]1099 PUVM pUVM = pUVCpu->pUVM;
[6796]1100
[19217]1101 LogFlow(("VMR3NotifyCpuFFU:\n"));
[19400]1102 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(pUVCpu, fFlags);
[19217]1103}
1104
1105
[6796]1106/**
[2830]1107 * Halted VM Wait.
1108 * Any external event will unblock the thread.
1109 *
[20961]1110 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
[2830]1111 * case an appropriate status code is returned.
[58122]1112 * @param pVM The cross context VM structure.
[58123]1113 * @param pVCpu The cross context virtual CPU structure.
[2830]1114 * @param fIgnoreInterrupts If set, the VM_FF_INTERRUPT flags are ignored.
1115 * @thread The emulation thread.
[53795]1116 * @remarks Made visible for implementing vmsvga sync register.
[44340]1117 * @internal
[2830]1118 */
[44340]1119VMMR3_INT_DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
[2830]1120{
1121 LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));
1122
1123 /*
1124 * Check Relevant FFs.
1125 */
1126 const uint32_t fMask = !fIgnoreInterrupts
[19141]1127 ? VMCPU_FF_EXTERNAL_HALTED_MASK
[60804]1128 : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
[74791]1129 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
[74789]1130 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
[2830]1131 {
[74798]1132 LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#RX64)\n", pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
[2830]1133 return VINF_SUCCESS;
1134 }
1135
1136 /*
[12549]1137 * The yielder is suspended while we're halting, while TM might have clock(s) running
1138 * only at certain times and needs to be notified.
[2830]1139 */
[19539]1140 if (pVCpu->idCpu == 0)
1141 VMMR3YieldSuspend(pVM);
[19032]1142 TMNotifyStartOfHalt(pVCpu);
[2830]1143
1144 /*
1145 * Record halt averages for the last second.
1146 */
[19299]1147 PUVMCPU pUVCpu = pVCpu->pUVCpu;
[2830]1148 uint64_t u64Now = RTTimeNanoTS();
[19217]1149 int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
[2830]1150 if (off > 1000000000)
1151 {
[19217]1152 if (off > _4G || !pUVCpu->vm.s.cHalts)
[2830]1153 {
[19217]1154 pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
1155 pUVCpu->vm.s.HaltFrequency = 1;
[2830]1156 }
1157 else
1158 {
[19217]1159 pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
1160 pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
[2830]1161 }
[19217]1162 pUVCpu->vm.s.u64HaltsStartTS = u64Now;
1163 pUVCpu->vm.s.cHalts = 0;
[2830]1164 }
[19217]1165 pUVCpu->vm.s.cHalts++;
[2830]1166
1167 /*
1168 * Do the halt.
1169 */
[45749]1170 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
[19435]1171 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
[19217]1172 PUVM pUVM = pUVCpu->pUVM;
1173 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
[19435]1174 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
[2830]1175
1176 /*
[12549]1177 * Notify TM and resume the yielder
[2830]1178 */
[19032]1179 TMNotifyEndOfHalt(pVCpu);
[19539]1180 if (pVCpu->idCpu == 0)
1181 VMMR3YieldResume(pVM);
[2830]1182
[19141]1183 LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
[2830]1184 return rc;
1185}
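
A worked example of the halt statistics maintained above, with invented numbers: suppose the EMT halted cHalts = 500 times over off = 1250000000 ns (1.25 s):

    HaltInterval  = off / cHalts = 1250000000 / 500 = 2500000 ns   (2.5 ms between halts on average)
    HaltFrequency = ASMMultU64ByU32DivByU32(500, 1000000000, 1250000000) = 400   (halts per second)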
1186
1187
1188/**
[1]1189 * Suspended VM Wait.
1190 * Only a handful of forced actions will cause the function to
1191 * return to the caller.
1192 *
[20961]1193 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
[1]1194 * case an appropriate status code is returned.
[19217]1195 * @param pUVCpu Pointer to the user mode VMCPU structure.
[1]1196 * @thread The emulation thread.
[44340]1197 * @internal
[1]1198 */
[44340]1199VMMR3_INT_DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
[1]1200{
[6796]1201 LogFlow(("VMR3WaitU:\n"));
[1]1202
1203 /*
1204 * Check Relevant FFs.
1205 */
[19217]1206 PVM pVM = pUVCpu->pVM;
1207 PVMCPU pVCpu = pUVCpu->pVCpu;
[19141]1208
[6796]1209 if ( pVM
[74791]1210 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
[74789]1211 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
[19141]1212 )
1213 )
[1]1214 {
[19141]1215 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
[1]1216 return VINF_SUCCESS;
1217 }
1218
[2984]1219 /*
1220 * Do waiting according to the halt method (so VMR3NotifyFF
1221 * doesn't have to special case anything).
1222 */
[19217]1223 PUVM pUVM = pUVCpu->pUVM;
1224 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
[36255]1225 LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pUVM->pVM ? pUVM->pVM->fGlobalForcedActions : 0));
[2984]1226 return rc;
1227}
1228
1229
1230/**
[24740]1231 * Interface that PDMR3Suspend, PDMR3PowerOff and PDMR3Reset use when they wait
1232 * for the handling of asynchronous notifications to complete.
1233 *
1234 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
1235 * case an appropriate status code is returned.
1236 * @param pUVCpu Pointer to the user mode VMCPU structure.
1237 * @thread The emulation thread.
1238 */
1239VMMR3_INT_DECL(int) VMR3AsyncPdmNotificationWaitU(PUVMCPU pUVCpu)
1240{
1241 LogFlow(("VMR3AsyncPdmNotificationWaitU:\n"));
1242 return VMR3WaitU(pUVCpu);
1243}
1244
1245
1246/**
1247 * Interface that the PDM asynchronous notification completed helper methods
1248 * use for EMT0 when it is waiting inside VMR3AsyncPdmNotificationWaitU().
1249 *
1250 * @param pUVM Pointer to the user mode VM structure.
1251 */
1252VMMR3_INT_DECL(void) VMR3AsyncPdmNotificationWakeupU(PUVM pUVM)
1253{
1254 LogFlow(("VMR3AsyncPdmNotificationWakeupU:\n"));
1255 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST); /* this will have to do for now. */
1256 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[0], 0 /*fFlags*/);
1257}
1258
1259
1260/**
[20663]1261 * Rendezvous callback that will be called once.
1262 *
[23145]1263 * @returns VBox strict status code.
[58122]1264 * @param pVM The cross context VM structure.
[58123]1265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[20663]1266 * @param pvUser The new g_aHaltMethods index.
1267 */
[23145]1268static DECLCALLBACK(VBOXSTRICTRC) vmR3SetHaltMethodCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
[20663]1269{
1270 PUVM pUVM = pVM->pUVM;
1271 uintptr_t i = (uintptr_t)pvUser;
1272 Assert(i < RT_ELEMENTS(g_aHaltMethods));
1273 NOREF(pVCpu);
1274
1275 /*
1276 * Terminate the old one.
1277 */
1278 if ( pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
1279 && g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
1280 {
1281 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
1282 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
1283 }
1284
1285 /* Assert that the failure fallback is where we expect. */
1286 Assert(g_aHaltMethods[0].enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
1287 Assert(!g_aHaltMethods[0].pfnTerm && !g_aHaltMethods[0].pfnInit);
1288
1289 /*
1290 * Init the new one.
1291 */
1292 int rc = VINF_SUCCESS;
1293 memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
1294 if (g_aHaltMethods[i].pfnInit)
1295 {
1296 rc = g_aHaltMethods[i].pfnInit(pUVM);
1297 if (RT_FAILURE(rc))
1298 {
1299 /* Fall back on the bootstrap method. This requires no
1300 init/term (see assertion above), and will always work. */
1301 AssertLogRelRC(rc);
1302 i = 0;
1303 }
1304 }
1305
1306 /*
1307 * Commit it.
1308 */
1309 pUVM->vm.s.enmHaltMethod = g_aHaltMethods[i].enmHaltMethod;
1310 ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
1311
[75646]1312 VMMR3SetMayHaltInRing0(pVCpu, g_aHaltMethods[i].fMayHaltInRing0,
1313 g_aHaltMethods[i].enmHaltMethod == VMHALTMETHOD_GLOBAL_1
1314 ? pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg : 0);
1315
[20663]1316 return rc;
1317}
1318
1319
1320/**
[2984]1321 * Changes the halt method.
1322 *
1323 * @returns VBox status code.
[6796]1324 * @param pUVM Pointer to the user mode VM structure.
[2984]1325 * @param enmHaltMethod The new halt method.
1326 * @thread EMT.
1327 */
[6796]1328int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
[2984]1329{
[6796]1330 PVM pVM = pUVM->pVM; Assert(pVM);
[2984]1331 VM_ASSERT_EMT(pVM);
1332 AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);
1333
1334 /*
1335 * Resolve default (can be overridden in the configuration).
1336 */
1337 if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
[1]1338 {
[2984]1339 uint32_t u32;
1340 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
[13816]1341 if (RT_SUCCESS(rc))
[1]1342 {
[2984]1343 enmHaltMethod = (VMHALTMETHOD)u32;
1344 if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
[6300]1345 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
[1]1346 }
[2984]1347 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
[6300]1348 return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
[2984]1349 else
[5167]1350 enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
1351 //enmHaltMethod = VMHALTMETHOD_1;
[3273]1352 //enmHaltMethod = VMHALTMETHOD_OLD;
[2984]1353 }
[57212]1354 LogRel(("VMEmt: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));
[1]1355
[2984]1356 /*
1357 * Find the descriptor.
1358 */
1359 unsigned i = 0;
1360 while ( i < RT_ELEMENTS(g_aHaltMethods)
1361 && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
1362 i++;
1363 AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);
1364
1365 /*
[20663]1366 * This needs to be done while the other EMTs are not sleeping or otherwise messing around.
[2984]1367 */
[20663]1368 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3SetHaltMethodCallback, (void *)(uintptr_t)i);
[1]1369}
1370
[53795]1371
1372/**
1373 * Special interface for implementing a HLT-like port on a device.
1374 *
1375 * This can be called directly from device code, provided the device is trusted
1376 * to access the VMM directly. Since we may not have an accurate register set
1377 * and the caller certainly shouldn't (device code does not access CPU
1378 * registers), this function will return when interrupts are pending regardless
1379 * of the actual EFLAGS.IF state.
1380 *
1381 * @returns VBox error status (never informational statuses).
[58122]1382 * @param pVM The cross context VM structure.
[53795]1383 * @param idCpu The id of the calling EMT.
1384 */
1385VMMR3DECL(int) VMR3WaitForDeviceReady(PVM pVM, VMCPUID idCpu)
1386{
1387 /*
1388 * Validate caller and resolve the CPU ID.
1389 */
1390 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1391 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1392 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1393 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1394
1395 /*
1396 * Tag along with the HLT mechanics for now.
1397 */
1398 int rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
1399 if (RT_SUCCESS(rc))
1400 return VINF_SUCCESS;
1401 return rc;
1402}
1403
1404
1405/**
1406 * Wakes up a CPU that has called VMR3WaitForDeviceReady.
1407 *
1408 * @returns VBox error status (never informational statuses).
[58122]1409 * @param pVM The cross context VM structure.
[53795]1410 * @param idCpu The id of the calling EMT.
1411 */
1412VMMR3DECL(int) VMR3NotifyCpuDeviceReady(PVM pVM, VMCPUID idCpu)
1413{
1414 /*
1415 * Validate caller and resolve the CPU ID.
1416 */
1417 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1418 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1419 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1420
1421 /*
1422 * Pretend it was an FF that got set since we've got logic for that already.
1423 */
1424 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
1425 return VINF_SUCCESS;
1426}
1427
[68853]1428
1429/**
1430 * Returns the number of active EMTs.
1431 *
1432 * This is used by the rendezvous code during VM destruction to avoid waiting
1433 * for EMTs that aren't around any more.
1434 *
1435 * @returns Number of active EMTs. 0 if invalid parameter.
1436 * @param pUVM The user mode VM structure.
1437 */
1438VMMR3_INT_DECL(uint32_t) VMR3GetActiveEmts(PUVM pUVM)
1439{
1440 UVM_ASSERT_VALID_EXT_RETURN(pUVM, 0);
1441 return pUVM->vm.s.cActiveEmts;
1442}
1443