VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMEmt.cpp@ 99740

Last change on this file since 99740 was 99576, checked in by vboxsync, 18 months ago

VMM: Preparations for getting interrupts injected into the guest. With ARMv8 there are two types of interrupts (normal interrupts and fast interrupts) which need to be mapped to forced-action flags. Because the PIC and APIC flags are not needed on ARM, those are mapped to IRQs and FIQs respectively. bugref:10389
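The IRQ/FIQ mapping this message describes shows up in VMR3WaitHalted() further down, where the ARMv8 build assembles the halted-wait force-action mask from the repurposed flags. A minimal sketch, using the identifiers from this file:

    #if defined(VBOX_VMM_TARGET_ARMV8)
        /* ARMv8: mask out the IRQ/FIQ forced-action flags the caller asked to ignore. */
        const uint32_t fMaskInterrupts = ((fFlags & VMWAITHALTED_F_IGNORE_IRQS) ? VMCPU_FF_INTERRUPT_IRQ : 0)
                                       | ((fFlags & VMWAITHALTED_F_IGNORE_FIQS) ? VMCPU_FF_INTERRUPT_FIQ : 0);
        const uint32_t fMask = VMCPU_FF_EXTERNAL_HALTED_MASK & ~fMaskInterrupts;
    #endif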

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 51.9 KB
/* $Id: VMEmt.cpp 99576 2023-05-03 10:24:27Z vboxsync $ */
/** @file
 * VM - Virtual Machine, The Emulation Thread.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_VM
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/tm.h>
#include "VMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/time.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
int vmR3EmulationThreadWithId(RTTHREAD hThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);


/**
 * The emulation thread main function.
 *
 * @returns Thread exit code.
 * @param   hThreadSelf The handle to the executing thread.
 * @param   pvArgs      Pointer to the user mode per-VCpu structure (UVMCPU).
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD hThreadSelf, void *pvArgs)
{
    PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
    return vmR3EmulationThreadWithId(hThreadSelf, pUVCpu, pUVCpu->idCpu);
}


/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * @returns Thread exit code.
 * @param   hThreadSelf The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD hThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM pUVM = pUVCpu->pUVM;
    int  rc;
    RT_NOREF_PV(hThreadSelf);

    AssertReleaseMsg(RT_VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
        pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", hThreadSelf, pUVM));
    VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
    ASMAtomicIncU32(&pUVM->vm.s.cActiveEmts);
    for (;;)
    {
        /*
         * During early init there is no pVM and/or pVCpu, so make a special path
         * for that to keep things clearly separate.
         */
        PVM    pVM   = pUVM->pVM;
        PVMCPU pVCpu = pUVCpu->pVCpu;
        if (!pVCpu || !pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create().
             */
            if (   (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
                && pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }
        }
        else
        {
            /*
             * Pending requests that need servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            enmBefore = pVM->enmVMState;
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
            {
                rc = VMMR3EmtRendezvousFF(pVM, pVM->apCpusR3[idCpu]);
                Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (   VM_FF_IS_SET(pVM, VM_FF_DBGF)
                     || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM, pVCpu);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VBOXSTRICTRC_VAL(VMR3ResetFF(pVM));
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (   rc == VINF_EM_TERMINATE
                || pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially resume
         * or start the VM; in that case we'll get a change in VM status
         * indicating that we're now running.
         */
        if (RT_SUCCESS(rc))
        {
            pVM = pUVM->pVM;
            if (pVM)
            {
                pVCpu = pVM->apCpusR3[idCpu];
                if (   pVM->enmVMState == VMSTATE_RUNNING
                    && VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
                {
                    rc = EMR3ExecuteVM(pVM, pVCpu);
                    Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
                }
            }
        }

    } /* forever */


    /*
     * Decrement the active EMT count if we haven't done it yet in vmR3Destroy.
     */
    if (!pUVCpu->vm.s.fBeenThruVmDestroy)
        ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);


    /*
     * Cleanup and exit.
     * EMT0 does the VM destruction after all other EMTs have deregistered and terminated.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         hThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    PVM pVM;
    if (   idCpu == 0
        && (pVM = pUVM->pVM) != NULL)
    {
        /* Wait for any other EMTs to terminate before we destroy the VM (see vmR3DestroyVM). */
        for (VMCPUID iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
        {
            RTTHREAD hThread;
            ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
            if (hThread != NIL_RTTHREAD)
            {
                int rc2 = RTThreadWait(hThread, 5 * RT_MS_1SEC, NULL);
                AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
                if (RT_FAILURE(rc2))
                    pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
            }
        }

        /* Switch to the terminated state, clearing the VM pointer and finally destroy the VM. */
        vmR3SetTerminated(pVM);

        pUVM->pVM = NULL;
        for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
        {
            pUVM->aCpus[iCpu].pVM   = NULL;
            pUVM->aCpus[iCpu].pVCpu = NULL;
        }

        int rc2 = GVMMR3DestroyVM(pUVM, pVM);
        AssertLogRelRC(rc2);
    }
    /* Deregister the EMT with VMMR0. */
    else if (   idCpu != 0
             && (pVM = pUVM->pVM) != NULL)
    {
        int rc2 = GVMMR3DeregisterVCpu(pVM, idCpu);
        AssertLogRelRC(rc2);
    }

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
        pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}


/**
 * Gets the name of a halt method.
 *
 * @returns Pointer to a read only string.
 * @param   enmMethod   The method.
 */
static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
{
    switch (enmMethod)
    {
        case VMHALTMETHOD_BOOTSTRAP:    return "bootstrap";
        case VMHALTMETHOD_DEFAULT:      return "default";
        case VMHALTMETHOD_OLD:          return "old";
        case VMHALTMETHOD_1:            return "method1";
        //case VMHALTMETHOD_2:            return "method2";
        case VMHALTMETHOD_GLOBAL_1:     return "global1";
        default:                        return "unknown";
    }
}


/**
 * Signal a fatal wait error.
 *
 * @returns Fatal error code to be propagated up the call stack.
 * @param   pUVCpu  The user mode per CPU structure of the calling
 *                  EMT.
 * @param   pszFmt  The error format with a single %Rrc in it.
 * @param   rcFmt   The status code to format.
 */
static int vmR3FatalWaitError(PUVMCPU pUVCpu, const char *pszFmt, int rcFmt)
{
    /** @todo This is wrong ... raise a fatal error / guru meditation
     *        instead. */
    AssertLogRelMsgFailed((pszFmt, rcFmt));
    ASMAtomicUoWriteBool(&pUVCpu->pUVM->vm.s.fTerminateEMT, true);
    if (pUVCpu->pVM)
        VM_FF_SET(pUVCpu->pVM, VM_FF_CHECK_VM_STATE);
    return VERR_VM_FATAL_WAIT_ERROR;
}


/**
 * The old halt loop.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps setting an FF.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS < 870000) /* this is a bit speculative... works fine on Linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedYield   = RTTimeNanoTS();
                RTThreadYield(); /* this is the best we can do here */
                uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Initialize the configuration of halt method 1 & 2.
 *
 * @return VBox status code. Failure on invalid CFGM data.
 * @param  pUVM     The user mode VM structure.
 */
static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
{
    /*
     * The defaults.
     */
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg        =   2*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg        =  75*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg           =  30*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg            =  20*1000000;
#else
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg        =   5*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg        = 200*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg           =  20*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg            =   2*1000000;
#endif

    /*
     * Query overrides.
     *
     * I don't have time to bother with niceties such as invalid value checks
     * here right now. Sorry.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
    if (pCfg)
    {
        uint32_t u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
            pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
        LogRel(("VMEmt: HaltedMethod1 config: %d/%d/%d/%d/%d\n",
                pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
                pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
    }

    return VINF_SUCCESS;
}


/**
 * Initialize halt method 1.
 *
 * @return VBox status code.
 * @param  pUVM     Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
{
    return vmR3HaltMethod12ReadConfigU(pUVM);
}


/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
 * the lag has been eliminated.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM   pUVM  = pUVCpu->pUVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    PVM    pVM   = pUVCpu->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool     fBlockOnce   = false;
    bool     fSpinning    = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
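                /* Throttle blocking while spinning: block once more only when the
                   time since the last block exceeds an interval that grows with the
                   lag (lag / divisor) but is clamped between the configured minimum
                   and maximum block intervals. */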
                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (   (   !fSpinning
                || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            && u64NanoTS >= 100000) /* 0.100 ms */
#else
            && u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

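            /* Pick the sleep interval: at most 15 ms, shortened by the average
               oversleep observed in earlier blocks (tracked in the statistics
               below) so we tend to wake up before the deadline; never below 1 ms. */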
            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
            uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);

            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVCpu->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (   fBlockOnce
                && Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Initialize the global 1 halt method.
 *
 * @return VBox status code.
 * @param  pUVM     Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
{
    /*
     * The defaults.
     */
    uint32_t cNsResolution = SUPSemEventMultiGetResolution(pUVM->vm.s.pSession);
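    /* Derive the spin-before-block threshold from the host's event semaphore
       resolution: coarse timers (above 500 us) get a fixed 50 us threshold,
       medium ones a quarter of their resolution, and fine-grained ones
       (100 us or less) just 2 us. */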
    if (cNsResolution > 5*RT_NS_100US)
        pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 50000;
    else if (cNsResolution > RT_NS_100US)
        pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = cNsResolution / 4;
    else
        pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 2000;

    /*
     * Query overrides.
     *
     * I don't have time to bother with niceties such as invalid value checks
     * here right now. Sorry.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedGlobal1");
    if (pCfg)
    {
        uint32_t u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "SpinBlockThreshold", &u32)))
            pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = u32;
    }
    LogRel(("VMEmt: HaltedGlobal1 config: cNsSpinBlockThresholdCfg=%u\n",
            pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg));
    return VINF_SUCCESS;
}


/**
 * The global 1 halt method - Block in GMM (ring-0) and let it
 * try to take care of the global scheduling of EMT threads.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM   pUVM  = pUVCpu->pUVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    PVM    pVM   = pUVCpu->pVM;
    Assert(VMMGetCpu(pVM) == pVCpu);
    NOREF(u64Now);

    /*
     * Halt loop.
     */
    //uint64_t u64NowLog, u64Start;
    //u64Start = u64NowLog = RTTimeNanoTS();
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        //u64NowLog = RTTimeNanoTS();
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta >= pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg)
        {
            VMMR3YieldStop(pVM);
            if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
                || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
                break;

            //RTLogPrintf("loop=%-3d u64GipTime=%'llu / %'llu now=%'llu / %'llu\n", cLoops, u64GipTime, u64Delta, u64NowLog, u64GipTime - u64NowLog);
            uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
            rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            uint64_t const u64EndSchedHalt     = RTTimeNanoTS();
            uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);

            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                rc = vmR3FatalWaitError(pUVCpu, "vmR3HaltGlobal1Halt: VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
                break;
            }
            else
            {
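                /* Classify the wakeup accuracy: more than 50 us past the requested
                   GIP deadline counts as oversleeping, more than 50 us early as
                   insomnia, anything in between as on time. */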
                int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
                if (cNsOverslept > 50000)
                    STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept);
                else if (cNsOverslept < -50000)
                    STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia, cNsElapsedSchedHalt);
                else
                    STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime, cNsElapsedSchedHalt);
            }
        }
        /*
         * When spinning, call upon the GVMM and do some wakeups once
         * in a while; it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            uint64_t const u64StartSchedYield   = RTTimeNanoTS();
            rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
        }
    }
    //RTLogPrintf("*** %u loops %'llu; lag=%RU64\n", cLoops, u64NowLog - u64Start, TMVirtualSyncGetLag(pVM));

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * The global 1 halt method - VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pUVM->pVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu->idCpu == pUVCpu->idCpu);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
        if (rc == VERR_INTERRUPTED)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "vmR3HaltGlobal1Wait: VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * The global 1 halt method - VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3HaltGlobal1NotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    /*
     * With ring-0 halting, the fWait flag isn't set, so we have to check the
     * CPU state to figure out whether to do a wakeup call.
     */
    PVMCPU pVCpu = pUVCpu->pVCpu;
    if (pVCpu)
    {
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
        if (enmState == VMCPUSTATE_STARTED_HALTED || pUVCpu->vm.s.fWait)
        {
            int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
            AssertRC(rc);
        }
        else if (   (fFlags & VMNOTIFYFF_FLAGS_POKE)
                 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
        {
            if (enmState == VMCPUSTATE_STARTED_EXEC)
            {
                if (fFlags & VMNOTIFYFF_FLAGS_POKE)
                {
                    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
                    AssertRC(rc);
                }
            }
            else if (   enmState == VMCPUSTATE_STARTED_EXEC_NEM
                     || enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
                NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
        }
    }
    /* This probably makes little sense: */
    else if (pUVCpu->vm.s.fWait)
    {
        int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
        AssertRC(rc);
    }
}


/**
 * Bootstrap VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
{
    PUVM pUVM = pUVCpu->pUVM;

    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pNormalReqs   || pUVM->vm.s.pPriorityReqs)   /* global requests pending? */
            break;
        if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs) /* local requests pending? */
            break;

        if (   pUVCpu->pVM
            && (   VM_FF_IS_ANY_SET(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
                || VMCPU_FF_IS_ANY_SET(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
               )
           )
            break;
        if (pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Bootstrap VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3BootstrapNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
        AssertRC(rc);
    }
    NOREF(fFlags);
}


/**
 * Default VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    int    rc    = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Default VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3DefaultNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
        AssertRC(rc);
    }
    else
    {
        PVMCPU pVCpu = pUVCpu->pVCpu;
        if (pVCpu)
        {
            VMCPUSTATE enmState = pVCpu->enmState;
            if (   enmState == VMCPUSTATE_STARTED_EXEC_NEM
                || enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
                NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
        }
    }
}


/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method ID. */
    VMHALTMETHOD enmHaltMethod;
    /** Set if the method supports halting directly in ring-0. */
    bool         fMayHaltInRing0;
    /** The init function for loading config and initializing variables. */
    DECLR3CALLBACKMEMBER(int,  pfnInit,(PUVM pUVM));
    /** The term function. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The VMR3WaitHaltedU function. */
    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
    /** The VMR3WaitU function. */
    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVMCPU pUVCpu));
    /** The VMR3NotifyCpuFFU function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags));
    /** The VMR3NotifyGlobalFFU function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags));
} g_aHaltMethods[] =
{
    { VMHALTMETHOD_BOOTSTRAP, false, NULL,                NULL, NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyCpuFF,   NULL },
    { VMHALTMETHOD_OLD,       false, NULL,                NULL, vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyCpuFF,     NULL },
    { VMHALTMETHOD_1,         false, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyCpuFF,     NULL },
    { VMHALTMETHOD_GLOBAL_1,  true,  vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL },
};
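/* Note: entry 0 must stay VMHALTMETHOD_BOOTSTRAP with no init/term callbacks;
   vmR3SetHaltMethodCallback() asserts this and falls back to index 0 when a
   newly selected method's init callback fails. */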


/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure EMT wakes up and promptly services an FF request.
 *
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 * @internal
 */
VMMR3_INT_DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
{
    LogFlow(("VMR3NotifyGlobalFFU:\n"));
    uint32_t iHaltMethod = pUVM->vm.s.iHaltMethod;

    if (g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */
        g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF(pUVM, fFlags);
    else
        for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
            g_aHaltMethods[iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags);
}


/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure EMT wakes up and promptly services an FF request.
 *
 * @param   pUVCpu  Pointer to the user mode per CPU VM structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 * @internal
 */
VMMR3_INT_DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
{
    PUVM pUVM = pUVCpu->pUVM;

    LogFlow(("VMR3NotifyCpuFFU:\n"));
    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(pUVCpu, fFlags);
}


/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  Combination of VMWAITHALTED_F_XXX.
 * @thread  The emulation thread.
 * @remarks Made visible for implementing vmsvga sync register.
 * @internal
 */
VMMR3_INT_DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    LogFlow(("VMR3WaitHalted: fFlags=%#x\n", fFlags));

    /*
     * Check Relevant FFs.
     */
#if defined(VBOX_VMM_TARGET_ARMV8)
    const uint32_t fMaskInterrupts = ((fFlags & VMWAITHALTED_F_IGNORE_IRQS) ? VMCPU_FF_INTERRUPT_IRQ : 0)
                                   | ((fFlags & VMWAITHALTED_F_IGNORE_FIQS) ? VMCPU_FF_INTERRUPT_FIQ : 0);
    const uint32_t fMask = VMCPU_FF_EXTERNAL_HALTED_MASK & ~fMaskInterrupts;
#else
    const uint32_t fMask = !(fFlags & VMWAITHALTED_F_IGNORE_IRQS)
                         ? VMCPU_FF_EXTERNAL_HALTED_MASK
                         : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
#endif

    if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
        || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#RX64)\n", pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting; TM might have clock(s) running
     * only at certain times and needs to be notified.
     */
    if (pVCpu->idCpu == 0)
        VMMR3YieldSuspend(pVM);
    TMNotifyStartOfHalt(pVCpu);

    /*
     * Record halt averages for the last second.
     */
    PUVMCPU pUVCpu = pVCpu->pUVCpu;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        if (off > _4G || !pUVCpu->vm.s.cHalts)
        {
            pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVCpu->vm.s.HaltFrequency = 1;
        }
        else
        {
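            /* Average ns between halts and halts per second; the frequency is
               cHalts * 1e9 / off, done with a 64x32/32 helper to avoid overflow. */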
            pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
            pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVCpu->vm.s.u64HaltsStartTS = u64Now;
        pUVCpu->vm.s.cHalts = 0;
    }
    pUVCpu->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    VMCPU_ASSERT_STATE_2(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM);
    VMCPUSTATE enmStateOld = VMCPU_GET_STATE(pVCpu);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
    VMCPU_SET_STATE(pVCpu, enmStateOld);

    /*
     * Notify TM and resume the yielder
     */
    TMNotifyEndOfHalt(pVCpu);
    if (pVCpu->idCpu == 0)
        VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
    return rc;
}


/**
 * Suspended VM Wait.
 * Only a handful of forced actions will cause the function to
 * return to the caller.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @thread  The emulation thread.
 * @internal
 */
VMMR3_INT_DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
{
    LogFlow(("VMR3WaitU:\n"));

    /*
     * Check Relevant FFs.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    if (    pVM
        && (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
           )
       )
    {
        LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * Do waiting according to the halt method (so VMR3NotifyFF
     * doesn't have to special case anything).
     */
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
    LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pUVM->pVM ? pUVM->pVM->fGlobalForcedActions : 0));
    return rc;
}


/**
 * Interface that PDMR3Suspend, PDMR3PowerOff and PDMR3Reset use when they wait
 * for the handling of asynchronous notifications to complete.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @thread  The emulation thread.
 */
VMMR3_INT_DECL(int) VMR3AsyncPdmNotificationWaitU(PUVMCPU pUVCpu)
{
    LogFlow(("VMR3AsyncPdmNotificationWaitU:\n"));
    return VMR3WaitU(pUVCpu);
}


/**
 * Interface that the asynchronous-notification-completed PDM helper methods
 * use for EMT0 when it is waiting inside VMR3AsyncPdmNotificationWaitU().
 *
 * @param   pUVM    Pointer to the user mode VM structure.
 */
VMMR3_INT_DECL(void) VMR3AsyncPdmNotificationWakeupU(PUVM pUVM)
{
    LogFlow(("VMR3AsyncPdmNotificationWakeupU:\n"));
    VM_FF_SET(pUVM->pVM, VM_FF_REQUEST); /* this will have to do for now. */
    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[0], 0 /*fFlags*/);
}


/**
 * Rendezvous callback that will be called once.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  The new g_aHaltMethods index.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3SetHaltMethodCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    PUVM      pUVM = pVM->pUVM;
    int       rc   = VINF_SUCCESS;
    uintptr_t i    = (uintptr_t)pvUser;
    Assert(i < RT_ELEMENTS(g_aHaltMethods));

    /*
     * Main job is done once on EMT0 (it goes through here first).
     */
    if (pVCpu->idCpu == 0)
    {
        /*
         * Terminate the old one.
         */
        if (   pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
            && g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
        {
            g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
            pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
        }

        /* Assert that the failure fallback is where we expect. */
        Assert(g_aHaltMethods[0].enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
        Assert(!g_aHaltMethods[0].pfnTerm && !g_aHaltMethods[0].pfnInit);

        /*
         * Init the new one.
         */
        memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
        if (g_aHaltMethods[i].pfnInit)
        {
            rc = g_aHaltMethods[i].pfnInit(pUVM);
            if (RT_FAILURE(rc))
            {
                /* Fall back on the bootstrap method. This requires no
                   init/term (see assertion above), and will always work. */
                AssertLogRelRC(rc);
                i = 0;
            }
        }

        /*
         * Commit it.
         */
        pUVM->vm.s.enmHaltMethod = g_aHaltMethods[i].enmHaltMethod;
        ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
    }
    else
        i = pUVM->vm.s.iHaltMethod;

    /*
     * All EMTs must update their ring-0 halt configuration.
     */
    VMMR3SetMayHaltInRing0(pVCpu, g_aHaltMethods[i].fMayHaltInRing0,
                           g_aHaltMethods[i].enmHaltMethod == VMHALTMETHOD_GLOBAL_1
                           ? pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg : 0);

    return rc;
}


/**
 * Changes the halt method.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   enmHaltMethod   The new halt method.
 * @thread  EMT.
 */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
{
    PVM pVM = pUVM->pVM; Assert(pVM);
    VM_ASSERT_EMT(pVM);
    AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);

    /*
     * Resolve default (can be overridden in the configuration).
     */
    if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
        if (RT_SUCCESS(rc))
        {
            enmHaltMethod = (VMHALTMETHOD)u32;
            if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
                return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
        }
        else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
            return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
        else
            enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
        //enmHaltMethod = VMHALTMETHOD_1;
        //enmHaltMethod = VMHALTMETHOD_OLD;
    }

    /*
     * The global halt method doesn't work in driverless mode, so fall back on
     * method #1 instead.
     */
    if (!SUPR3IsDriverless() || enmHaltMethod != VMHALTMETHOD_GLOBAL_1)
        LogRel(("VMEmt: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));
    else
    {
        LogRel(("VMEmt: Halt method %s (%d) not available in driverless mode, using %s (%d) instead\n",
                vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod, vmR3GetHaltMethodName(VMHALTMETHOD_1), VMHALTMETHOD_1));
        enmHaltMethod = VMHALTMETHOD_1;
    }

    /*
     * Find the descriptor.
     */
    unsigned i = 0;
    while (   i < RT_ELEMENTS(g_aHaltMethods)
           && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
        i++;
    AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);

    /*
     * This needs to be done while the other EMTs are not sleeping or otherwise messing around.
     */
    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING, vmR3SetHaltMethodCallback, (void *)(uintptr_t)i);
}


/**
 * Special interface for implementing a HLT-like port on a device.
 *
 * This can be called directly from device code, provided the device is trusted
 * to access the VMM directly. Since we may not have an accurate register set
 * and the caller certainly shouldn't (device code does not access CPU
 * registers), this function will return when interrupts are pending regardless
 * of the actual EFLAGS.IF state.
 *
 * @returns VBox error status (never informational statuses).
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The id of the calling EMT.
 */
VMMR3DECL(int) VMR3WaitForDeviceReady(PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate caller and resolve the CPU ID.
     */
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
    PVMCPU pVCpu = pVM->apCpusR3[idCpu];
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);

    /*
     * Tag along with the HLT mechanics for now.
     */
    int rc = VMR3WaitHalted(pVM, pVCpu, 0 /*fFlags*/);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;
    return rc;
}


/**
 * Wakes up a CPU that has called VMR3WaitForDeviceReady.
 *
 * @returns VBox error status (never informational statuses).
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The id of the calling EMT.
 */
VMMR3DECL(int) VMR3NotifyCpuDeviceReady(PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate caller and resolve the CPU ID.
     */
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
    PVMCPU pVCpu = pVM->apCpusR3[idCpu];

    /*
     * Pretend it was an FF that got set since we've got logic for that already.
     */
    VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
    return VINF_SUCCESS;
}


/**
 * Returns the number of active EMTs.
 *
 * This is used by the rendezvous code during VM destruction to avoid waiting
 * for EMTs that aren't around any more.
 *
 * @returns Number of active EMTs. 0 if invalid parameter.
 * @param   pUVM    The user mode VM structure.
 */
VMMR3_INT_DECL(uint32_t) VMR3GetActiveEmts(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, 0);
    return pUVM->vm.s.cActiveEmts;
}