VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMM.cpp@99740

Last change on this file since 99740 was 99576, checked in by vboxsync, 18 months ago

VMM: Preparations for getting interrupts injected into the guest. With ARMv8 there are two types of interrupts (normal interrupts and fast interrupts) which need to be mapped to forced-action flags. Because the PIC and APIC flags are not needed on ARM, they are mapped to IRQs and FIQs respectively. bugref:10389

1/* $Id: VMM.cpp 99576 2023-05-03 10:24:27Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28//#define NO_SUPCALLR0VMM
29
30/** @page pg_vmm VMM - The Virtual Machine Monitor
31 *
32 * The VMM component is two things at the moment: it's a component doing a few
33 * management and routing tasks, and it's the whole virtual machine monitor
34 * thing. For hysterical reasons, it is not doing all the management that one
35 * would expect; this is instead done by @ref pg_vm. We'll address this
36 * misdesign eventually, maybe.
37 *
38 * VMM is made up of these components:
39 * - @subpage pg_cfgm
40 * - @subpage pg_cpum
41 * - @subpage pg_dbgf
42 * - @subpage pg_em
43 * - @subpage pg_gim
44 * - @subpage pg_gmm
45 * - @subpage pg_gvmm
46 * - @subpage pg_hm
47 * - @subpage pg_iem
48 * - @subpage pg_iom
49 * - @subpage pg_mm
50 * - @subpage pg_nem
51 * - @subpage pg_pdm
52 * - @subpage pg_pgm
53 * - @subpage pg_selm
54 * - @subpage pg_ssm
55 * - @subpage pg_stam
56 * - @subpage pg_tm
57 * - @subpage pg_trpm
58 * - @subpage pg_vm
59 *
60 *
61 * @see @ref grp_vmm @ref grp_vm @subpage pg_vmm_guideline @subpage pg_raw
62 *
63 *
64 * @section sec_vmmstate VMM State
65 *
66 * @image html VM_Statechart_Diagram.gif
67 *
68 * To be written.
69 *
70 *
71 * @subsection subsec_vmm_init VMM Initialization
72 *
73 * To be written.
74 *
75 *
76 * @subsection subsec_vmm_term VMM Termination
77 *
78 * To be written.
79 *
80 *
81 * @section sec_vmm_limits VMM Limits
82 *
83 * There are various resource limits imposed by the VMM and its
84 * sub-components. We'll list some of them here.
85 *
86 * On 64-bit hosts:
87 * - Max 8191 VMs. Imposed by GVMM's handle allocation (GVMM_MAX_HANDLES),
88 * can be increased up to 64K - 1.
89 * - Max 16TB - 64KB of the host memory can be used for backing VM RAM and
90 * ROM pages. The limit is imposed by the 32-bit page ID used by GMM.
91 * - A VM can be assigned all the memory we can use (16TB); however, the
92 * Main API will restrict this to 2TB (MM_RAM_MAX_IN_MB).
93 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
94 *
95 * On 32-bit hosts:
96 * - Max 127 VMs. Imposed by GMM's per page structure.
97 * - Max 64GB - 64KB of the host memory can be used for backing VM RAM and
98 * ROM pages. The limit is imposed by the 28-bit page ID used
99 * internally in GMM. It is also limited by PAE.
100 * - A VM can be assigned all the memory GMM can allocate; however, the
101 * Main API will restrict this to 3584MB (MM_RAM_MAX_IN_MB).
102 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
103 *
104 */
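/* A back-of-the-envelope check of the 16TB figure above: GMM tracks host pages
 * with a 32-bit page ID and 4KiB pages, so the theoretical ceiling is
 * 2^32 pages * 4KiB = 16TiB.  The 64KB deduction presumably corresponds to a
 * handful of page ID values reserved for special purposes (e.g. NIL). */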
105
106
107/*********************************************************************************************************************************
108* Header Files *
109*********************************************************************************************************************************/
110#define LOG_GROUP LOG_GROUP_VMM
111#include <VBox/vmm/vmm.h>
112#include <VBox/vmm/vmapi.h>
113#include <VBox/vmm/pgm.h>
114#include <VBox/vmm/cfgm.h>
115#include <VBox/vmm/pdmqueue.h>
116#include <VBox/vmm/pdmcritsect.h>
117#include <VBox/vmm/pdmcritsectrw.h>
118#include <VBox/vmm/pdmapi.h>
119#include <VBox/vmm/cpum.h>
120#include <VBox/vmm/gim.h>
121#include <VBox/vmm/mm.h>
122#include <VBox/vmm/nem.h>
123#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
124# include <VBox/vmm/iem.h>
125#endif
126#include <VBox/vmm/iom.h>
127#include <VBox/vmm/trpm.h>
128#include <VBox/vmm/selm.h>
129#include <VBox/vmm/em.h>
130#include <VBox/sup.h>
131#include <VBox/vmm/dbgf.h>
132#if defined(VBOX_VMM_TARGET_ARMV8)
133# include <VBox/vmm/gic.h>
134#else
135# include <VBox/vmm/apic.h>
136#endif
137#include <VBox/vmm/ssm.h>
138#include <VBox/vmm/tm.h>
139#include "VMMInternal.h"
140#include <VBox/vmm/vmcc.h>
141
142#include <VBox/err.h>
143#include <VBox/param.h>
144#include <VBox/version.h>
145#include <VBox/vmm/hm.h>
146#include <iprt/assert.h>
147#include <iprt/alloc.h>
148#include <iprt/asm.h>
149#include <iprt/time.h>
150#include <iprt/semaphore.h>
151#include <iprt/stream.h>
152#include <iprt/string.h>
153#include <iprt/stdarg.h>
154#include <iprt/ctype.h>
155#include <iprt/x86.h>
156
157
158/*********************************************************************************************************************************
159* Defined Constants And Macros *
160*********************************************************************************************************************************/
161/** The saved state version. */
162#define VMM_SAVED_STATE_VERSION 4
163/** The saved state version used by v3.0 and earlier. (Teleportation) */
164#define VMM_SAVED_STATE_VERSION_3_0 3
165
166/** Macro for flushing the ring-0 logging. */
167#define VMM_FLUSH_R0_LOG(a_pVM, a_pVCpu, a_pLogger, a_pR3Logger) \
168 do { \
169 size_t const idxBuf = (a_pLogger)->idxBuf % VMMLOGGER_BUFFER_COUNT; \
170 if ( (a_pLogger)->aBufs[idxBuf].AuxDesc.offBuf == 0 \
171 || (a_pLogger)->aBufs[idxBuf].AuxDesc.fFlushedIndicator) \
172 { /* likely? */ } \
173 else \
174 vmmR3LogReturnFlush(a_pVM, a_pVCpu, a_pLogger, idxBuf, a_pR3Logger); \
175 } while (0)
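/* Typical use of VMM_FLUSH_R0_LOG, exactly as it appears in VMMR3InitR0() and
 * the other ring-0 call sites further down in this file: flush the per-EMT
 * debug logger (when LOG_ENABLED) and the release logger right after coming
 * back from ring-0.
 *
 * @code
 * #ifdef LOG_ENABLED
 *     VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
 * #endif
 *     VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
 * @endcode
 */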
176
177
178/*********************************************************************************************************************************
179* Internal Functions *
180*********************************************************************************************************************************/
181static void vmmR3InitRegisterStats(PVM pVM);
182static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
183static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
184#if 0 /* pointless when timers don't run on EMT */
185static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser);
186#endif
187static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
188 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
189static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu);
190static FNRTTHREAD vmmR3LogFlusher;
191static void vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf,
192 PRTLOGGER pDstLogger);
193static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
194
195
196
197/**
198 * Initializes the VMM.
199 *
200 * @returns VBox status code.
201 * @param pVM The cross context VM structure.
202 */
203VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
204{
205 LogFlow(("VMMR3Init\n"));
206
207 /*
208 * Assert alignment, sizes and order.
209 */
210 AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
211 AssertCompile(RT_SIZEOFMEMB(VMCPU, vmm.s) <= RT_SIZEOFMEMB(VMCPU, vmm.padding));
212
213 /*
214 * Init basic VM VMM members.
215 */
216 pVM->vmm.s.pahEvtRendezvousEnterOrdered = NULL;
217 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
218 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
219 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
220 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
221 pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
222 pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
223 pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
224 pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
225 pVM->vmm.s.nsProgramStart = RTTimeProgramStartNanoTS();
226
227#if 0 /* pointless when timers don't run on EMT */
228 /** @cfgm{/YieldEMTInterval, uint32_t, 1, UINT32_MAX, 23, ms}
229 * The EMT yield interval. The EMT yielding is a hack we employ to play a
230 * bit nicer with the rest of the system (like for instance the GUI).
231 */
232 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies,
233 23 /* Value arrived at after experimenting with the grub boot prompt. */);
234 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
235#endif
236
237 /** @cfgm{/VMM/UsePeriodicPreemptionTimers, boolean, true}
238 * Controls whether we employ per-cpu preemption timers to limit the time
239 * spent executing guest code. This option is not available on all
240     * platforms, in which case the setting is silently ignored. If we are
241 * running in VT-x mode, we will use the VMX-preemption timer instead of
242 * this one when possible.
243 */
244 PCFGMNODE pCfgVMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
245 int rc = CFGMR3QueryBoolDef(pCfgVMM, "UsePeriodicPreemptionTimers", &pVM->vmm.s.fUsePeriodicPreemptionTimers, true);
246 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"VMM/UsePeriodicPreemptionTimers\", rc=%Rrc\n", rc), rc);
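    /* Sketch of how this key can be overridden from the host side, assuming the
     * usual Main "VBoxInternal/" extradata-to-CFGM mapping (the exact spelling
     * is an assumption; consult the user manual):
     *
     * @code
     *   VBoxManage setextradata "MyVM" "VBoxInternal/VMM/UsePeriodicPreemptionTimers" 0
     * @endcode
     */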
247
248 /*
249 * Initialize the VMM rendezvous semaphores.
250 */
251 pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
252 if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
253 return VERR_NO_MEMORY;
254 for (VMCPUID i = 0; i < pVM->cCpus; i++)
255 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
256 for (VMCPUID i = 0; i < pVM->cCpus; i++)
257 {
258 rc = RTSemEventCreate(&pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
259 AssertRCReturn(rc, rc);
260 }
261 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousEnterOneByOne);
262 AssertRCReturn(rc, rc);
263 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
264 AssertRCReturn(rc, rc);
265 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousDone);
266 AssertRCReturn(rc, rc);
267 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
268 AssertRCReturn(rc, rc);
269 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPush);
270 AssertRCReturn(rc, rc);
271 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPop);
272 AssertRCReturn(rc, rc);
273 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
274 AssertRCReturn(rc, rc);
275 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
276 AssertRCReturn(rc, rc);
277
278 /*
279 * Register the saved state data unit.
280 */
281 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
282 NULL, NULL, NULL,
283 NULL, vmmR3Save, NULL,
284 NULL, vmmR3Load, NULL);
285 if (RT_FAILURE(rc))
286 return rc;
287
288 /*
289 * Register the Ring-0 VM handle with the session for fast ioctl calls.
290 */
291 bool const fDriverless = SUPR3IsDriverless();
292 if (!fDriverless)
293 {
294 rc = SUPR3SetVMForFastIOCtl(VMCC_GET_VMR0_FOR_CALL(pVM));
295 if (RT_FAILURE(rc))
296 return rc;
297 }
298
299#ifdef VBOX_WITH_NMI
300 /*
301 * Allocate mapping for the host APIC.
302 */
303 rc = MMR3HyperReserve(pVM, HOST_PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
304 AssertRC(rc);
305#endif
306 if (RT_SUCCESS(rc))
307 {
308 /*
309 * Start the log flusher thread.
310 */
311 if (!fDriverless)
312 rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/,
313 RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk");
314 if (RT_SUCCESS(rc))
315 {
316
317 /*
318 * Debug info and statistics.
319 */
320 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
321 vmmR3InitRegisterStats(pVM);
322 vmmInitFormatTypes();
323
324 return VINF_SUCCESS;
325 }
326 }
327 /** @todo Need failure cleanup? */
328
329 return rc;
330}
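/* The rendezvous semaphores created above back the VMMR3EmtRendezvous() API
 * implemented further down in this file.  A minimal usage sketch (hypothetical
 * callback name, not code from this file):
 *
 * @code
 * static DECLCALLBACK(VBOXSTRICTRC) myRendezvousWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 * {
 *     RT_NOREF(pVM, pVCpu, pvUser); // per-EMT work goes here
 *     return VINF_SUCCESS;
 * }
 *
 * // later, from an EMT:
 * int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, myRendezvousWorker, NULL);
 * @endcode
 */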
331
332
333/**
334 * VMMR3Init worker that registers the statistics with STAM.
335 *
336 * @param pVM The cross context VM structure.
337 */
338static void vmmR3InitRegisterStats(PVM pVM)
339{
340 RT_NOREF_PV(pVM);
341
342 /* Nothing to do here in driverless mode. */
343 if (SUPR3IsDriverless())
344 return;
345
346 /*
347 * Statistics.
348 */
349 STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
350 STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
351 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
352 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
353 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
354 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
355 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
356 STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
357 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
358 STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
359 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
360 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
361 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_WRITE returns.");
362 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_COMMIT_WRITE returns.");
363 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ returns.");
364 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_WRITE returns.");
365 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_COMMIT_WRITE returns.");
366 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ_WRITE returns.");
367 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
368 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
369 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRRead, STAMTYPE_COUNTER, "/VMM/RZRet/MSRRead", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_READ returns.");
370 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MSRWrite", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_WRITE returns.");
371 STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
372 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
373 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
374 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
375 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
376 STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
377 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
378 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
379 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
380 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
381 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
382 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
383 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Total, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
384 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Unknown, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Unknown", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns without responsible force flag.");
385 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3FF, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TO_R3.");
386 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3TMVirt, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/TMVirt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_TM_VIRTUAL_SYNC.");
387 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3HandyPages, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Handy", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PGM_NEED_HANDY_PAGES.");
388 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3PDMQueues, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/PDMQueue", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_QUEUES.");
389 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Rendezvous, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Rendezvous", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_EMT_RENDEZVOUS.");
390 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Timer, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Timer", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TIMER.");
391 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3DMA, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/DMA", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_DMA.");
392 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3CritSect, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/CritSect", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_PDM_CRITSECT.");
393 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iem, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IEM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IEM.");
394 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iom, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IOM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IOM.");
395 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
396 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
397 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
398 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
399 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
400 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
401
402 STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-Flushes", STAMUNIT_OCCURENCES, "Total number of buffer flushes");
403 STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherNoWakeUp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-NoWakups", STAMUNIT_OCCURENCES, "Times the flusher thread didn't need waking up.");
404
405 for (VMCPUID i = 0; i < pVM->cCpus; i++)
406 {
407 PVMCPU pVCpu = pVM->apCpusR3[i];
408 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlock", i);
409 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOnTime", i);
410 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOverslept", i);
411 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockInsomnia", i);
412 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExec, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec", i);
413 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExecFromSpin, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromSpin", i);
414 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExecFromBlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromBlock", i);
415 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3", i);
416 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3FromSpin, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/FromSpin", i);
417 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3Other, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/Other", i);
418 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PendingFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PendingFF", i);
419 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3SmallDelta, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/SmallDelta", i);
420 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PostNoInt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PostWaitNoInt", i);
421 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PostPendingFF,STAMTYPE_COUNTER,STAMVISIBILITY_ALWAYS,STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PostWaitPendingFF", i);
422 STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0Halts, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryCounter", i);
423 STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0HaltsSucceeded, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistorySucceeded", i);
424 STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0HaltsToRing3, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryToRing3", i);
425
426 STAMR3RegisterF(pVM, &pVCpu->cEmtHashCollisions, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/VMM/EmtHashCollisions/Emt%02u", i);
427
428 PVMMR3CPULOGGER pShared = &pVCpu->vmm.s.u.s.Logger;
429 STAMR3RegisterF(pVM, &pShared->StatFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg", i);
430 STAMR3RegisterF(pVM, &pShared->StatCannotBlock, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg/CannotBlock", i);
431 STAMR3RegisterF(pVM, &pShared->StatWait, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Reg/Wait", i);
432 STAMR3RegisterF(pVM, &pShared->StatRaces, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Reg/Races", i);
433 STAMR3RegisterF(pVM, &pShared->StatRacesToR0, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg/RacesToR0", i);
434 STAMR3RegisterF(pVM, &pShared->cbDropped, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/cbDropped", i);
435 STAMR3RegisterF(pVM, &pShared->cbBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/cbBuf", i);
436 STAMR3RegisterF(pVM, &pShared->idxBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/idxBuf", i);
437
438 pShared = &pVCpu->vmm.s.u.s.RelLogger;
439 STAMR3RegisterF(pVM, &pShared->StatFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel", i);
440 STAMR3RegisterF(pVM, &pShared->StatCannotBlock, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel/CannotBlock", i);
441 STAMR3RegisterF(pVM, &pShared->StatWait, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Rel/Wait", i);
442 STAMR3RegisterF(pVM, &pShared->StatRaces, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Rel/Races", i);
443 STAMR3RegisterF(pVM, &pShared->StatRacesToR0, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel/RacesToR0", i);
444 STAMR3RegisterF(pVM, &pShared->cbDropped, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/cbDropped", i);
445 STAMR3RegisterF(pVM, &pShared->cbBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/cbBuf", i);
446 STAMR3RegisterF(pVM, &pShared->idxBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/idxBuf", i);
447 }
448}
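/* The counters registered above are bumped elsewhere in VMM with the STAM
 * macros; a minimal sketch (illustrative, not a line from this function):
 *
 * @code
 *     STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);   // e.g. once per world switch
 * @endcode
 *
 * The values then show up under the "/VMM/..." names in the usual statistics
 * views (e.g. "VBoxManage debugvm <vm> statistics", assuming that front-end).
 */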
449
450
451/**
452 * Worker for VMMR3InitR0 that calls ring-0 to do EMT specific initialization.
453 *
454 * @returns VBox status code.
455 * @param pVM The cross context VM structure.
456 * @param pVCpu The cross context per CPU structure.
457 * @thread EMT(pVCpu)
458 */
459static DECLCALLBACK(int) vmmR3InitR0Emt(PVM pVM, PVMCPU pVCpu)
460{
461 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_VMMR0_INIT_EMT, 0, NULL);
462}
463
464
465/**
466 * Initializes the R0 VMM.
467 *
468 * @returns VBox status code.
469 * @param pVM The cross context VM structure.
470 */
471VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
472{
473 int rc;
474 PVMCPU pVCpu = VMMGetCpu(pVM);
475 Assert(pVCpu && pVCpu->idCpu == 0);
476
477 /*
478 * Nothing to do here in driverless mode.
479 */
480 if (SUPR3IsDriverless())
481 return VINF_SUCCESS;
482
483 /*
484 * Make sure the ring-0 loggers are up to date.
485 */
486 rc = VMMR3UpdateLoggers(pVM);
487 if (RT_FAILURE(rc))
488 return rc;
489
490 /*
491 * Call Ring-0 entry with init code.
492 */
493#ifdef NO_SUPCALLR0VMM
494 //rc = VERR_GENERAL_FAILURE;
495 rc = VINF_SUCCESS;
496#else
497 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
498#endif
499
500 /*
501 * Flush the logs & deal with assertions.
502 */
503#ifdef LOG_ENABLED
504 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
505#endif
506 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
507 if (rc == VERR_VMM_RING0_ASSERTION)
508 rc = vmmR3HandleRing0Assert(pVM, pVCpu);
509 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
510 {
511 LogRel(("VMM: R0 init failed, rc=%Rra\n", rc));
512 if (RT_SUCCESS(rc))
513 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
514 }
515
516 /*
517 * Log stuff we learned in ring-0.
518 */
519 /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
520 if (pVM->vmm.s.fIsUsingContextHooks)
521 LogRel(("VMM: Enabled thread-context hooks\n"));
522 else
523 LogRel(("VMM: Thread-context hooks unavailable\n"));
524
525 /* Log RTThreadPreemptIsPendingTrusty() and RTThreadPreemptIsPossible() results. */
526 if (pVM->vmm.s.fIsPreemptPendingApiTrusty)
527 LogRel(("VMM: RTThreadPreemptIsPending() can be trusted\n"));
528 else
529 LogRel(("VMM: Warning! RTThreadPreemptIsPending() cannot be trusted! Need to update kernel info?\n"));
530 if (pVM->vmm.s.fIsPreemptPossible)
531 LogRel(("VMM: Kernel preemption is possible\n"));
532 else
533 LogRel(("VMM: Kernel preemption is not possible it seems\n"));
534
535 /*
536 * Send all EMTs to ring-0 to get their logger initialized.
537 */
538 for (VMCPUID idCpu = 0; RT_SUCCESS(rc) && idCpu < pVM->cCpus; idCpu++)
539 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmmR3InitR0Emt, 2, pVM, pVM->apCpusR3[idCpu]);
540
541 return rc;
542}
543
544
545/**
546 * Called when an init phase completes.
547 *
548 * @returns VBox status code.
549 * @param pVM The cross context VM structure.
550 * @param enmWhat Which init phase.
551 */
552VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
553{
554 int rc = VINF_SUCCESS;
555
556 switch (enmWhat)
557 {
558 case VMINITCOMPLETED_RING3:
559 {
560#if 0 /* pointless when timers don't run on EMT */
561 /*
562 * Create the EMT yield timer.
563 */
564 rc = TMR3TimerCreate(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, TMTIMER_FLAGS_NO_RING0,
565 "EMT Yielder", &pVM->vmm.s.hYieldTimer);
566 AssertRCReturn(rc, rc);
567
568 rc = TMTimerSetMillies(pVM, pVM->vmm.s.hYieldTimer, pVM->vmm.s.cYieldEveryMillies);
569 AssertRCReturn(rc, rc);
570#endif
571 break;
572 }
573
574 case VMINITCOMPLETED_HM:
575 {
576#if !defined(VBOX_VMM_TARGET_ARMV8)
577 /*
578 * Disable the periodic preemption timers if we can use the
579 * VMX-preemption timer instead.
580 */
581 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
582 && HMR3IsVmxPreemptionTimerUsed(pVM))
583 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
584 LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
585#endif
586
587 /*
588 * Last chance for GIM to update its CPUID leaves if it requires
589 * knowledge/information from HM initialization.
590 */
591/** @todo r=bird: This shouldn't be done from here, but rather from VM.cpp. There is no dependency on VMM here. */
592 rc = GIMR3InitCompleted(pVM);
593 AssertRCReturn(rc, rc);
594
595 /*
596 * CPUM's post-initialization (print CPUIDs).
597 */
598 CPUMR3LogCpuIdAndMsrFeatures(pVM);
599 break;
600 }
601
602 default: /* shuts up gcc */
603 break;
604 }
605
606 return rc;
607}
608
609
610/**
611 * Terminate the VMM bits.
612 *
613 * @returns VBox status code.
614 * @param pVM The cross context VM structure.
615 */
616VMMR3_INT_DECL(int) VMMR3Term(PVM pVM)
617{
618 PVMCPU pVCpu = VMMGetCpu(pVM);
619 Assert(pVCpu && pVCpu->idCpu == 0);
620
621 /*
622 * Call Ring-0 entry with termination code.
623 */
624 int rc = VINF_SUCCESS;
625 if (!SUPR3IsDriverless())
626 {
627#ifndef NO_SUPCALLR0VMM
628 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
629#endif
630 }
631
632 /*
633 * Flush the logs & deal with assertions.
634 */
635#ifdef LOG_ENABLED
636 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
637#endif
638 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
639 if (rc == VERR_VMM_RING0_ASSERTION)
640 rc = vmmR3HandleRing0Assert(pVM, pVCpu);
641 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
642 {
643 LogRel(("VMM: VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
644 if (RT_SUCCESS(rc))
645 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
646 }
647
648 /*
649 * Do clean ups.
650 */
651 for (VMCPUID i = 0; i < pVM->cCpus; i++)
652 {
653 RTSemEventDestroy(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
654 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
655 }
656 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
657 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
658 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
659 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
660 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousDone);
661 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
662 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousDoneCaller);
663 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
664 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
665 pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
666 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
667 pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
668 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
669 pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
670 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
671 pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
672
673 vmmTermFormatTypes();
674
675 /*
676 * Wait for the log flusher thread to complete.
677 */
678 if (pVM->vmm.s.hLogFlusherThread != NIL_RTTHREAD)
679 {
680 int rc2 = RTThreadWait(pVM->vmm.s.hLogFlusherThread, RT_MS_30SEC, NULL);
681 AssertLogRelRC(rc2);
682 if (RT_SUCCESS(rc2))
683 pVM->vmm.s.hLogFlusherThread = NIL_RTTHREAD;
684 }
685
686 return rc;
687}
688
689
690/**
691 * Applies relocations to data and code managed by this
692 * component. This function will be called at init and
693 * whenever the VMM needs to relocate itself inside the GC.
694 *
695 * The VMM will need to apply relocations to the core code.
696 *
697 * @param pVM The cross context VM structure.
698 * @param offDelta The relocation delta.
699 */
700VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
701{
702 LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));
703 RT_NOREF(offDelta);
704
705 /*
706 * Update the logger.
707 */
708 VMMR3UpdateLoggers(pVM);
709}
710
711
712/**
713 * Worker for VMMR3UpdateLoggers.
714 */
715static int vmmR3UpdateLoggersWorker(PVM pVM, PVMCPU pVCpu, PRTLOGGER pSrcLogger, bool fReleaseLogger)
716{
717 /*
718 * Get the group count.
719 */
720 uint32_t uGroupsCrc32 = 0;
721 uint32_t cGroups = 0;
722 uint64_t fFlags = 0;
723 int rc = RTLogQueryBulk(pSrcLogger, &fFlags, &uGroupsCrc32, &cGroups, NULL);
724 Assert(rc == VERR_BUFFER_OVERFLOW);
725
726 /*
727 * Allocate the request of the right size.
728 */
729 uint32_t const cbReq = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroups]);
730 PVMMR0UPDATELOGGERSREQ pReq = (PVMMR0UPDATELOGGERSREQ)RTMemAllocZVar(cbReq);
731 if (pReq)
732 {
733 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
734 pReq->Hdr.cbReq = cbReq;
735 pReq->cGroups = cGroups;
736 rc = RTLogQueryBulk(pSrcLogger, &pReq->fFlags, &pReq->uGroupCrc32, &pReq->cGroups, pReq->afGroups);
737 AssertRC(rc);
738 if (RT_SUCCESS(rc))
739 {
740 /*
741 * The 64-bit value argument.
742 */
743 uint64_t fExtraArg = fReleaseLogger;
744
745 /* Only outputting to the parent VMM's logs? Enable ring-0 to flush directly. */
746 uint32_t fDst = RTLogGetDestinations(pSrcLogger);
747 fDst &= ~(RTLOGDEST_DUMMY | RTLOGDEST_F_NO_DENY | RTLOGDEST_F_DELAY_FILE | RTLOGDEST_FIXED_FILE | RTLOGDEST_FIXED_DIR);
748 if ( (fDst & (RTLOGDEST_VMM | RTLOGDEST_VMM_REL))
749 && !(fDst & ~(RTLOGDEST_VMM | RTLOGDEST_VMM_REL)))
750 fExtraArg |= (fDst & RTLOGDEST_VMM ? VMMR0UPDATELOGGER_F_TO_PARENT_VMM_DBG : 0)
751 | (fDst & RTLOGDEST_VMM_REL ? VMMR0UPDATELOGGER_F_TO_PARENT_VMM_REL : 0);
752
753 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_VMMR0_UPDATE_LOGGERS, fExtraArg, &pReq->Hdr);
754 }
755
756 RTMemFree(pReq);
757 }
758 else
759 rc = VERR_NO_MEMORY;
760 return rc;
761}
762
763
764/**
765 * Updates the settings for the RC and R0 loggers.
766 *
767 * @returns VBox status code.
768 * @param pVM The cross context VM structure.
769 * @thread EMT
770 */
771VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
772{
773 /* Nothing to do here if we're in driverless mode: */
774 if (SUPR3IsDriverless())
775 return VINF_SUCCESS;
776
777 PVMCPU pVCpu = VMMGetCpu(pVM);
778 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
779
780 /*
781     * Each EMT has its own logger instance.
782 */
783 /* Debug logging.*/
784 int rcDebug = VINF_SUCCESS;
785#ifdef LOG_ENABLED
786 PRTLOGGER const pDefault = RTLogDefaultInstance();
787 if (pDefault)
788 rcDebug = vmmR3UpdateLoggersWorker(pVM, pVCpu, pDefault, false /*fReleaseLogger*/);
789#else
790 RT_NOREF(pVM);
791#endif
792
793 /* Release logging. */
794 int rcRelease = VINF_SUCCESS;
795 PRTLOGGER const pRelease = RTLogRelGetDefaultInstance();
796 if (pRelease)
797 rcRelease = vmmR3UpdateLoggersWorker(pVM, pVCpu, pRelease, true /*fReleaseLogger*/);
798
799 return RT_SUCCESS(rcDebug) ? rcRelease : rcDebug;
800}
801
802
803/**
804 * @callback_method_impl{FNRTTHREAD, Ring-0 log flusher thread.}
805 */
806static DECLCALLBACK(int) vmmR3LogFlusher(RTTHREAD hThreadSelf, void *pvUser)
807{
808 PVM const pVM = (PVM)pvUser;
809 RT_NOREF(hThreadSelf);
810
811 /* Reset the flusher state before we start: */
812 pVM->vmm.s.LogFlusherItem.u32 = UINT32_MAX;
813
814 /*
815 * The work loop.
816 */
817 for (;;)
818 {
819 /*
820 * Wait for work.
821 */
822 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_VMMR0_LOG_FLUSHER, 0, NULL);
823 if (RT_SUCCESS(rc))
824 {
825 /* Paranoia: Make another copy of the request, to make sure the validated data can't be changed. */
826 VMMLOGFLUSHERENTRY Item;
827 Item.u32 = pVM->vmm.s.LogFlusherItem.u32;
828 if ( Item.s.idCpu < pVM->cCpus
829 && Item.s.idxLogger < VMMLOGGER_IDX_MAX
830 && Item.s.idxBuffer < VMMLOGGER_BUFFER_COUNT)
831 {
832 /*
833 * Verify the request.
834 */
835 PVMCPU const pVCpu = pVM->apCpusR3[Item.s.idCpu];
836 PVMMR3CPULOGGER const pShared = &pVCpu->vmm.s.u.aLoggers[Item.s.idxLogger];
837 uint32_t const cbToFlush = pShared->aBufs[Item.s.idxBuffer].AuxDesc.offBuf;
838 if (cbToFlush > 0)
839 {
840 if (cbToFlush <= pShared->cbBuf)
841 {
842 char * const pchBufR3 = pShared->aBufs[Item.s.idxBuffer].pchBufR3;
843 if (pchBufR3)
844 {
845 /*
846 * Do the flushing.
847 */
848 PRTLOGGER const pLogger = Item.s.idxLogger == VMMLOGGER_IDX_REGULAR
849 ? RTLogGetDefaultInstance() : RTLogRelGetDefaultInstance();
850 if (pLogger)
851 {
852 char szBefore[128];
853 RTStrPrintf(szBefore, sizeof(szBefore),
854 "*FLUSH* idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x fFlushed=%RTbool cbDropped=%#x\n",
855 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush,
856 pShared->aBufs[Item.s.idxBuffer].AuxDesc.fFlushedIndicator, pShared->cbDropped);
857 RTLogBulkWrite(pLogger, szBefore, pchBufR3, cbToFlush, "*FLUSH DONE*\n");
858 }
859 }
860 else
861 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! No ring-3 buffer pointer!\n",
862 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush));
863 }
864 else
865 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! Exceeds %#x bytes buffer size!\n",
866 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush, pShared->cbBuf));
867 }
868 else
869 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! Zero bytes to flush!\n",
870 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush));
871
872 /*
873 * Mark the descriptor as flushed and set the request flag for same.
874 */
875 pShared->aBufs[Item.s.idxBuffer].AuxDesc.fFlushedIndicator = true;
876 }
877 else
878 {
879 Assert(Item.s.idCpu == UINT16_MAX);
880 Assert(Item.s.idxLogger == UINT8_MAX);
881 Assert(Item.s.idxBuffer == UINT8_MAX);
882 }
883 }
884 /*
885         * Getting interrupted can happen, just ignore it.
886 */
887 else if (rc == VERR_INTERRUPTED)
888 { /* ignore*/ }
889 /*
890 * The ring-0 termination code will set the shutdown flag and wake us
891 * up, and we should return with object destroyed. In case there is
892         * some kind of race, we might also get semaphore destroyed.
893 */
894 else if ( rc == VERR_OBJECT_DESTROYED
895 || rc == VERR_SEM_DESTROYED
896 || rc == VERR_INVALID_HANDLE)
897 {
898 LogRel(("vmmR3LogFlusher: Terminating (%Rrc)\n", rc));
899 return VINF_SUCCESS;
900 }
901 /*
902 * There shouldn't be any other errors...
903 */
904 else
905 {
906 LogRelMax(64, ("vmmR3LogFlusher: VMMR0_DO_VMMR0_LOG_FLUSHER -> %Rrc\n", rc));
907 AssertRC(rc);
908 RTThreadSleep(1);
909 }
910 }
911}
912
913
914/**
915 * Helper for VMM_FLUSH_R0_LOG that does the flushing.
916 *
917 * @param pVM The cross context VM structure.
918 * @param pVCpu The cross context virtual CPU structure of the calling
919 * EMT.
920 * @param pShared The shared logger data.
921 * @param idxBuf The buffer to flush.
922 * @param pDstLogger The destination IPRT logger.
923 */
924static void vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf, PRTLOGGER pDstLogger)
925{
926 uint32_t const cbToFlush = pShared->aBufs[idxBuf].AuxDesc.offBuf;
927 const char *pszBefore = cbToFlush < 256 ? NULL : "*FLUSH*\n";
928 const char *pszAfter = cbToFlush < 256 ? NULL : "*END*\n";
929
930#if VMMLOGGER_BUFFER_COUNT > 1
931 /*
932 * When we have more than one log buffer, the flusher thread may still be
933 * working on the previous buffer when we get here.
934 */
935 char szBefore[64];
936 if (pShared->cFlushing > 0)
937 {
938 STAM_REL_PROFILE_START(&pShared->StatRaces, a);
939 uint64_t const nsStart = RTTimeNanoTS();
940
941 /* A no-op, but it takes the lock and the hope is that we end up waiting
942 on the flusher to finish up. */
943 RTLogBulkWrite(pDstLogger, NULL, "", 0, NULL);
944 if (pShared->cFlushing != 0)
945 {
946 RTLogBulkWrite(pDstLogger, NULL, "", 0, NULL);
947
948             /* If no luck, go to ring-0 and do proper waiting. */
949 if (pShared->cFlushing != 0)
950 {
951 STAM_REL_COUNTER_INC(&pShared->StatRacesToR0);
952 SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED, 0, NULL);
953 }
954 }
955
956 RTStrPrintf(szBefore, sizeof(szBefore), "*%sFLUSH* waited %'RU64 ns\n",
957 pShared->cFlushing == 0 ? "" : " MISORDERED", RTTimeNanoTS() - nsStart);
958 pszBefore = szBefore;
959 STAM_REL_PROFILE_STOP(&pShared->StatRaces, a);
960 }
961#else
962 RT_NOREF(pVM, pVCpu);
963#endif
964
965 RTLogBulkWrite(pDstLogger, pszBefore, pShared->aBufs[idxBuf].pchBufR3, cbToFlush, pszAfter);
966 pShared->aBufs[idxBuf].AuxDesc.fFlushedIndicator = true;
967}
968
969
970/**
971 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg1Weak output.
972 *
973 * @returns Pointer to the buffer.
974 * @param pVM The cross context VM structure.
975 */
976VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
977{
978 return pVM->vmm.s.szRing0AssertMsg1;
979}
980
981
982/**
983 * Returns the VMCPU of the specified virtual CPU.
984 *
985 * @returns The VMCPU pointer. NULL if @a idCpu or @a pUVM is invalid.
986 *
987 * @param pUVM The user mode VM handle.
988 * @param idCpu The ID of the virtual CPU.
989 */
990VMMR3DECL(PVMCPU) VMMR3GetCpuByIdU(PUVM pUVM, RTCPUID idCpu)
991{
992 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
993 AssertReturn(idCpu < pUVM->cCpus, NULL);
994 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
995 return pUVM->pVM->apCpusR3[idCpu];
996}
997
998
999/**
1000 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
1001 *
1002 * @returns Pointer to the buffer.
1003 * @param pVM The cross context VM structure.
1004 */
1005VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
1006{
1007 return pVM->vmm.s.szRing0AssertMsg2;
1008}
1009
1010
1011/**
1012 * Execute state save operation.
1013 *
1014 * @returns VBox status code.
1015 * @param pVM The cross context VM structure.
1016 * @param pSSM SSM operation handle.
1017 */
1018static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1019{
1020 LogFlow(("vmmR3Save:\n"));
1021
1022 /*
1023 * Save the started/stopped state of all CPUs except 0 as it will always
1024 * be running. This avoids breaking the saved state version. :-)
1025 */
1026 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1027 SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVM->apCpusR3[i])));
1028
1029 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
1030}
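/* Resulting layout of the "vmm" saved state unit (version 4), as produced above
 * and consumed by vmmR3Load() below:
 *
 *   bool      afStarted[cCpus - 1]   started/stopped flag for VCPU 1..cCpus-1
 *   uint32_t  UINT32_MAX             terminator
 */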
1031
1032
1033/**
1034 * Execute state load operation.
1035 *
1036 * @returns VBox status code.
1037 * @param pVM The cross context VM structure.
1038 * @param pSSM SSM operation handle.
1039 * @param uVersion Data layout version.
1040 * @param uPass The data pass.
1041 */
1042static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1043{
1044 LogFlow(("vmmR3Load:\n"));
1045 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1046
1047 /*
1048 * Validate version.
1049 */
1050 if ( uVersion != VMM_SAVED_STATE_VERSION
1051 && uVersion != VMM_SAVED_STATE_VERSION_3_0)
1052 {
1053 AssertMsgFailed(("vmmR3Load: Invalid version uVersion=%u!\n", uVersion));
1054 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1055 }
1056
1057 if (uVersion <= VMM_SAVED_STATE_VERSION_3_0)
1058 {
1059 /* Ignore the stack bottom, stack pointer and stack bits. */
1060 RTRCPTR RCPtrIgnored;
1061 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1062 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1063#ifdef RT_OS_DARWIN
1064 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(3,0,0)
1065 && SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(3,1,0)
1066 && SSMR3HandleRevision(pSSM) >= 48858
1067 && ( !strcmp(SSMR3HandleHostOSAndArch(pSSM), "darwin.x86")
1068 || !strcmp(SSMR3HandleHostOSAndArch(pSSM), "") )
1069 )
1070 SSMR3Skip(pSSM, 16384);
1071 else
1072 SSMR3Skip(pSSM, 8192);
1073#else
1074 SSMR3Skip(pSSM, 8192);
1075#endif
1076 }
1077
1078 /*
1079 * Restore the VMCPU states. VCPU 0 is always started.
1080 */
1081 VMCPU_SET_STATE(pVM->apCpusR3[0], VMCPUSTATE_STARTED);
1082 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1083 {
1084 bool fStarted;
1085 int rc = SSMR3GetBool(pSSM, &fStarted);
1086 if (RT_FAILURE(rc))
1087 return rc;
1088 VMCPU_SET_STATE(pVM->apCpusR3[i], fStarted ? VMCPUSTATE_STARTED : VMCPUSTATE_STOPPED);
1089 }
1090
1091 /* terminator */
1092 uint32_t u32;
1093 int rc = SSMR3GetU32(pSSM, &u32);
1094 if (RT_FAILURE(rc))
1095 return rc;
1096 if (u32 != UINT32_MAX)
1097 {
1098 AssertMsgFailed(("u32=%#x\n", u32));
1099 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1100 }
1101 return VINF_SUCCESS;
1102}
1103
1104
1105/**
1106 * Suspends the CPU yielder.
1107 *
1108 * @param pVM The cross context VM structure.
1109 */
1110VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM)
1111{
1112#if 0 /* pointless when timers don't run on EMT */
1113 VMCPU_ASSERT_EMT(pVM->apCpusR3[0]);
1114 if (!pVM->vmm.s.cYieldResumeMillies)
1115 {
1116 uint64_t u64Now = TMTimerGet(pVM, pVM->vmm.s.hYieldTimer);
1117 uint64_t u64Expire = TMTimerGetExpire(pVM, pVM->vmm.s.hYieldTimer);
1118 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1119 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1120 else
1121 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM, pVM->vmm.s.hYieldTimer, u64Expire - u64Now);
1122 TMTimerStop(pVM, pVM->vmm.s.hYieldTimer);
1123 }
1124 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1125#else
1126 RT_NOREF(pVM);
1127#endif
1128}
1129
1130
1131/**
1132 * Stops the CPU yielder.
1133 *
1134 * @param pVM The cross context VM structure.
1135 */
1136VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM)
1137{
1138#if 0 /* pointless when timers don't run on EMT */
1139 if (!pVM->vmm.s.cYieldResumeMillies)
1140 TMTimerStop(pVM, pVM->vmm.s.hYieldTimer);
1141 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1142 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1143#else
1144 RT_NOREF(pVM);
1145#endif
1146}
1147
1148
1149/**
1150 * Resumes the CPU yielder when it has been suspended or stopped.
1151 *
1152 * @param pVM The cross context VM structure.
1153 */
1154VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM)
1155{
1156#if 0 /* pointless when timers don't run on EMT */
1157 if (pVM->vmm.s.cYieldResumeMillies)
1158 {
1159 TMTimerSetMillies(pVM, pVM->vmm.s.hYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1160 pVM->vmm.s.cYieldResumeMillies = 0;
1161 }
1162#else
1163 RT_NOREF(pVM);
1164#endif
1165}
1166
1167
1168#if 0 /* pointless when timers don't run on EMT */
1169/**
1170 * @callback_method_impl{FNTMTIMERINT, EMT yielder}
1171 *
1172 * @todo This is a UNI core/thread thing, really... Should be reconsidered.
1173 */
1174static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
1175{
1176 NOREF(pvUser);
1177
1178 /*
1179 * This really needs some careful tuning. While we shouldn't be too greedy since
1180 * that'll cause the rest of the system to stop up, we shouldn't be too nice either
1181 * because that'll cause us to stop up.
1182 *
1183 * The current logic is to use the default interval when there is no lag worth
1184 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1185 *
1186 * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
1187 * so the lag is up to date.)
1188 */
1189 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1190 if ( u64Lag < 50000000 /* 50ms */
1191 || ( u64Lag < 1000000000 /* 1s */
1192 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1193 )
1194 {
1195 uint64_t u64Elapsed = RTTimeNanoTS();
1196 pVM->vmm.s.u64LastYield = u64Elapsed;
1197
1198 RTThreadYield();
1199
1200#ifdef LOG_ENABLED
1201 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1202 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1203#endif
1204 }
1205 TMTimerSetMillies(pVM, hTimer, pVM->vmm.s.cYieldEveryMillies);
1206}
1207#endif
1208
1209
1210/**
1211 * Executes guest code (Intel VT-x and AMD-V).
1212 *
1213 * @param pVM The cross context VM structure.
1214 * @param pVCpu The cross context virtual CPU structure.
1215 */
1216VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
1217{
1218#if defined(VBOX_VMM_TARGET_ARMV8)
1219 /* We should actually never get here as the only execution engine is NEM. */
1220 RT_NOREF(pVM, pVCpu);
1221 AssertReleaseFailed();
1222 return VERR_NOT_SUPPORTED;
1223#else
1224 Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1225
1226 int rc;
1227 do
1228 {
1229# ifdef NO_SUPCALLR0VMM
1230 rc = VERR_GENERAL_FAILURE;
1231# else
1232 rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
1233 if (RT_LIKELY(rc == VINF_SUCCESS))
1234 rc = pVCpu->vmm.s.iLastGZRc;
1235# endif
1236 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1237
1238# if 0 /** @todo triggers too often */
1239 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
1240# endif
1241
1242 /*
1243 * Flush the logs
1244 */
1245# ifdef LOG_ENABLED
1246 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
1247# endif
1248 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
1249 if (rc != VERR_VMM_RING0_ASSERTION)
1250 {
1251 Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1252 return rc;
1253 }
1254 return vmmR3HandleRing0Assert(pVM, pVCpu);
1255#endif
1256}
1257
1258
1259/**
1260 * Perform one of the fast I/O control VMMR0 operation.
1261 *
1262 * @returns VBox strict status code.
1263 * @param pVM The cross context VM structure.
1264 * @param pVCpu The cross context virtual CPU structure.
1265 * @param enmOperation The operation to perform.
1266 */
1267VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation)
1268{
1269 VBOXSTRICTRC rcStrict;
1270 do
1271 {
1272#ifdef NO_SUPCALLR0VMM
1273 rcStrict = VERR_GENERAL_FAILURE;
1274#else
1275 rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
1276 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1277 rcStrict = pVCpu->vmm.s.iLastGZRc;
1278#endif
1279 } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
1280
1281 /*
1282 * Flush the logs
1283 */
1284#ifdef LOG_ENABLED
1285 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
1286#endif
1287 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
1288 if (rcStrict != VERR_VMM_RING0_ASSERTION)
1289 return rcStrict;
1290 return vmmR3HandleRing0Assert(pVM, pVCpu);
1291}
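/* Sketch of a caller of this fast path (illustrative; the operation an
 * execution loop passes here depends on the engine, VMMR0_DO_HM_RUN being the
 * one used by the HM path in VMMR3HmRunGC() above):
 *
 * @code
 *     VBOXSTRICTRC rcStrict = VMMR3CallR0EmtFast(pVM, pVCpu, VMMR0_DO_HM_RUN);
 * @endcode
 */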
1292
1293
1294/**
1295 * VCPU worker for VMMR3SendStartupIpi.
1296 *
1297 * @param pVM The cross context VM structure.
1298 * @param idCpu Virtual CPU to perform SIPI on.
1299 * @param uVector The SIPI vector.
1300 */
1301static DECLCALLBACK(int) vmmR3SendStarupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1302{
1303 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1304 VMCPU_ASSERT_EMT(pVCpu);
1305
1306 /*
1307 * In the INIT state, the target CPU is only responsive to an SIPI.
1308     * This is also true when the CPU is in VMX non-root mode.
1309 *
1310 * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)".
1311 * See Intel spec. 26.6.2 "Activity State".
1312 */
1313 if (EMGetState(pVCpu) != EMSTATE_WAIT_SIPI)
1314 return VINF_SUCCESS;
1315
1316#if defined(VBOX_VMM_TARGET_ARMV8)
1317 AssertReleaseFailed(); /** @todo */
1318#else
1319 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1320# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1321 if (CPUMIsGuestInVmxRootMode(pCtx))
1322 {
1323 /* If the CPU is in VMX non-root mode we must cause a VM-exit. */
1324 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1325 return VBOXSTRICTRC_TODO(IEMExecVmxVmexitStartupIpi(pVCpu, uVector));
1326
1327 /* If the CPU is in VMX root mode (and not in VMX non-root mode) SIPIs are blocked. */
1328 return VINF_SUCCESS;
1329 }
1330# endif
1331
1332 pCtx->cs.Sel = uVector << 8;
1333 pCtx->cs.ValidSel = uVector << 8;
1334 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1335 pCtx->cs.u64Base = uVector << 12;
1336 pCtx->cs.u32Limit = UINT32_C(0x0000ffff);
1337 pCtx->rip = 0;
1338#endif
1339
1340 Log(("vmmR3SendSipi for VCPU %d with vector %x\n", idCpu, uVector));
1341
1342# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
1343 EMSetState(pVCpu, EMSTATE_HALTED);
1344 return VINF_EM_RESCHEDULE;
1345# else /* And if we go the VMCPU::enmState way it can stay here. */
1346 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STOPPED);
1347 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1348 return VINF_SUCCESS;
1349# endif
1350}
1351
1352
1353/**
1354 * VCPU worker for VMMR3SendInitIpi.
1355 *
1356 * @returns VBox status code.
1357 * @param pVM The cross context VM structure.
1358 * @param idCpu      Virtual CPU to perform the INIT IPI on.
1359 */
1360static DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1361{
1362 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1363 VMCPU_ASSERT_EMT(pVCpu);
1364
1365 Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
1366
1367 /** @todo r=ramshankar: We should probably block INIT signal when the CPU is in
1368 * wait-for-SIPI state. Verify. */
1369
1370 /* If the CPU is in VMX non-root mode, INIT signals cause VM-exits. */
1371#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1372 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1373 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1374 return VBOXSTRICTRC_TODO(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INIT_SIGNAL, 0 /* uExitQual */));
1375#endif
1376
1377    /** @todo Figure out how to handle SVM nested-guest intercepts here for the
1378     *        INIT IPI (e.g. SVM_EXIT_INIT). */
1379
1380 PGMR3ResetCpu(pVM, pVCpu);
1381    PDMR3ResetCpu(pVCpu);                   /* Only clears the pending interrupt force flags. */
1382#if !defined(VBOX_VMM_TARGET_ARMV8)
1383 APICR3InitIpi(pVCpu);
1384#endif
1385 TRPMR3ResetCpu(pVCpu);
1386 CPUMR3ResetCpu(pVM, pVCpu);
1387 EMR3ResetCpu(pVCpu);
1388 HMR3ResetCpu(pVCpu);
1389 NEMR3ResetCpu(pVCpu, true /*fInitIpi*/);
1390
1391 /* This will trickle up on the target EMT. */
1392 return VINF_EM_WAIT_SIPI;
1393}
1394
1395
1396/**
1397 * Sends a Startup IPI (SIPI) to the virtual CPU by setting CS:EIP to the
1398 * vector-dependent startup address and unhalting the processor.
1399 *
1400 * @param pVM The cross context VM structure.
1401 * @param idCpu Virtual CPU to perform SIPI on.
1402 * @param uVector SIPI vector.
1403 */
1404VMMR3_INT_DECL(void) VMMR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1405{
1406 AssertReturnVoid(idCpu < pVM->cCpus);
1407
1408    int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendStartupIpi, 3, pVM, idCpu, uVector);
1409 AssertRC(rc);
1410}
1411
1412
1413/**
1414 * Sends an INIT IPI to the virtual CPU.
1415 *
1416 * @param pVM The cross context VM structure.
1417 * @param idCpu Virtual CPU to perform the INIT IPI on.
1418 */
1419VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1420{
1421 AssertReturnVoid(idCpu < pVM->cCpus);
1422
1423 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
1424 AssertRC(rc);
1425}
1426
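/*
 * Usage sketch (illustrative only; a real caller would typically be the
 * virtual APIC emulation, and the CPU id and vector below are made up): the
 * classic MP bring-up is an INIT IPI followed by a SIPI carrying the start
 * vector:
 *
 *      VMMR3SendInitIpi(pVM, 1);            // CPU 1 ends up in wait-for-SIPI state
 *      VMMR3SendStartupIpi(pVM, 1, 0x9f);   // CPU 1 starts at 0x9f00:0000
 *
 * Both wrappers queue the work on the target EMT via VMR3ReqCallNoWait and
 * return immediately, so the caller does not wait for the CPU to come up.
 */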
1427
1428/**
1429 * Registers the guest memory range that can be used for patching.
1430 *
1431 * @returns VBox status code.
1432 * @param pVM The cross context VM structure.
1433 * @param pPatchMem Patch memory range.
1434 * @param cbPatchMem Size of the memory range.
1435 */
1436VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1437{
1438 VM_ASSERT_EMT(pVM);
1439 if (HMIsEnabled(pVM))
1440 return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
1441
1442 return VERR_NOT_SUPPORTED;
1443}
1444
1445
1446/**
1447 * Deregisters the guest memory range that can be used for patching.
1448 *
1449 * @returns VBox status code.
1450 * @param pVM The cross context VM structure.
1451 * @param pPatchMem Patch memory range.
1452 * @param cbPatchMem Size of the memory range.
1453 */
1454VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1455{
1456 if (HMIsEnabled(pVM))
1457 return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
1458
1459 return VINF_SUCCESS;
1460}
1461
1462
1463/**
1464 * Common recursion handler for the other EMTs.
1465 *
1466 * @returns Strict VBox status code.
1467 * @param pVM The cross context VM structure.
1468 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1469 * @param rcStrict Current status code to be combined with the one
1470 * from this recursion and returned.
1471 */
1472static VBOXSTRICTRC vmmR3EmtRendezvousCommonRecursion(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
1473{
1474 int rc2;
1475
1476 /*
1477 * We wait here while the initiator of this recursion reconfigures
1478 * everything. The last EMT to get in signals the initiator.
1479 */
1480 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) == pVM->cCpus)
1481 {
1482 rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
1483 AssertLogRelRC(rc2);
1484 }
1485
1486 rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPush, RT_INDEFINITE_WAIT);
1487 AssertLogRelRC(rc2);
1488
1489 /*
1490 * Do the normal rendezvous processing.
1491 */
1492 VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1493 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1494
1495 /*
1496 * Wait for the initiator to restore everything.
1497 */
1498 rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPop, RT_INDEFINITE_WAIT);
1499 AssertLogRelRC(rc2);
1500
1501 /*
1502 * Last thread out of here signals the initiator.
1503 */
1504 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) == pVM->cCpus)
1505 {
1506 rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
1507 AssertLogRelRC(rc2);
1508 }
1509
1510 /*
1511 * Merge status codes and return.
1512 */
1513 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
1514 if ( rcStrict2 != VINF_SUCCESS
1515 && ( rcStrict == VINF_SUCCESS
1516 || rcStrict > rcStrict2))
1517 rcStrict = rcStrict2;
1518 return rcStrict;
1519}
1520
1521
1522/**
1523 * Count returns and have the last non-caller EMT wake up the caller.
1524 *
1525 * @returns VBox strict informational status code for EM scheduling. No failures
1526 * will be returned here, those are for the caller only.
1527 *
1528 * @param pVM The cross context VM structure.
1529 * @param rcStrict The current accumulated recursive status code,
1530 * to be merged with i32RendezvousStatus and
1531 * returned.
1532 */
1533DECL_FORCE_INLINE(VBOXSTRICTRC) vmmR3EmtRendezvousNonCallerReturn(PVM pVM, VBOXSTRICTRC rcStrict)
1534{
1535 VBOXSTRICTRC rcStrict2 = ASMAtomicReadS32(&pVM->vmm.s.i32RendezvousStatus);
1536
1537 uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
1538 if (cReturned == pVM->cCpus - 1U)
1539 {
1540 int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1541 AssertLogRelRC(rc);
1542 }
1543
1544 /*
1545 * Merge the status codes, ignoring error statuses in this code path.
1546 */
1547 AssertLogRelMsgReturn( rcStrict2 <= VINF_SUCCESS
1548 || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
1549 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1550 VERR_IPE_UNEXPECTED_INFO_STATUS);
1551
1552 if (RT_SUCCESS(rcStrict2))
1553 {
1554 if ( rcStrict2 != VINF_SUCCESS
1555 && ( rcStrict == VINF_SUCCESS
1556 || rcStrict > rcStrict2))
1557 rcStrict = rcStrict2;
1558 }
1559 return rcStrict;
1560}
1561
1562
1563/**
1564 * Common worker for VMMR3EmtRendezvous and VMMR3EmtRendezvousFF.
1565 *
1566 * @returns VBox strict informational status code for EM scheduling. No failures
1567 * will be returned here, those are for the caller only. When
1568 * fIsCaller is set, VINF_SUCCESS is always returned.
1569 *
1570 * @param pVM The cross context VM structure.
1571 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1572 * @param fIsCaller Whether we're the VMMR3EmtRendezvous caller or
1573 * not.
1574 * @param fFlags The flags.
1575 * @param pfnRendezvous The callback.
1576 * @param pvUser The user argument for the callback.
1577 */
1578static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
1579 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1580{
1581 int rc;
1582 VBOXSTRICTRC rcStrictRecursion = VINF_SUCCESS;
1583
1584 /*
1585 * Enter, the last EMT triggers the next callback phase.
1586 */
1587 uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
1588 if (cEntered != pVM->cCpus)
1589 {
1590 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1591 {
1592 /* Wait for our turn. */
1593 for (;;)
1594 {
1595 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, RT_INDEFINITE_WAIT);
1596 AssertLogRelRC(rc);
1597 if (!pVM->vmm.s.fRendezvousRecursion)
1598 break;
1599 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1600 }
1601 }
1602 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1603 {
1604 /* Wait for the last EMT to arrive and wake everyone up. */
1605 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce, RT_INDEFINITE_WAIT);
1606 AssertLogRelRC(rc);
1607 Assert(!pVM->vmm.s.fRendezvousRecursion);
1608 }
1609 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1610 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1611 {
1612 /* Wait for our turn. */
1613 for (;;)
1614 {
1615 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1616 AssertLogRelRC(rc);
1617 if (!pVM->vmm.s.fRendezvousRecursion)
1618 break;
1619 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1620 }
1621 }
1622 else
1623 {
1624 Assert((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE);
1625
1626 /*
1627             * The execute-once type is handled specially to optimize the code flow.
1628 *
1629 * The last EMT to arrive will perform the callback and the other
1630 * EMTs will wait on the Done/DoneCaller semaphores (instead of
1631 * the EnterOneByOne/AllAtOnce) in the meanwhile. When the callback
1632 * returns, that EMT will initiate the normal return sequence.
1633 */
1634 if (!fIsCaller)
1635 {
1636 for (;;)
1637 {
1638 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1639 AssertLogRelRC(rc);
1640 if (!pVM->vmm.s.fRendezvousRecursion)
1641 break;
1642 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1643 }
1644
1645 return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
1646 }
1647 return VINF_SUCCESS;
1648 }
1649 }
1650 else
1651 {
1652 /*
1653 * All EMTs are waiting, clear the FF and take action according to the
1654 * execution method.
1655 */
1656 VM_FF_CLEAR(pVM, VM_FF_EMT_RENDEZVOUS);
1657
1658 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1659 {
1660 /* Wake up everyone. */
1661 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
1662 AssertLogRelRC(rc);
1663 }
1664 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1665 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1666 {
1667            /* Figure out who to wake up and wake it up. If it's ourselves, then
1668               it's easy; otherwise wait for our turn. */
1669 VMCPUID iFirst = (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1670 ? 0
1671 : pVM->cCpus - 1U;
1672 if (pVCpu->idCpu != iFirst)
1673 {
1674 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iFirst]);
1675 AssertLogRelRC(rc);
1676 for (;;)
1677 {
1678 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1679 AssertLogRelRC(rc);
1680 if (!pVM->vmm.s.fRendezvousRecursion)
1681 break;
1682 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1683 }
1684 }
1685 }
1686 /* else: execute the handler on the current EMT and wake up one or more threads afterwards. */
1687 }
1688
1689
1690 /*
1691 * Do the callback and update the status if necessary.
1692 */
1693 if ( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1694 || RT_SUCCESS(ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus)) )
1695 {
1696 VBOXSTRICTRC rcStrict2 = pfnRendezvous(pVM, pVCpu, pvUser);
1697 if (rcStrict2 != VINF_SUCCESS)
1698 {
1699 AssertLogRelMsg( rcStrict2 <= VINF_SUCCESS
1700 || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
1701 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
1702 int32_t i32RendezvousStatus;
1703 do
1704 {
1705 i32RendezvousStatus = ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus);
1706 if ( rcStrict2 == i32RendezvousStatus
1707 || RT_FAILURE(i32RendezvousStatus)
1708 || ( i32RendezvousStatus != VINF_SUCCESS
1709 && rcStrict2 > i32RendezvousStatus))
1710 break;
1711 } while (!ASMAtomicCmpXchgS32(&pVM->vmm.s.i32RendezvousStatus, VBOXSTRICTRC_VAL(rcStrict2), i32RendezvousStatus));
1712 }
1713 }
1714
1715 /*
1716 * Increment the done counter and take action depending on whether we're
1717 * the last to finish callback execution.
1718 */
1719 uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
1720 if ( cDone != pVM->cCpus
1721 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
1722 {
1723 /* Signal the next EMT? */
1724 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1725 {
1726 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1727 AssertLogRelRC(rc);
1728 }
1729 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1730 {
1731 Assert(cDone == pVCpu->idCpu + 1U);
1732 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu + 1U]);
1733 AssertLogRelRC(rc);
1734 }
1735 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1736 {
1737 Assert(pVM->cCpus - cDone == pVCpu->idCpu);
1738 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVM->cCpus - cDone - 1U]);
1739 AssertLogRelRC(rc);
1740 }
1741
1742 /* Wait for the rest to finish (the caller waits on hEvtRendezvousDoneCaller). */
1743 if (!fIsCaller)
1744 {
1745 for (;;)
1746 {
1747 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1748 AssertLogRelRC(rc);
1749 if (!pVM->vmm.s.fRendezvousRecursion)
1750 break;
1751 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1752 }
1753 }
1754 }
1755 else
1756 {
1757 /* Callback execution is all done, tell the rest to return. */
1758 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
1759 AssertLogRelRC(rc);
1760 }
1761
1762 if (!fIsCaller)
1763 return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
1764 return rcStrictRecursion;
1765}
1766
1767
1768/**
1769 * Called in response to VM_FF_EMT_RENDEZVOUS.
1770 *
1771 * @returns VBox strict status code - EM scheduling. No errors will be returned
1772 * here, nor will any non-EM scheduling status codes be returned.
1773 *
1774 * @param pVM The cross context VM structure.
1775 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1776 *
1777 * @thread EMT
1778 */
1779VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu)
1780{
1781 Assert(!pVCpu->vmm.s.fInRendezvous);
1782 Log(("VMMR3EmtRendezvousFF: EMT%#u\n", pVCpu->idCpu));
1783 pVCpu->vmm.s.fInRendezvous = true;
1784 VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1785 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1786 pVCpu->vmm.s.fInRendezvous = false;
1787 Log(("VMMR3EmtRendezvousFF: EMT%#u returns %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
1788 return VBOXSTRICTRC_TODO(rcStrict);
1789}
1790
1791
1792/**
1793 * Helper for resetting a single wakeup event semaphore.
1794 *
1795 * @returns VERR_TIMEOUT on success, RTSemEventWait status otherwise.
1796 * @param hEvt The event semaphore to reset.
1797 */
1798static int vmmR3HlpResetEvent(RTSEMEVENT hEvt)
1799{
1800 for (uint32_t cLoops = 0; ; cLoops++)
1801 {
1802 int rc = RTSemEventWait(hEvt, 0 /*cMsTimeout*/);
1803 if (rc != VINF_SUCCESS || cLoops > _4K)
1804 return rc;
1805 }
1806}
1807
1808
1809/**
1810 * Worker for VMMR3EmtRendezvous that handles recursion.
1811 *
1812 * @returns VBox strict status code. This will be the first error,
1813 * VINF_SUCCESS, or an EM scheduling status code.
1814 *
1815 * @param pVM The cross context VM structure.
1816 * @param pVCpu The cross context virtual CPU structure of the
1817 * calling EMT.
1818 * @param fFlags Flags indicating execution methods. See
1819 * grp_VMMR3EmtRendezvous_fFlags.
1820 * @param pfnRendezvous The callback.
1821 * @param pvUser User argument for the callback.
1822 *
1823 * @thread EMT(pVCpu)
1824 */
1825static VBOXSTRICTRC vmmR3EmtRendezvousRecursive(PVM pVM, PVMCPU pVCpu, uint32_t fFlags,
1826 PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1827{
1828 Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d\n", fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions));
1829 AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
1830 Assert(pVCpu->vmm.s.fInRendezvous);
1831
1832 /*
1833 * Save the current state.
1834 */
1835 uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
1836 uint32_t const cParentDone = pVM->vmm.s.cRendezvousEmtsDone;
1837 int32_t const iParentStatus = pVM->vmm.s.i32RendezvousStatus;
1838 PFNVMMEMTRENDEZVOUS const pfnParent = pVM->vmm.s.pfnRendezvous;
1839 void * const pvParentUser = pVM->vmm.s.pvRendezvousUser;
1840
1841 /*
1842     * Check preconditions.
1843 */
1844 AssertReturn( (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1845 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
1846 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
1847 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
1848 VERR_INTERNAL_ERROR);
1849 AssertReturn(pVM->vmm.s.cRendezvousEmtsEntered == pVM->cCpus, VERR_INTERNAL_ERROR_2);
1850 AssertReturn(pVM->vmm.s.cRendezvousEmtsReturned == 0, VERR_INTERNAL_ERROR_3);
1851
1852 /*
1853     * Reset the recursion push and pop semaphores.
1854 */
1855 int rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
1856 AssertLogRelRCReturn(rc, rc);
1857 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
1858 AssertLogRelRCReturn(rc, rc);
1859 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
1860 AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
1861 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
1862 AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
1863
1864 /*
1865     * Usher the other threads into the recursion routine.
1866 */
1867 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush, 0);
1868 ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, true);
1869
1870 uint32_t cLeft = pVM->cCpus - (cParentDone + 1U);
1871 if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1872 while (cLeft-- > 0)
1873 {
1874 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1875 AssertLogRelRC(rc);
1876 }
1877 else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1878 {
1879 Assert(cLeft == pVM->cCpus - (pVCpu->idCpu + 1U));
1880 for (VMCPUID iCpu = pVCpu->idCpu + 1U; iCpu < pVM->cCpus; iCpu++)
1881 {
1882 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu]);
1883 AssertLogRelRC(rc);
1884 }
1885 }
1886 else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1887 {
1888 Assert(cLeft == pVCpu->idCpu);
1889 for (VMCPUID iCpu = pVCpu->idCpu; iCpu > 0; iCpu--)
1890 {
1891 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu - 1U]);
1892 AssertLogRelRC(rc);
1893 }
1894 }
1895 else
1896 AssertLogRelReturn((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
1897 VERR_INTERNAL_ERROR_4);
1898
1899 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
1900 AssertLogRelRC(rc);
1901 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1902 AssertLogRelRC(rc);
1903
1904
1905 /*
1906 * Wait for the EMTs to wake up and get out of the parent rendezvous code.
1907 */
1908 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) != pVM->cCpus)
1909 {
1910 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPushCaller, RT_INDEFINITE_WAIT);
1911 AssertLogRelRC(rc);
1912 }
1913
1914 ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, false);
1915
1916 /*
1917 * Clear the slate and setup the new rendezvous.
1918 */
1919 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1920 {
1921 rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
1922 AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1923 }
1924 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1925 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
1926 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
1927 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1928
1929 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
1930 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
1931 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
1932 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
1933 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
1934 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
1935 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
1936 ASMAtomicIncU32(&pVM->vmm.s.cRendezvousRecursions);
1937
1938 /*
1939 * We're ready to go now, do normal rendezvous processing.
1940 */
1941 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
1942 AssertLogRelRC(rc);
1943
1944 VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /*fIsCaller*/, fFlags, pfnRendezvous, pvUser);
1945
1946 /*
1947     * The caller waits for the other EMTs to be done, return and start waiting
1948     * on the pop semaphore.
1949 */
1950 for (;;)
1951 {
1952 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
1953 AssertLogRelRC(rc);
1954 if (!pVM->vmm.s.fRendezvousRecursion)
1955 break;
1956 rcStrict = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict);
1957 }
1958
1959 /*
1960 * Get the return code and merge it with the above recursion status.
1961 */
1962 VBOXSTRICTRC rcStrict2 = pVM->vmm.s.i32RendezvousStatus;
1963 if ( rcStrict2 != VINF_SUCCESS
1964 && ( rcStrict == VINF_SUCCESS
1965 || rcStrict > rcStrict2))
1966 rcStrict = rcStrict2;
1967
1968 /*
1969 * Restore the parent rendezvous state.
1970 */
1971 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1972 {
1973 rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
1974 AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1975 }
1976 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1977 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
1978 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
1979 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1980
1981 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, pVM->cCpus);
1982 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
1983 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, cParentDone);
1984 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, iParentStatus);
1985 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fParentFlags);
1986 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvParentUser);
1987 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnParent);
1988
1989 /*
1990 * Usher the other EMTs back to their parent recursion routine, waiting
1991 * for them to all get there before we return (makes sure they've been
1992 * scheduled and are past the pop event sem, see below).
1993 */
1994 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop, 0);
1995 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
1996 AssertLogRelRC(rc);
1997
1998 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) != pVM->cCpus)
1999 {
2000 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPopCaller, RT_INDEFINITE_WAIT);
2001 AssertLogRelRC(rc);
2002 }
2003
2004 /*
2005 * We must reset the pop semaphore on the way out (doing the pop caller too,
2006 * just in case). The parent may be another recursion.
2007 */
2008 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop); AssertLogRelRC(rc);
2009 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
2010
2011 ASMAtomicDecU32(&pVM->vmm.s.cRendezvousRecursions);
2012
2013 Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d returns %Rrc\n",
2014 fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions, VBOXSTRICTRC_VAL(rcStrict)));
2015 return rcStrict;
2016}
2017
2018
2019/**
2020 * EMT rendezvous.
2021 *
2022 * Gathers all the EMTs and executes some code on each of them, either in a
2023 * one-by-one fashion or all at once.
2024 *
2025 * @returns VBox strict status code. This will be the first error,
2026 * VINF_SUCCESS, or an EM scheduling status code.
2027 *
2028 * @retval VERR_DEADLOCK if recursion is attempted using a rendezvous type that
2029 * doesn't support it or if the recursion is too deep.
2030 *
2031 * @param pVM The cross context VM structure.
2032 * @param fFlags Flags indicating execution methods. See
2033 * grp_VMMR3EmtRendezvous_fFlags. The one-by-one,
2034 * descending and ascending rendezvous types support
2035 * recursion from inside @a pfnRendezvous.
2036 * @param pfnRendezvous The callback.
2037 * @param pvUser User argument for the callback.
2038 *
2039 * @thread Any.
2040 */
2041VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
2042{
2043 /*
2044 * Validate input.
2045 */
2046 AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
2047 AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
2048 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2049 && !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
2050 AssertMsg( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
2051 || ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE
2052 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE),
2053 ("type %u\n", fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK));
2054
2055 VBOXSTRICTRC rcStrict;
2056 PVMCPU pVCpu = VMMGetCpu(pVM);
2057 if (!pVCpu)
2058 {
2059 /*
2060 * Forward the request to an EMT thread.
2061 */
2062 Log(("VMMR3EmtRendezvous: %#x non-EMT\n", fFlags));
2063 if (!(fFlags & VMMEMTRENDEZVOUS_FLAGS_PRIORITY))
2064 rcStrict = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
2065 else
2066 rcStrict = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
2067 Log(("VMMR3EmtRendezvous: %#x non-EMT returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
2068 }
2069 else if ( pVM->cCpus == 1
2070 || ( pVM->enmVMState == VMSTATE_DESTROYING
2071 && VMR3GetActiveEmts(pVM->pUVM) < pVM->cCpus ) )
2072 {
2073 /*
2074 * Shortcut for the single EMT case.
2075 *
2076         * We also end up here if EMT(0) (or others) tries to issue a rendezvous
2077 * during vmR3Destroy after other emulation threads have started terminating.
2078 */
2079 if (!pVCpu->vmm.s.fInRendezvous)
2080 {
2081 Log(("VMMR3EmtRendezvous: %#x EMT (uni)\n", fFlags));
2082 pVCpu->vmm.s.fInRendezvous = true;
2083 pVM->vmm.s.fRendezvousFlags = fFlags;
2084 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
2085 pVCpu->vmm.s.fInRendezvous = false;
2086 }
2087 else
2088 {
2089 /* Recursion. Do the same checks as in the SMP case. */
2090 Log(("VMMR3EmtRendezvous: %#x EMT (uni), recursion depth=%d\n", fFlags, pVM->vmm.s.cRendezvousRecursions));
2091 uint32_t fType = pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK;
2092 AssertLogRelReturn( !pVCpu->vmm.s.fInRendezvous
2093 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2094 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2095 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2096 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
2097 , VERR_DEADLOCK);
2098
2099 AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
2100 pVM->vmm.s.cRendezvousRecursions++;
2101 uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
2102 pVM->vmm.s.fRendezvousFlags = fFlags;
2103
2104 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
2105
2106 pVM->vmm.s.fRendezvousFlags = fParentFlags;
2107 pVM->vmm.s.cRendezvousRecursions--;
2108 }
2109 Log(("VMMR3EmtRendezvous: %#x EMT (uni) returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
2110 }
2111 else
2112 {
2113 /*
2114         * Spin lock. If busy, check for recursion; if not recursing, wait for
2115         * the other EMT to finish while keeping a lookout for the RENDEZVOUS FF.
2116 */
2117 int rc;
2118 rcStrict = VINF_SUCCESS;
2119 if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
2120 {
2121 /* Allow recursion in some cases. */
2122 if ( pVCpu->vmm.s.fInRendezvous
2123 && ( (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2124 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2125 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2126 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
2127 ))
2128 return VBOXSTRICTRC_TODO(vmmR3EmtRendezvousRecursive(pVM, pVCpu, fFlags, pfnRendezvous, pvUser));
2129
2130 AssertLogRelMsgReturn(!pVCpu->vmm.s.fInRendezvous, ("fRendezvousFlags=%#x\n", pVM->vmm.s.fRendezvousFlags),
2131 VERR_DEADLOCK);
2132
2133 Log(("VMMR3EmtRendezvous: %#x EMT#%u, waiting for lock...\n", fFlags, pVCpu->idCpu));
2134 while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
2135 {
2136 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2137 {
2138 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
2139 if ( rc != VINF_SUCCESS
2140 && ( rcStrict == VINF_SUCCESS
2141 || rcStrict > rc))
2142 rcStrict = rc;
2143 /** @todo Perhaps deal with termination here? */
2144 }
2145 ASMNopPause();
2146 }
2147 }
2148
2149 Log(("VMMR3EmtRendezvous: %#x EMT#%u\n", fFlags, pVCpu->idCpu));
2150 Assert(!VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS));
2151 Assert(!pVCpu->vmm.s.fInRendezvous);
2152 pVCpu->vmm.s.fInRendezvous = true;
2153
2154 /*
2155         * Clear the slate and set up the rendezvous. This is a semaphore ping-pong orgy. :-)
2156 */
2157 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2158 {
2159 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i], 0);
2160 AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2161 }
2162 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2163 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
2164 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
2165 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2166 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
2167 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
2168 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
2169 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
2170 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
2171 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
2172 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
2173
2174 /*
2175 * Set the FF and poke the other EMTs.
2176 */
2177 VM_FF_SET(pVM, VM_FF_EMT_RENDEZVOUS);
2178 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_POKE);
2179
2180 /*
2181 * Do the same ourselves.
2182 */
2183 VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /* fIsCaller */, fFlags, pfnRendezvous, pvUser);
2184
2185 /*
2186 * The caller waits for the other EMTs to be done and return before doing
2187         * the cleanup. This does away with wakeup / reset races we would otherwise
2188 * risk in the multiple release event semaphore code (hEvtRendezvousDoneCaller).
2189 */
2190 for (;;)
2191 {
2192 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
2193 AssertLogRelRC(rc);
2194 if (!pVM->vmm.s.fRendezvousRecursion)
2195 break;
2196 rcStrict2 = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict2);
2197 }
2198
2199 /*
2200 * Get the return code and clean up a little bit.
2201 */
2202 VBOXSTRICTRC rcStrict3 = pVM->vmm.s.i32RendezvousStatus;
2203 ASMAtomicWriteNullPtr((void * volatile *)&pVM->vmm.s.pfnRendezvous);
2204
2205 ASMAtomicWriteU32(&pVM->vmm.s.u32RendezvousLock, 0);
2206 pVCpu->vmm.s.fInRendezvous = false;
2207
2208 /*
2209 * Merge rcStrict, rcStrict2 and rcStrict3.
2210 */
2211 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2212 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
2213 if ( rcStrict2 != VINF_SUCCESS
2214 && ( rcStrict == VINF_SUCCESS
2215 || rcStrict > rcStrict2))
2216 rcStrict = rcStrict2;
2217 if ( rcStrict3 != VINF_SUCCESS
2218 && ( rcStrict == VINF_SUCCESS
2219 || rcStrict > rcStrict3))
2220 rcStrict = rcStrict3;
2221 Log(("VMMR3EmtRendezvous: %#x EMT#%u returns %Rrc\n", fFlags, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
2222 }
2223
2224 AssertLogRelMsgReturn( rcStrict <= VINF_SUCCESS
2225 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
2226 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
2227 VERR_IPE_UNEXPECTED_INFO_STATUS);
2228 return VBOXSTRICTRC_VAL(rcStrict);
2229}
2230
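/*
 * Usage sketch (illustrative only; the worker and its user argument are made
 * up): run a callback exactly once while every EMT is gathered, e.g. to flip
 * some global piece of state atomically wrt guest execution:
 *
 *      static DECLCALLBACK(VBOXSTRICTRC) myExampleWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *      {
 *          RT_NOREF(pVM, pVCpu);
 *          *(bool *)pvUser = true;         // executed on exactly one EMT, the others wait
 *          return VINF_SUCCESS;
 *      }
 *
 *      bool fFlipped = false;
 *      int  rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, myExampleWorker, &fFlipped);
 *
 * With the ONCE type the callback runs on the last EMT to arrive; use the
 * ONE_BY_ONE, ASCENDING or DESCENDING types when it must run on every vCPU.
 */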
2231
2232/**
2233 * Interface for vmR3SetHaltMethodU.
2234 *
2235 * @param pVCpu The cross context virtual CPU structure of the
2236 * calling EMT.
2237 * @param fMayHaltInRing0 The new state.
2238 * @param cNsSpinBlockThreshold The spin-vs-blocking threshold.
2239 * @thread EMT(pVCpu)
2240 *
2241 * @todo Move the EMT handling to VMM (or EM). I soooooo regret that VM
2242 * component.
2243 */
2244VMMR3_INT_DECL(void) VMMR3SetMayHaltInRing0(PVMCPU pVCpu, bool fMayHaltInRing0, uint32_t cNsSpinBlockThreshold)
2245{
2246 LogFlow(("VMMR3SetMayHaltInRing0(#%u, %d, %u)\n", pVCpu->idCpu, fMayHaltInRing0, cNsSpinBlockThreshold));
2247 pVCpu->vmm.s.fMayHaltInRing0 = fMayHaltInRing0;
2248 pVCpu->vmm.s.cNsSpinBlockThreshold = cNsSpinBlockThreshold;
2249}
2250
2251
2252/**
2253 * Read from the ring 0 jump buffer stack.
2254 *
2255 * @returns VBox status code.
2256 *
2257 * @param pVM The cross context VM structure.
2258 * @param idCpu The ID of the source CPU context (for the address).
2259 * @param R0Addr Where to start reading.
2260 * @param pvBuf Where to store the data we've read.
2261 * @param cbRead The number of bytes to read.
2262 */
2263VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead)
2264{
2265 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
2266 AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
2267 AssertReturn(cbRead < ~(size_t)0 / 2, VERR_INVALID_PARAMETER);
2268
2269 /*
2270     * Hopefully we've got all the requested bits.  If not, supply what we
2271     * can and zero the rest.
2272 */
2273 RTHCUINTPTR off = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
2274 if (off < pVCpu->vmm.s.AssertJmpBuf.cbStackValid)
2275 {
2276 size_t const cbValid = pVCpu->vmm.s.AssertJmpBuf.cbStackValid - off;
2277 if (cbRead <= cbValid)
2278 {
2279 memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbRead);
2280 return VINF_SUCCESS;
2281 }
2282
2283 memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbValid);
2284 RT_BZERO((uint8_t *)pvBuf + cbValid, cbRead - cbValid);
2285 }
2286 else
2287 RT_BZERO(pvBuf, cbRead);
2288
2289 /*
2290 * Supply the setjmp return RIP/EIP if requested.
2291 */
2292 if ( pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation + sizeof(RTR0UINTPTR) > R0Addr
2293 && pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation < R0Addr + cbRead)
2294 {
2295 uint8_t const *pbSrc = (uint8_t const *)&pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue;
2296 size_t cbSrc = sizeof(pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue);
2297 size_t offDst = 0;
2298 if (R0Addr < pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation)
2299 offDst = pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation - R0Addr;
2300 else if (R0Addr > pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation)
2301 {
2302 size_t offSrc = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation;
2303 Assert(offSrc < cbSrc);
2304 pbSrc -= offSrc;
2305 cbSrc -= offSrc;
2306 }
2307 if (cbSrc > cbRead - offDst)
2308 cbSrc = cbRead - offDst;
2309 memcpy((uint8_t *)pvBuf + offDst, pbSrc, cbSrc);
2310
2311 //if (cbSrc == cbRead)
2312 // rc = VINF_SUCCESS;
2313 }
2314
2315 return VINF_SUCCESS;
2316}
2317
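/*
 * Usage sketch (illustrative; uSomeR0StackAddr is a placeholder): after a
 * ring-0 assertion, debugger / DBGF code can peek at the copied ring-0 stack:
 *
 *      uint64_t au64Frame[4] = {0};
 *      int rc = VMMR3ReadR0Stack(pVM, 0, uSomeR0StackAddr, au64Frame, sizeof(au64Frame));
 *
 * where the second argument is the VCpu id and uSomeR0StackAddr lies within
 * the stack copy (typically derived from AssertJmpBuf.UnwindSp).  Bytes
 * outside the valid part of the copy come back zeroed instead of failing the
 * call, and the saved longjmp return PC is patched into the output when the
 * requested range covers its stack slot.
 */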
2318
2319/**
2320 * Used by the DBGF stack unwinder to initialize the register state.
2321 *
2322 * @param pUVM The user mode VM handle.
2323 * @param idCpu The ID of the CPU being unwound.
2324 * @param pState The unwind state to initialize.
2325 */
2326VMMR3_INT_DECL(void) VMMR3InitR0StackUnwindState(PUVM pUVM, VMCPUID idCpu, struct RTDBGUNWINDSTATE *pState)
2327{
2328 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2329 AssertReturnVoid(pVCpu);
2330
2331 /*
2332     * This would be all we really need here if we had proper unwind info (win64 only)...
2333 */
2334 pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.AssertJmpBuf.UnwindBp;
2335 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
2336 pState->uPc = pVCpu->vmm.s.AssertJmpBuf.UnwindPc;
2337
2338 /*
2339 * Locate the resume point on the stack.
2340 */
2341#ifdef RT_ARCH_AMD64
2342 /* This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-amd64.asm exactly. */
2343 uintptr_t off = 0;
2344# ifdef RT_OS_WINDOWS
2345 off += 0xa0; /* XMM6 thru XMM15 */
2346# endif
2347 pState->u.x86.uRFlags = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2348 off += 8;
2349 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2350 off += 8;
2351# ifdef RT_OS_WINDOWS
2352 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2353 off += 8;
2354 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2355 off += 8;
2356# endif
2357 pState->u.x86.auRegs[X86_GREG_x12] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2358 off += 8;
2359 pState->u.x86.auRegs[X86_GREG_x13] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2360 off += 8;
2361 pState->u.x86.auRegs[X86_GREG_x14] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2362 off += 8;
2363 pState->u.x86.auRegs[X86_GREG_x15] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2364 off += 8;
2365 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2366 off += 8;
2367 pState->uPc = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2368 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;
2369
2370#elif defined(RT_ARCH_X86)
2371 /* This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-x86.asm exactly. */
2372 uintptr_t off = 0;
2373 pState->u.x86.uRFlags = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2374 off += 4;
2375 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2376 off += 4;
2377 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2378 off += 4;
2379 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2380 off += 4;
2381 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2382 off += 4;
2383 pState->uPc = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2384 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;
2385
2386#elif defined(RT_ARCH_ARM64)
2387 /** @todo PORTME: arm ring-0 */
2388
2389#else
2390# error "Port me"
2391#endif
2392}
2393
2394
2395/**
2396 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
2397 *
2398 * @returns VBox status code.
2399 * @param pVM The cross context VM structure.
2400 * @param uOperation Operation to execute.
2401 * @param u64Arg Constant argument.
2402 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
2403 * details.
2404 */
2405VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
2406{
2407 PVMCPU pVCpu = VMMGetCpu(pVM);
2408 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
2409 return VMMR3CallR0Emt(pVM, pVCpu, (VMMR0OPERATION)uOperation, u64Arg, pReqHdr);
2410}
2411
2412
2413/**
2414 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
2415 *
2416 * @returns VBox status code.
2417 * @param pVM The cross context VM structure.
2418 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2419 * @param enmOperation Operation to execute.
2420 * @param u64Arg Constant argument.
2421 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
2422 * details.
2423 */
2424VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
2425{
2426 /*
2427 * Call ring-0.
2428 */
2429#ifdef NO_SUPCALLR0VMM
2430 int rc = VERR_GENERAL_FAILURE;
2431#else
2432 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
2433#endif
2434
2435 /*
2436 * Flush the logs and deal with ring-0 assertions.
2437 */
2438#ifdef LOG_ENABLED
2439 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
2440#endif
2441 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
2442 if (rc != VERR_VMM_RING0_ASSERTION)
2443 {
2444 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
2445 ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
2446 VERR_IPE_UNEXPECTED_INFO_STATUS);
2447 return rc;
2448 }
2449 return vmmR3HandleRing0Assert(pVM, pVCpu);
2450}
2451
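/*
 * Usage sketch (illustrative; MYEXAMPLEREQ and enmSomeOperation are
 * placeholders, not real VMMR0 request or operation types): request style
 * operations pass a SUPVMMR0REQHDR based packet, simpler ones pass NULL and
 * just use the u64Arg constant:
 *
 *      MYEXAMPLEREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      // ... fill in the request specific members ...
 *      int rc = VMMR3CallR0Emt(pVM, pVCpu, enmSomeOperation, 0, &Req.Hdr);
 *
 * Note that any informational ring-0 status other than VINF_SUCCESS trips the
 * assertion above and is converted to VERR_IPE_UNEXPECTED_INFO_STATUS.
 */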
2452
2453/**
2454 * Logs a ring-0 assertion ASAP after returning to ring-3.
2455 *
2456 * @returns VBox status code.
2457 * @param pVM The cross context VM structure.
2458 * @param pVCpu The cross context virtual CPU structure.
2459 */
2460static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu)
2461{
2462 RT_NOREF(pVCpu);
2463 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
2464 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
2465 return VERR_VMM_RING0_ASSERTION;
2466}
2467
2468
2469/**
2470 * Displays the force action flags.
2471 *
2472 * @param pVM The cross context VM structure.
2473 * @param pHlp The output helpers.
2474 * @param pszArgs The additional arguments (ignored).
2475 */
2476static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2477{
2478 int c;
2479 uint32_t f;
2480 NOREF(pszArgs);
2481
2482#define PRINT_FLAG(prf,flag) do { \
2483 if (f & (prf##flag)) \
2484 { \
2485 static const char *s_psz = #flag; \
2486 if (!(c % 6)) \
2487 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz); \
2488 else \
2489 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2490 c++; \
2491 f &= ~(prf##flag); \
2492 } \
2493 } while (0)
2494
2495#define PRINT_GROUP(prf,grp,sfx) do { \
2496 if (f & (prf##grp##sfx)) \
2497 { \
2498 static const char *s_psz = #grp; \
2499 if (!(c % 5)) \
2500 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : " Groups:\n", s_psz); \
2501 else \
2502 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2503 c++; \
2504 } \
2505 } while (0)
2506
2507 /*
2508 * The global flags.
2509 */
2510 const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
2511 pHlp->pfnPrintf(pHlp, "Global FFs: %#RX32", fGlobalForcedActions);
2512
2513 /* show the flag mnemonics */
2514 c = 0;
2515 f = fGlobalForcedActions;
2516 PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
2517 PRINT_FLAG(VM_FF_,PDM_QUEUES);
2518 PRINT_FLAG(VM_FF_,PDM_DMA);
2519 PRINT_FLAG(VM_FF_,DBGF);
2520 PRINT_FLAG(VM_FF_,REQUEST);
2521 PRINT_FLAG(VM_FF_,CHECK_VM_STATE);
2522 PRINT_FLAG(VM_FF_,RESET);
2523 PRINT_FLAG(VM_FF_,EMT_RENDEZVOUS);
2524 PRINT_FLAG(VM_FF_,PGM_NEED_HANDY_PAGES);
2525 PRINT_FLAG(VM_FF_,PGM_NO_MEMORY);
2526 PRINT_FLAG(VM_FF_,PGM_POOL_FLUSH_PENDING);
2527 PRINT_FLAG(VM_FF_,DEBUG_SUSPEND);
2528 if (f)
2529 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2530 else
2531 pHlp->pfnPrintf(pHlp, "\n");
2532
2533 /* the groups */
2534 c = 0;
2535 f = fGlobalForcedActions;
2536 PRINT_GROUP(VM_FF_,EXTERNAL_SUSPENDED,_MASK);
2537 PRINT_GROUP(VM_FF_,EXTERNAL_HALTED,_MASK);
2538 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE,_MASK);
2539 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2540 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_POST,_MASK);
2541 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY_POST,_MASK);
2542 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY,_MASK);
2543 PRINT_GROUP(VM_FF_,ALL_REM,_MASK);
2544 if (c)
2545 pHlp->pfnPrintf(pHlp, "\n");
2546
2547 /*
2548 * Per CPU flags.
2549 */
2550 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2551 {
2552 PVMCPU pVCpu = pVM->apCpusR3[i];
2553 const uint64_t fLocalForcedActions = pVCpu->fLocalForcedActions;
2554 pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX64", i, fLocalForcedActions);
2555
2556 /* show the flag mnemonics */
2557 c = 0;
2558 f = fLocalForcedActions;
2559#if defined(VBOX_VMM_TARGET_ARMV8)
2560 PRINT_FLAG(VMCPU_FF_,INTERRUPT_IRQ);
2561 PRINT_FLAG(VMCPU_FF_,INTERRUPT_FIQ);
2562#else
2563 PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
2564 PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
2565#endif
2566 PRINT_FLAG(VMCPU_FF_,TIMER);
2567 PRINT_FLAG(VMCPU_FF_,INTERRUPT_NMI);
2568 PRINT_FLAG(VMCPU_FF_,INTERRUPT_SMI);
2569 PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
2570 PRINT_FLAG(VMCPU_FF_,UNHALT);
2571 PRINT_FLAG(VMCPU_FF_,IEM);
2572 PRINT_FLAG(VMCPU_FF_,UPDATE_APIC);
2573 PRINT_FLAG(VMCPU_FF_,DBGF);
2574 PRINT_FLAG(VMCPU_FF_,REQUEST);
2575 PRINT_FLAG(VMCPU_FF_,HM_UPDATE_CR3);
2576 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
2577 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
2578 PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
2579 PRINT_FLAG(VMCPU_FF_,TO_R3);
2580 PRINT_FLAG(VMCPU_FF_,IOM);
2581 if (f)
2582 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX64\n", c ? "," : "", f);
2583 else
2584 pHlp->pfnPrintf(pHlp, "\n");
2585
2586 /* the groups */
2587 c = 0;
2588 f = fLocalForcedActions;
2589 PRINT_GROUP(VMCPU_FF_,EXTERNAL_SUSPENDED,_MASK);
2590 PRINT_GROUP(VMCPU_FF_,EXTERNAL_HALTED,_MASK);
2591 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE,_MASK);
2592 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2593 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_POST,_MASK);
2594 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
2595 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
2596 PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
2597 PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK);
2598 PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
2599 if (c)
2600 pHlp->pfnPrintf(pHlp, "\n");
2601 }
2602
2603#undef PRINT_FLAG
2604#undef PRINT_GROUP
2605}
2606