VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@67955

Last change on this file was 67955, checked in by vboxsync, 8 years ago

VMM,SUPDrv: Started on some session/VMMR0 nits. I/O control interface version bump (sorry).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 84.3 KB
1/* $Id: VMMR0.cpp 67955 2017-07-13 21:13:23Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/tm.h>
31#include "VMMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/vmm/gvm.h>
34#ifdef VBOX_WITH_PCI_PASSTHROUGH
35# include <VBox/vmm/pdmpci.h>
36#endif
37#include <VBox/vmm/apic.h>
38
39#include <VBox/vmm/gvmm.h>
40#include <VBox/vmm/gmm.h>
41#include <VBox/vmm/gim.h>
42#include <VBox/intnet.h>
43#include <VBox/vmm/hm.h>
44#include <VBox/param.h>
45#include <VBox/err.h>
46#include <VBox/version.h>
47#include <VBox/log.h>
48
49#include <iprt/asm-amd64-x86.h>
50#include <iprt/assert.h>
51#include <iprt/crc.h>
52#include <iprt/mp.h>
53#include <iprt/once.h>
54#include <iprt/stdarg.h>
55#include <iprt/string.h>
56#include <iprt/thread.h>
57#include <iprt/timer.h>
58
59#include "dtrace/VBoxVMM.h"
60
61
62#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
63# pragma intrinsic(_AddressOfReturnAddress)
64#endif
65
66#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
67# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
68#endif
69
70
71
72/*********************************************************************************************************************************
73* Defined Constants And Macros *
74*********************************************************************************************************************************/
75/** @def VMM_CHECK_SMAP_SETUP
76 * SMAP check setup. */
77/** @def VMM_CHECK_SMAP_CHECK
78 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
79 * it will be logged and @a a_BadExpr is executed. */
80/** @def VMM_CHECK_SMAP_CHECK2
81 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
82 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
83 * executed. */
84#if defined(VBOX_STRICT) || 1
85# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
86# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
87 do { \
88 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
89 { \
90 RTCCUINTREG fEflCheck = ASMGetFlags(); \
91 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
92 { /* likely */ } \
93 else \
94 { \
95 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
96 a_BadExpr; \
97 } \
98 } \
99 } while (0)
100# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
101 do { \
102 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
103 { \
104 RTCCUINTREG fEflCheck = ASMGetFlags(); \
105 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
106 { /* likely */ } \
107 else \
108 { \
109 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
110 RTStrPrintf((a_pVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pVM)->vmm.s.szRing0AssertMsg1), \
111 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
112 a_BadExpr; \
113 } \
114 } \
115 } while (0)
116#else
117# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
118# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
119# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
120#endif
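
/* Illustrative only (not part of the original source): a rough sketch of how
 * the SMAP check macros above are meant to be used by a ring-0 entry point,
 * mirroring ModuleInit and VMMR0EntryFast further down. The function name
 * vmmR0ExampleEntry is made up for this example:
 * @code
 *     static int vmmR0ExampleEntry(PVM pVM)
 *     {
 *         VMM_CHECK_SMAP_SETUP();             // caches SUPR0GetKernelFeatures() in fKernelFeatures.
 *         VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR); // log + bail if EFLAGS.AC is clear.
 *
 *         int rc = VINF_SUCCESS;              // ... the real work would go here ...
 *
 *         // The 2nd variant also records the failure in the VM's ring-0 assertion buffer.
 *         VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
 *         return rc;
 *     }
 * @endcode
 */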
121
122
123/*********************************************************************************************************************************
124* Internal Functions *
125*********************************************************************************************************************************/
126RT_C_DECLS_BEGIN
127#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
128extern uint64_t __udivdi3(uint64_t, uint64_t);
129extern uint64_t __umoddi3(uint64_t, uint64_t);
130#endif
131RT_C_DECLS_END
132
133
134/*********************************************************************************************************************************
135* Global Variables *
136*********************************************************************************************************************************/
137/** Drag in necessary library bits.
138 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
139PFNRT g_VMMR0Deps[] =
140{
141 (PFNRT)RTCrc32,
142 (PFNRT)RTOnce,
143#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
144 (PFNRT)__udivdi3,
145 (PFNRT)__umoddi3,
146#endif
147 NULL
148};
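
/* Illustrative only (not part of the original source): to force an additional
 * runtime symbol into VMMR0.r0 for the benefit of the VBoxDD*R0.r0 modules,
 * one would presumably append another cast entry to the array above, before
 * the terminating NULL, e.g. (PFNRT)RTStrPrintf. */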
149
150#ifdef RT_OS_SOLARIS
151/* Dependency information for the native solaris loader. */
152extern "C" { char _depends_on[] = "vboxdrv"; }
153#endif
154
155
156
157/**
158 * Initialize the module.
159 * This is called when we're first loaded.
160 *
161 * @returns 0 on success.
162 * @returns VBox status on failure.
163 * @param hMod Image handle for use in APIs.
164 */
165DECLEXPORT(int) ModuleInit(void *hMod)
166{
167 VMM_CHECK_SMAP_SETUP();
168 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
169
170#ifdef VBOX_WITH_DTRACE_R0
171 /*
172 * The first thing to do is register the static tracepoints.
173 * (Deregistration is automatic.)
174 */
175 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
176 if (RT_FAILURE(rc2))
177 return rc2;
178#endif
179 LogFlow(("ModuleInit:\n"));
180
181#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
182 /*
183 * Display the CMOS debug code.
184 */
185 ASMOutU8(0x72, 0x03);
186 uint8_t bDebugCode = ASMInU8(0x73);
187 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
188 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
189#endif
190
191 /*
192 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
193 */
194 int rc = vmmInitFormatTypes();
195 if (RT_SUCCESS(rc))
196 {
197 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
198 rc = GVMMR0Init();
199 if (RT_SUCCESS(rc))
200 {
201 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
202 rc = GMMR0Init();
203 if (RT_SUCCESS(rc))
204 {
205 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
206 rc = HMR0Init();
207 if (RT_SUCCESS(rc))
208 {
209 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
210 rc = PGMRegisterStringFormatTypes();
211 if (RT_SUCCESS(rc))
212 {
213 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
214#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
215 rc = PGMR0DynMapInit();
216#endif
217 if (RT_SUCCESS(rc))
218 {
219 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
220 rc = IntNetR0Init();
221 if (RT_SUCCESS(rc))
222 {
223#ifdef VBOX_WITH_PCI_PASSTHROUGH
224 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
225 rc = PciRawR0Init();
226#endif
227 if (RT_SUCCESS(rc))
228 {
229 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
230 rc = CPUMR0ModuleInit();
231 if (RT_SUCCESS(rc))
232 {
233#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
234 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
235 rc = vmmR0TripleFaultHackInit();
236 if (RT_SUCCESS(rc))
237#endif
238 {
239 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
240 if (RT_SUCCESS(rc))
241 {
242 LogFlow(("ModuleInit: returns success.\n"));
243 return VINF_SUCCESS;
244 }
245 }
246
247 /*
248 * Bail out.
249 */
250#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
251 vmmR0TripleFaultHackTerm();
252#endif
253 }
254 else
255 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
256#ifdef VBOX_WITH_PCI_PASSTHROUGH
257 PciRawR0Term();
258#endif
259 }
260 else
261 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
262 IntNetR0Term();
263 }
264 else
265 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
266#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
267 PGMR0DynMapTerm();
268#endif
269 }
270 else
271 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
272 PGMDeregisterStringFormatTypes();
273 }
274 else
275 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
276 HMR0Term();
277 }
278 else
279 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
280 GMMR0Term();
281 }
282 else
283 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
284 GVMMR0Term();
285 }
286 else
287 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
288 vmmTermFormatTypes();
289 }
290 else
291 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
292
293 LogFlow(("ModuleInit: failed %Rrc\n", rc));
294 return rc;
295}
296
297
298/**
299 * Terminate the module.
300 * This is called when we're finally unloaded.
301 *
302 * @param hMod Image handle for use in APIs.
303 */
304DECLEXPORT(void) ModuleTerm(void *hMod)
305{
306 NOREF(hMod);
307 LogFlow(("ModuleTerm:\n"));
308
309 /*
310 * Terminate the CPUM module (Local APIC cleanup).
311 */
312 CPUMR0ModuleTerm();
313
314 /*
315 * Terminate the internal network service.
316 */
317 IntNetR0Term();
318
319 /*
320 * PGM (Darwin), HM and PciRaw global cleanup.
321 */
322#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
323 PGMR0DynMapTerm();
324#endif
325#ifdef VBOX_WITH_PCI_PASSTHROUGH
326 PciRawR0Term();
327#endif
328 PGMDeregisterStringFormatTypes();
329 HMR0Term();
330#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
331 vmmR0TripleFaultHackTerm();
332#endif
333
334 /*
335 * Destroy the GMM and GVMM instances.
336 */
337 GMMR0Term();
338 GVMMR0Term();
339
340 vmmTermFormatTypes();
341
342 LogFlow(("ModuleTerm: returns\n"));
343}
344
345
346/**
347 * Initializes the R0 driver for a particular VM instance.
348 *
349 * @returns VBox status code.
350 *
351 * @param pVM The cross context VM structure.
352 * @param uSvnRev The SVN revision of the ring-3 part.
353 * @param uBuildType Build type indicator.
354 * @thread EMT.
355 */
356static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
357{
358 VMM_CHECK_SMAP_SETUP();
359 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
360
361 /*
362 * Match the SVN revisions and build type.
363 */
364 if (uSvnRev != VMMGetSvnRev())
365 {
366 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
367 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
368 return VERR_VMM_R0_VERSION_MISMATCH;
369 }
370 if (uBuildType != vmmGetBuildType())
371 {
372 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
373 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
374 return VERR_VMM_R0_VERSION_MISMATCH;
375 }
376 if ( !VALID_PTR(pVM)
377 || pVM->pVMR0 != pVM)
378 return VERR_INVALID_PARAMETER;
379
380
381#ifdef LOG_ENABLED
382 /*
383 * Register the EMT R0 logger instance for VCPU 0.
384 */
385 PVMCPU pVCpu = &pVM->aCpus[0];
386
387 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
388 if (pR0Logger)
389 {
390# if 0 /* testing of the logger. */
391 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
392 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
393 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
394 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
395
396 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
397 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
398 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
399 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
400
401 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
402 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
403 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
404 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
405
406 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
407 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
408 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
409 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
410 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
411 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
412
413 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
414 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
415
416 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
417 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
418 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
419# endif
420 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
421 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
422 pR0Logger->fRegistered = true;
423 }
424#endif /* LOG_ENABLED */
425
426 /*
427 * Check if the host supports high resolution timers or not.
428 */
429 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
430 && !RTTimerCanDoHighResolution())
431 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
432
433 /*
434 * Initialize the per VM data for GVMM and GMM.
435 */
436 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
437 int rc = GVMMR0InitVM(pVM);
438// if (RT_SUCCESS(rc))
439// rc = GMMR0InitPerVMData(pVM);
440 if (RT_SUCCESS(rc))
441 {
442 /*
443 * Init HM, CPUM and PGM (Darwin only).
444 */
445 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
446 rc = HMR0InitVM(pVM);
447 if (RT_SUCCESS(rc))
448 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
449 if (RT_SUCCESS(rc))
450 {
451 rc = CPUMR0InitVM(pVM);
452 if (RT_SUCCESS(rc))
453 {
454 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
455#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
456 rc = PGMR0DynMapInitVM(pVM);
457#endif
458 if (RT_SUCCESS(rc))
459 {
460 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
461#ifdef VBOX_WITH_PCI_PASSTHROUGH
462 rc = PciRawR0InitVM(pVM);
463#endif
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
467 rc = GIMR0InitVM(pVM);
468 if (RT_SUCCESS(rc))
469 {
470 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
471 if (RT_SUCCESS(rc))
472 {
473 GVMMR0DoneInitVM(pVM);
474 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
475 return rc;
476 }
477
478 /* Bail out. */
479 GIMR0TermVM(pVM);
480 }
481#ifdef VBOX_WITH_PCI_PASSTHROUGH
482 PciRawR0TermVM(pVM);
483#endif
484 }
485 }
486 }
487 HMR0TermVM(pVM);
488 }
489 }
490
491 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
492 return rc;
493}
494
495
496/**
497 * Terminates the R0 bits for a particular VM instance.
498 *
499 * This is normally called by ring-3 as part of the VM termination process, but
500 * may alternatively be called during the support driver session cleanup when
501 * the VM object is destroyed (see GVMM).
502 *
503 * @returns VBox status code.
504 *
505 * @param pVM The cross context VM structure.
506 * @param pGVM Pointer to the global VM structure. Optional.
507 * @thread EMT or session clean up thread.
508 */
509VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
510{
511#ifdef VBOX_WITH_PCI_PASSTHROUGH
512 PciRawR0TermVM(pVM);
513#endif
514
515 /*
516 * Tell GVMM what we're up to and check that we only do this once.
517 */
518 if (GVMMR0DoingTermVM(pVM, pGVM))
519 {
520 GIMR0TermVM(pVM);
521
522 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
523 * here to make sure we don't leak any shared pages if we crash... */
524#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
525 PGMR0DynMapTermVM(pVM);
526#endif
527 HMR0TermVM(pVM);
528 }
529
530 /*
531 * Deregister the logger.
532 */
533 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
534 return VINF_SUCCESS;
535}
536
537
538/**
539 * VMM ring-0 thread-context callback.
540 *
541 * This does common HM state updating and calls the HM-specific thread-context
542 * callback.
543 *
544 * @param enmEvent The thread-context event.
545 * @param pvUser Opaque pointer to the VMCPU.
546 *
547 * @thread EMT(pvUser)
548 */
549static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
550{
551 PVMCPU pVCpu = (PVMCPU)pvUser;
552
553 switch (enmEvent)
554 {
555 case RTTHREADCTXEVENT_IN:
556 {
557 /*
558 * Linux may call us with preemption enabled (really!) but technically we
559 * cannot get preempted here, otherwise we end up in an infinite recursion
560 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
561 * ad infinitum). Let's just disable preemption for now...
562 */
563 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
564 * preemption after doing the callout (one or two functions up the
565 * call chain). */
566 /** @todo r=ramshankar: See @bugref{5313#c30}. */
567 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
568 RTThreadPreemptDisable(&ParanoidPreemptState);
569
570 /* We need to update the VCPU <-> host CPU mapping. */
571 RTCPUID idHostCpu;
572 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
573 pVCpu->iHostCpuSet = iHostCpuSet;
574 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
575
576 /* In the very unlikely event that the GIP delta for the CPU we're being
577 rescheduled onto needs calculating, try to force a return to ring-3.
578 We unfortunately cannot do the measurements right here. */
579 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
580 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
581
582 /* Invoke the HM-specific thread-context callback. */
583 HMR0ThreadCtxCallback(enmEvent, pvUser);
584
585 /* Restore preemption. */
586 RTThreadPreemptRestore(&ParanoidPreemptState);
587 break;
588 }
589
590 case RTTHREADCTXEVENT_OUT:
591 {
592 /* Invoke the HM-specific thread-context callback. */
593 HMR0ThreadCtxCallback(enmEvent, pvUser);
594
595 /*
596 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
597 * have the same host CPU associated with it.
598 */
599 pVCpu->iHostCpuSet = UINT32_MAX;
600 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
601 break;
602 }
603
604 default:
605 /* Invoke the HM-specific thread-context callback. */
606 HMR0ThreadCtxCallback(enmEvent, pvUser);
607 break;
608 }
609}
610
611
612/**
613 * Creates thread switching hook for the current EMT thread.
614 *
615 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
616 * platform does not implement switcher hooks, no hooks will be created and the
617 * member will be set to NIL_RTTHREADCTXHOOK.
618 *
619 * @returns VBox status code.
620 * @param pVCpu The cross context virtual CPU structure.
621 * @thread EMT(pVCpu)
622 */
623VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
624{
625 VMCPU_ASSERT_EMT(pVCpu);
626 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
627
628#if 1 /* To disable this stuff change to zero. */
629 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
630 if (RT_SUCCESS(rc))
631 return rc;
632#else
633 RT_NOREF(vmmR0ThreadCtxCallback);
634 int rc = VERR_NOT_SUPPORTED;
635#endif
636
637 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
638 if (rc == VERR_NOT_SUPPORTED)
639 return VINF_SUCCESS;
640
641 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
642 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
643}
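
/* Illustrative only (not part of the original source): a rough sketch of the
 * context hook life cycle as implemented by the functions in this file,
 * assuming an EMT that runs guest code via VMMR0_DO_HM_RUN:
 * @code
 *     VMMR0ThreadCtxHookCreateForEmt(pVCpu);          // at GVMMR0CreateVM / GVMMR0RegisterVCpu time.
 *     // ...
 *     RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook);   // just before entering HM context (see VMMR0_DO_HM_RUN).
 *     // vmmR0ThreadCtxCallback() now fires on every reschedule (IN/OUT events).
 *     // ...
 *     VMMR0ThreadCtxHookDisable(pVCpu);               // before returning to ring-3.
 *     // ...
 *     VMMR0ThreadCtxHookDestroyForEmt(pVCpu);         // at VCPU / VM destruction time.
 * @endcode
 */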
644
645
646/**
647 * Destroys the thread switching hook for the specified VCPU.
648 *
649 * @param pVCpu The cross context virtual CPU structure.
650 * @remarks Can be called from any thread.
651 */
652VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
653{
654 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
655 AssertRC(rc);
656}
657
658
659/**
660 * Disables the thread switching hook for this VCPU (if we got one).
661 *
662 * @param pVCpu The cross context virtual CPU structure.
663 * @thread EMT(pVCpu)
664 *
665 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
666 * this call. This means you have to be careful with what you do!
667 */
668VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
669{
670 /*
671 * Clear the VCPU <-> host CPU mapping as we've left HM context.
672 * @bugref{7726#c19} explains the need for this trick:
673 *
674 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
675 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
676 * longjmp & normal return to ring-3, which opens a window where we may be
677 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
678 * the CPU starts executing a different EMT. Both functions first disable
679 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
680 * an opening for getting preempted.
681 */
682 /** @todo Make HM not need this API! Then we could leave the hooks enabled
683 * all the time. */
684 /** @todo move this into the context hook disabling if(). */
685 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
686
687 /*
688 * Disable the context hook, if we got one.
689 */
690 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
691 {
692 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
693 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
694 AssertRC(rc);
695 }
696}
697
698
699/**
700 * Internal version of VMMR0ThreadCtxHookIsEnabled.
701 *
702 * @returns true if registered, false otherwise.
703 * @param pVCpu The cross context virtual CPU structure.
704 */
705DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
706{
707 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
708}
709
710
711/**
712 * Whether thread-context hooks are registered for this VCPU.
713 *
714 * @returns true if registered, false otherwise.
715 * @param pVCpu The cross context virtual CPU structure.
716 */
717VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
718{
719 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
720}
721
722
723#ifdef VBOX_WITH_STATISTICS
724/**
725 * Record return code statistics
726 * @param pVM The cross context VM structure.
727 * @param pVCpu The cross context virtual CPU structure.
728 * @param rc The status code.
729 */
730static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
731{
732 /*
733 * Collect statistics.
734 */
735 switch (rc)
736 {
737 case VINF_SUCCESS:
738 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
739 break;
740 case VINF_EM_RAW_INTERRUPT:
741 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
742 break;
743 case VINF_EM_RAW_INTERRUPT_HYPER:
744 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
745 break;
746 case VINF_EM_RAW_GUEST_TRAP:
747 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
748 break;
749 case VINF_EM_RAW_RING_SWITCH:
750 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
751 break;
752 case VINF_EM_RAW_RING_SWITCH_INT:
753 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
754 break;
755 case VINF_EM_RAW_STALE_SELECTOR:
756 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
757 break;
758 case VINF_EM_RAW_IRET_TRAP:
759 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
760 break;
761 case VINF_IOM_R3_IOPORT_READ:
762 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
763 break;
764 case VINF_IOM_R3_IOPORT_WRITE:
765 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
766 break;
767 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
768 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
769 break;
770 case VINF_IOM_R3_MMIO_READ:
771 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
772 break;
773 case VINF_IOM_R3_MMIO_WRITE:
774 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
775 break;
776 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
777 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
778 break;
779 case VINF_IOM_R3_MMIO_READ_WRITE:
780 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
781 break;
782 case VINF_PATM_HC_MMIO_PATCH_READ:
783 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
784 break;
785 case VINF_PATM_HC_MMIO_PATCH_WRITE:
786 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
787 break;
788 case VINF_CPUM_R3_MSR_READ:
789 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
790 break;
791 case VINF_CPUM_R3_MSR_WRITE:
792 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
793 break;
794 case VINF_EM_RAW_EMULATE_INSTR:
795 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
796 break;
797 case VINF_EM_RAW_EMULATE_IO_BLOCK:
798 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
799 break;
800 case VINF_PATCH_EMULATE_INSTR:
801 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
802 break;
803 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
804 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
805 break;
806 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
807 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
808 break;
809 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
810 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
811 break;
812 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
813 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
814 break;
815 case VINF_CSAM_PENDING_ACTION:
816 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
817 break;
818 case VINF_PGM_SYNC_CR3:
819 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
820 break;
821 case VINF_PATM_PATCH_INT3:
822 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
823 break;
824 case VINF_PATM_PATCH_TRAP_PF:
825 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
826 break;
827 case VINF_PATM_PATCH_TRAP_GP:
828 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
829 break;
830 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
831 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
832 break;
833 case VINF_EM_RESCHEDULE_REM:
834 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
835 break;
836 case VINF_EM_RAW_TO_R3:
837 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
838 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
839 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
840 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
841 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
842 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
843 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
844 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
845 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
846 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
847 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
848 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
849 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
850 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
851 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
852 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
853 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
854 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
855 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
856 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
858 else
859 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
860 break;
861
862 case VINF_EM_RAW_TIMER_PENDING:
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
864 break;
865 case VINF_EM_RAW_INTERRUPT_PENDING:
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
867 break;
868 case VINF_VMM_CALL_HOST:
869 switch (pVCpu->vmm.s.enmCallRing3Operation)
870 {
871 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
873 break;
874 case VMMCALLRING3_PDM_LOCK:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
876 break;
877 case VMMCALLRING3_PGM_POOL_GROW:
878 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
879 break;
880 case VMMCALLRING3_PGM_LOCK:
881 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
882 break;
883 case VMMCALLRING3_PGM_MAP_CHUNK:
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
885 break;
886 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
887 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
888 break;
889 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
890 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
891 break;
892 case VMMCALLRING3_VMM_LOGGER_FLUSH:
893 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
894 break;
895 case VMMCALLRING3_VM_SET_ERROR:
896 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
897 break;
898 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
899 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
900 break;
901 case VMMCALLRING3_VM_R0_ASSERTION:
902 default:
903 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
904 break;
905 }
906 break;
907 case VINF_PATM_DUPLICATE_FUNCTION:
908 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
909 break;
910 case VINF_PGM_CHANGE_MODE:
911 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
912 break;
913 case VINF_PGM_POOL_FLUSH_PENDING:
914 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
915 break;
916 case VINF_EM_PENDING_REQUEST:
917 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
918 break;
919 case VINF_EM_HM_PATCH_TPR_INSTR:
920 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
921 break;
922 default:
923 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
924 break;
925 }
926}
927#endif /* VBOX_WITH_STATISTICS */
928
929
930/**
931 * The Ring 0 entry point, called by the fast-ioctl path.
932 *
933 * @param pGVM The global (ring-0) VM structure.
934 * @param pVM The cross context VM structure.
935 * The return code is stored in pVM->vmm.s.iLastGZRc.
936 * @param idCpu The Virtual CPU ID of the calling EMT.
937 * @param enmOperation Which operation to execute.
938 * @remarks Assume called with interrupts _enabled_.
939 */
940VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
941{
942 /*
943 * Validation.
944 */
945 if ( idCpu < pGVM->cCpus
946 && pGVM->cCpus == pVM->cCpus)
947 { /*likely*/ }
948 else
949 {
950 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
951 return;
952 }
953
954 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
955 PVMCPU pVCpu = &pVM->aCpus[idCpu];
956 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
957 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
958 && pVCpu->hNativeThreadR0 == hNativeThread))
959 { /* likely */ }
960 else
961 {
962 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
963 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
964 return;
965 }
966
967 /*
968 * SMAP fun.
969 */
970 VMM_CHECK_SMAP_SETUP();
971 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
972
973 /*
974 * Perform requested operation.
975 */
976 switch (enmOperation)
977 {
978 /*
979 * Switch to GC and run guest raw mode code.
980 * Disable interrupts before doing the world switch.
981 */
982 case VMMR0_DO_RAW_RUN:
983 {
984#ifdef VBOX_WITH_RAW_MODE
985# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
986 /* Some safety precautions first. */
987 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
988 {
989 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
990 break;
991 }
992# endif
993
994 /*
995 * Disable preemption.
996 */
997 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
998 RTThreadPreemptDisable(&PreemptState);
999
1000 /*
1001 * Get the host CPU identifiers, make sure they are valid and that
1002 * we've got a TSC delta for the CPU.
1003 */
1004 RTCPUID idHostCpu;
1005 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1006 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1007 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1008 {
1009 /*
1010 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
1011 */
1012# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1013 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1014# endif
1015 pVCpu->iHostCpuSet = iHostCpuSet;
1016 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1017
1018 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1019 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1020
1021 /*
1022 * We might need to disable VT-x if the active switcher turns off paging.
1023 */
1024 bool fVTxDisabled;
1025 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1026 if (RT_SUCCESS(rc))
1027 {
1028 /*
1029 * Disable interrupts and run raw-mode code. The loop is for efficiently
1030 * dispatching tracepoints that fired in raw-mode context.
1031 */
1032 RTCCUINTREG uFlags = ASMIntDisableFlags();
1033
1034 for (;;)
1035 {
1036 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1037 TMNotifyStartOfExecution(pVCpu);
1038
1039 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1040 pVCpu->vmm.s.iLastGZRc = rc;
1041
1042 TMNotifyEndOfExecution(pVCpu);
1043 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1044
1045 if (rc != VINF_VMM_CALL_TRACER)
1046 break;
1047 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1048 }
1049
1050 /*
1051 * Re-enable VT-x before we dispatch any pending host interrupts and
1052 * re-enable interrupts.
1053 */
1054 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1055
1056 if ( rc == VINF_EM_RAW_INTERRUPT
1057 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1058 TRPMR0DispatchHostInterrupt(pVM);
1059
1060 ASMSetFlags(uFlags);
1061
1062 /* Fire dtrace probe and collect statistics. */
1063 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1064# ifdef VBOX_WITH_STATISTICS
1065 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1066 vmmR0RecordRC(pVM, pVCpu, rc);
1067# endif
1068 }
1069 else
1070 pVCpu->vmm.s.iLastGZRc = rc;
1071
1072 /*
1073 * Invalidate the host CPU identifiers as we restore preemption.
1074 */
1075 pVCpu->iHostCpuSet = UINT32_MAX;
1076 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1077
1078 RTThreadPreemptRestore(&PreemptState);
1079 }
1080 /*
1081 * Invalid CPU set index or TSC delta in need of measuring.
1082 */
1083 else
1084 {
1085 RTThreadPreemptRestore(&PreemptState);
1086 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1087 {
1088 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1089 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1090 0 /*default cTries*/);
1091 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1092 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1093 else
1094 pVCpu->vmm.s.iLastGZRc = rc;
1095 }
1096 else
1097 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1098 }
1099
1100#else /* !VBOX_WITH_RAW_MODE */
1101 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1102#endif
1103 break;
1104 }
1105
1106 /*
1107 * Run guest code using the available hardware acceleration technology.
1108 */
1109 case VMMR0_DO_HM_RUN:
1110 {
1111 /*
1112 * Disable preemption.
1113 */
1114 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1115 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1116 RTThreadPreemptDisable(&PreemptState);
1117
1118 /*
1119 * Get the host CPU identifiers, make sure they are valid and that
1120 * we've got a TSC delta for the CPU.
1121 */
1122 RTCPUID idHostCpu;
1123 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1124 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1125 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1126 {
1127 pVCpu->iHostCpuSet = iHostCpuSet;
1128 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1129
1130 /*
1131 * Update the periodic preemption timer if it's active.
1132 */
1133 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1134 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1135 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1136
1137#ifdef LOG_ENABLED
1138 /*
1139 * Ugly: Lazy registration of ring 0 loggers.
1140 */
1141 if (pVCpu->idCpu > 0)
1142 {
1143 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1144 if ( pR0Logger
1145 && RT_UNLIKELY(!pR0Logger->fRegistered))
1146 {
1147 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1148 pR0Logger->fRegistered = true;
1149 }
1150 }
1151#endif
1152
1153#ifdef VMM_R0_TOUCH_FPU
1154 /*
1155 * Make sure we've got the FPU state loaded so we don't need to clear
1156 * CR0.TS and get out of sync with the host kernel when loading the guest
1157 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1158 */
1159 CPUMR0TouchHostFpu();
1160#endif
1161 int rc;
1162 bool fPreemptRestored = false;
1163 if (!HMR0SuspendPending())
1164 {
1165 /*
1166 * Enable the context switching hook.
1167 */
1168 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1169 {
1170 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1171 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1172 }
1173
1174 /*
1175 * Enter HM context.
1176 */
1177 rc = HMR0Enter(pVM, pVCpu);
1178 if (RT_SUCCESS(rc))
1179 {
1180 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1181
1182 /*
1183 * When preemption hooks are in place, enable preemption now that
1184 * we're in HM context.
1185 */
1186 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1187 {
1188 fPreemptRestored = true;
1189 RTThreadPreemptRestore(&PreemptState);
1190 }
1191
1192 /*
1193 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1194 */
1195 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1196 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1197 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1198
1199 /*
1200 * Assert sanity on the way out. Using manual assertion code here as normal
1201 * assertions would panic the host since we're outside the setjmp/longjmp zone.
1202 */
1203 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1204 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1205 {
1206 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1207 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1208 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1209 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1210 }
1211 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1212 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1213 {
1214 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1215 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1216 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1217 rc = VERR_INVALID_STATE;
1218 }
1219
1220 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1221 }
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1223
1224 /*
1225 * Invalidate the host CPU identifiers before we disable the context
1226 * hook / restore preemption.
1227 */
1228 pVCpu->iHostCpuSet = UINT32_MAX;
1229 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1230
1231 /*
1232 * Disable context hooks. Due to unresolved cleanup issues, we
1233 * cannot leave the hooks enabled when we return to ring-3.
1234 *
1235 * Note! At the moment HM may also have disabled the hook
1236 * when we get here, but the IPRT API handles that.
1237 */
1238 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1239 {
1240 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1241 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1242 }
1243 }
1244 /*
1245 * The system is about to go into suspend mode; go back to ring 3.
1246 */
1247 else
1248 {
1249 rc = VINF_EM_RAW_INTERRUPT;
1250 pVCpu->iHostCpuSet = UINT32_MAX;
1251 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1252 }
1253
1254 /** @todo When HM stops messing with the context hook state, we'll disable
1255 * preemption again before the RTThreadCtxHookDisable call. */
1256 if (!fPreemptRestored)
1257 RTThreadPreemptRestore(&PreemptState);
1258
1259 pVCpu->vmm.s.iLastGZRc = rc;
1260
1261 /* Fire dtrace probe and collect statistics. */
1262 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1263#ifdef VBOX_WITH_STATISTICS
1264 vmmR0RecordRC(pVM, pVCpu, rc);
1265#endif
1266 }
1267 /*
1268 * Invalid CPU set index or TSC delta in need of measuring.
1269 */
1270 else
1271 {
1272 pVCpu->iHostCpuSet = UINT32_MAX;
1273 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1274 RTThreadPreemptRestore(&PreemptState);
1275 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1276 {
1277 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1278 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1279 0 /*default cTries*/);
1280 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1281 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1282 else
1283 pVCpu->vmm.s.iLastGZRc = rc;
1284 }
1285 else
1286 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1287 }
1288 break;
1289 }
1290
1291 /*
1292 * For profiling.
1293 */
1294 case VMMR0_DO_NOP:
1295 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1296 break;
1297
1298 /*
1299 * Impossible.
1300 */
1301 default:
1302 AssertMsgFailed(("%#x\n", enmOperation));
1303 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1304 break;
1305 }
1306 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1307}
1308
1309
1310/**
1311 * Validates a session or VM session argument.
1312 *
1313 * @returns true / false accordingly.
1314 * @param pVM The cross context VM structure.
1315 * @param pClaimedSession The session claim to validate.
1316 * @param pSession The session argument.
1317 */
1318DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1319{
1320 /* This must be set! */
1321 if (!pSession)
1322 return false;
1323
1324 /* Only one out of the two. */
1325 if (pVM && pClaimedSession)
1326 return false;
1327 if (pVM)
1328 pClaimedSession = pVM->pSession;
1329 return pClaimedSession == pSession;
1330}
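
/* Illustrative only (not part of the original source): the request dispatcher
 * below uses this helper to tie a request's claimed session to the caller,
 * for example in the VMMR0_DO_INTNET_OPEN case:
 * @code
 *     PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
 *     if (!pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession))
 *         return VERR_INVALID_PARAMETER;
 * @endcode
 */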
1331
1332
1333/**
1334 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1335 * called through a longjmp so we can exit safely on failure.
1336 *
1337 * @returns VBox status code.
1338 * @param pGVM The global (ring-0) VM structure.
1339 * @param pVM The cross context VM structure.
1340 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1341 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1342 * @param enmOperation Which operation to execute.
1343 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1344 * The support driver validates this if it's present.
1345 * @param u64Arg Some simple constant argument.
1346 * @param pSession The session of the caller.
1347 *
1348 * @remarks Assume called with interrupts _enabled_.
1349 */
1350static int vmmR0EntryExWorker(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1351 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1352{
1353 /*
1354 * Validate pGVM, pVM and idCpu for consistency and validity.
1355 */
1356 if ( pGVM != NULL
1357 || pVM != NULL)
1358 {
1359 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1360 && RT_VALID_PTR(pVM)
1361 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1362 { /* likely */ }
1363 else
1364 {
1365 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1366 return VERR_INVALID_POINTER;
1367 }
1368
1369 if (RT_LIKELY(pGVM->pVM == pVM))
1370 { /* likely */ }
1371 else
1372 {
1373 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
1374 return VERR_INVALID_PARAMETER;
1375 }
1376
1377 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1378 { /* likely */ }
1379 else
1380 {
1381 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1382 return VERR_INVALID_PARAMETER;
1383 }
1384
1385 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1386 && pVM->enmVMState <= VMSTATE_TERMINATED
1387 && pVM->cCpus == pGVM->cCpus
1388 && pVM->pSession == pSession
1389 && pVM->pVMR0 == pVM))
1390 { /* likely */ }
1391 else
1392 {
1393 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pVMR0=%p(==%p)}! (op=%d)\n",
1394 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pVMR0, pVM, enmOperation);
1395 return VERR_INVALID_POINTER;
1396 }
1397 }
1398 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1399 { /* likely */ }
1400 else
1401 {
1402 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1403 return VERR_INVALID_PARAMETER;
1404 }
1405
1406 /*
1407 * SMAP fun.
1408 */
1409 VMM_CHECK_SMAP_SETUP();
1410 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1411
1412 /*
1413 * Process the request.
1414 */
1415 int rc;
1416 switch (enmOperation)
1417 {
1418 /*
1419 * GVM requests
1420 */
1421 case VMMR0_DO_GVMM_CREATE_VM:
1422 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1423 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1424 else
1425 rc = VERR_INVALID_PARAMETER;
1426 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1427 break;
1428
1429 case VMMR0_DO_GVMM_DESTROY_VM:
1430 if (pReqHdr == NULL && u64Arg == 0)
1431 rc = GVMMR0DestroyVM(pGVM, pVM);
1432 else
1433 rc = VERR_INVALID_PARAMETER;
1434 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1435 break;
1436
1437 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1438 if (pGVM != NULL && pVM != NULL )
1439 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1440 else
1441 rc = VERR_INVALID_PARAMETER;
1442 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1443 break;
1444
1445 case VMMR0_DO_GVMM_SCHED_HALT:
1446 if (pReqHdr)
1447 return VERR_INVALID_PARAMETER;
1448 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1449 rc = GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1450 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1451 break;
1452
1453 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1454 if (pReqHdr || u64Arg)
1455 return VERR_INVALID_PARAMETER;
1456 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1457 rc = GVMMR0SchedWakeUp(pVM, idCpu);
1458 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1459 break;
1460
1461 case VMMR0_DO_GVMM_SCHED_POKE:
1462 if (pReqHdr || u64Arg)
1463 return VERR_INVALID_PARAMETER;
1464 rc = GVMMR0SchedPoke(pVM, idCpu);
1465 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1466 break;
1467
1468 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1469 if (u64Arg)
1470 return VERR_INVALID_PARAMETER;
1471 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1472 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1473 break;
1474
1475 case VMMR0_DO_GVMM_SCHED_POLL:
1476 if (pReqHdr || u64Arg > 1)
1477 return VERR_INVALID_PARAMETER;
1478 rc = GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1479 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1480 break;
1481
1482 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1483 if (u64Arg)
1484 return VERR_INVALID_PARAMETER;
1485 rc = GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1486 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1487 break;
1488
1489 case VMMR0_DO_GVMM_RESET_STATISTICS:
1490 if (u64Arg)
1491 return VERR_INVALID_PARAMETER;
1492 rc = GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1493 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1494 break;
1495
1496 /*
1497 * Initialize the R0 part of a VM instance.
1498 */
1499 case VMMR0_DO_VMMR0_INIT:
1500 rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1501 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1502 break;
1503
1504 /*
1505 * Terminate the R0 part of a VM instance.
1506 */
1507 case VMMR0_DO_VMMR0_TERM:
1508 rc = VMMR0TermVM(pVM, NULL);
1509 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1510 break;
1511
1512 /*
1513 * Attempt to enable HM mode and check the current setting.
1514 */
1515 case VMMR0_DO_HM_ENABLE:
1516 rc = HMR0EnableAllCpus(pVM);
1517 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1518 break;
1519
1520 /*
1521 * Setup the hardware accelerated session.
1522 */
1523 case VMMR0_DO_HM_SETUP_VM:
1524 rc = HMR0SetupVM(pVM);
1525 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1526 break;
1527
1528 /*
1529 * Switch to RC to execute Hypervisor function.
1530 */
1531 case VMMR0_DO_CALL_HYPERVISOR:
1532 {
1533#ifdef VBOX_WITH_RAW_MODE
1534 /*
1535 * Validate input / context.
1536 */
1537 if (RT_UNLIKELY(idCpu != 0))
1538 return VERR_INVALID_CPU_ID;
1539 if (RT_UNLIKELY(pVM->cCpus != 1))
1540 return VERR_INVALID_PARAMETER;
1541 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1542# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1543 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1544 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1545# endif
1546
1547 /*
1548 * Disable interrupts.
1549 */
1550 RTCCUINTREG fFlags = ASMIntDisableFlags();
1551
1552 /*
1553 * Get the host CPU identifiers, make sure they are valid and that
1554 * we've got a TSC delta for the CPU.
1555 */
1556 RTCPUID idHostCpu;
1557 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1558 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1559 {
1560 ASMSetFlags(fFlags);
1561 return VERR_INVALID_CPU_INDEX;
1562 }
1563 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1564 {
1565 ASMSetFlags(fFlags);
1566 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1567 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1568 0 /*default cTries*/);
1569 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1570 {
1571 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1572 return rc;
1573 }
1574 }
1575
1576 /*
1577 * Commit the CPU identifiers.
1578 */
1579# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1580 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1581# endif
1582 pVCpu->iHostCpuSet = iHostCpuSet;
1583 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1584
1585 /*
1586 * We might need to disable VT-x if the active switcher turns off paging.
1587 */
1588 bool fVTxDisabled;
1589 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1590 if (RT_SUCCESS(rc))
1591 {
1592 /*
1593 * Go through the wormhole...
1594 */
1595 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1596
1597 /*
1598 * Re-enable VT-x before we dispatch any pending host interrupts.
1599 */
1600 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1601
1602 if ( rc == VINF_EM_RAW_INTERRUPT
1603 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1604 TRPMR0DispatchHostInterrupt(pVM);
1605 }
1606
1607 /*
1608 * Invalidate the host CPU identifiers as we restore interrupts.
1609 */
1610 pVCpu->iHostCpuSet = UINT32_MAX;
1611 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1612 ASMSetFlags(fFlags);
1613
1614#else /* !VBOX_WITH_RAW_MODE */
1615 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1616#endif
1617 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1618 break;
1619 }
1620
1621 /*
1622 * PGM wrappers.
1623 */
1624 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1625 if (idCpu == NIL_VMCPUID)
1626 return VERR_INVALID_CPU_ID;
1627 rc = PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1628 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1629 break;
1630
1631 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1632 if (idCpu == NIL_VMCPUID)
1633 return VERR_INVALID_CPU_ID;
1634 rc = PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1635 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1636 break;
1637
1638 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1639 if (idCpu == NIL_VMCPUID)
1640 return VERR_INVALID_CPU_ID;
1641 rc = PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1642 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1643 break;
1644
1645 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1646 if (idCpu != 0)
1647 return VERR_INVALID_CPU_ID;
1648 rc = PGMR0PhysSetupIommu(pVM);
1649 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1650 break;
1651
1652 /*
1653 * GMM wrappers.
1654 */
1655 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1656 if (u64Arg)
1657 return VERR_INVALID_PARAMETER;
1658 rc = GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1659 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1660 break;
1661
1662 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1663 if (u64Arg)
1664 return VERR_INVALID_PARAMETER;
1665 rc = GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1666 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1667 break;
1668
1669 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1670 if (u64Arg)
1671 return VERR_INVALID_PARAMETER;
1672 rc = GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1673 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1674 break;
1675
1676 case VMMR0_DO_GMM_FREE_PAGES:
1677 if (u64Arg)
1678 return VERR_INVALID_PARAMETER;
1679 rc = GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1680 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1681 break;
1682
1683 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1684 if (u64Arg)
1685 return VERR_INVALID_PARAMETER;
1686 rc = GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1687 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1688 break;
1689
1690 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1691 if (u64Arg)
1692 return VERR_INVALID_PARAMETER;
1693 rc = GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1694 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1695 break;
1696
1697 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1698 if (idCpu == NIL_VMCPUID)
1699 return VERR_INVALID_CPU_ID;
1700 if (u64Arg)
1701 return VERR_INVALID_PARAMETER;
1702 rc = GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1703 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1704 break;
1705
1706 case VMMR0_DO_GMM_BALLOONED_PAGES:
1707 if (u64Arg)
1708 return VERR_INVALID_PARAMETER;
1709 rc = GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1710 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1711 break;
1712
1713 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1714 if (u64Arg)
1715 return VERR_INVALID_PARAMETER;
1716 rc = GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1717 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1718 break;
1719
1720 case VMMR0_DO_GMM_SEED_CHUNK:
1721 if (pReqHdr)
1722 return VERR_INVALID_PARAMETER;
1723 rc = GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1724 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1725 break;
1726
1727 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1728 if (idCpu == NIL_VMCPUID)
1729 return VERR_INVALID_CPU_ID;
1730 if (u64Arg)
1731 return VERR_INVALID_PARAMETER;
1732 rc = GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1733 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1734 break;
1735
1736 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1737 if (idCpu == NIL_VMCPUID)
1738 return VERR_INVALID_CPU_ID;
1739 if (u64Arg)
1740 return VERR_INVALID_PARAMETER;
1741 rc = GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1742 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1743 break;
1744
1745 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1746 if (idCpu == NIL_VMCPUID)
1747 return VERR_INVALID_CPU_ID;
1748 if ( u64Arg
1749 || pReqHdr)
1750 return VERR_INVALID_PARAMETER;
1751 rc = GMMR0ResetSharedModules(pVM, idCpu);
1752 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1753 break;
1754
1755#ifdef VBOX_WITH_PAGE_SHARING
1756 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1757 {
1758 if (idCpu == NIL_VMCPUID)
1759 return VERR_INVALID_CPU_ID;
1760 if ( u64Arg
1761 || pReqHdr)
1762 return VERR_INVALID_PARAMETER;
1763
1764 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1765 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1766
1767# ifdef DEBUG_sandervl
1768 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1769 /** @todo this can have bad side effects for unexpected jumps back to r3. */
1770 rc = GMMR0CheckSharedModulesStart(pVM);
1771 if (rc == VINF_SUCCESS)
1772 {
1773 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1774 Assert( rc == VINF_SUCCESS
1775 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1776 GMMR0CheckSharedModulesEnd(pVM);
1777 }
1778# else
1779 rc = GMMR0CheckSharedModules(pVM, pVCpu);
1780# endif
1781 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1782 break;
1783 }
1784#endif
1785
1786#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1787 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1788 if (u64Arg)
1789 return VERR_INVALID_PARAMETER;
1790 rc = GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1791 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1792 break;
1793#endif
1794
1795 case VMMR0_DO_GMM_QUERY_STATISTICS:
1796 if (u64Arg)
1797 return VERR_INVALID_PARAMETER;
1798 rc = GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1799 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1800 break;
1801
1802 case VMMR0_DO_GMM_RESET_STATISTICS:
1803 if (u64Arg)
1804 return VERR_INVALID_PARAMETER;
1805 rc = GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1806 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1807 break;
1808
1809 /*
1810 * A quick GCFGM mock-up.
1811 */
1812 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1813 case VMMR0_DO_GCFGM_SET_VALUE:
1814 case VMMR0_DO_GCFGM_QUERY_VALUE:
1815 {
1816 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1817 return VERR_INVALID_PARAMETER;
1818 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1819 if (pReq->Hdr.cbReq != sizeof(*pReq))
1820 return VERR_INVALID_PARAMETER;
1821 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1822 {
1823 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1824 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1825 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1826 }
1827 else
1828 {
1829 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1830 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1831 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1832 }
1833 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1834 break;
1835 }
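 /*
  * Hedged, illustrative sketch (kept under '#if 0' so it is never compiled):
  * how a ring-3 caller might drive the GCFGM mock-up above to query a GVMM
  * config value.  SUPR3CallVMMR0Ex, the SUPVMMR0REQHDR_MAGIC initialisation
  * and the "/GVMM/MinSleep" value name are assumptions made for illustration
  * only; they are not taken from this file.
  */
#if 0
 GCFGMVALUEREQ Req;
 RT_ZERO(Req);
 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 Req.Hdr.cbReq    = sizeof(Req);
 Req.pSession     = pSession;                             /* the caller's support driver session */
 RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/MinSleep"); /* hypothetical value name */
 int rcQuery = SUPR3CallVMMR0Ex(NIL_RTR0PTR /*pVMR0*/, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE,
                                0 /*u64Arg*/, &Req.Hdr);
 if (RT_SUCCESS(rcQuery))
     LogRel(("GCFGM: %s=%#RX64\n", Req.szName, Req.u64Value));
#endif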
1836
1837 /*
1838 * PDM Wrappers.
1839 */
1840 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1841 {
1842 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1843 return VERR_INVALID_PARAMETER;
1844 rc = PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1845 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1846 break;
1847 }
1848
1849 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1850 {
1851 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1852 return VERR_INVALID_PARAMETER;
1853 rc = PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1854 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1855 break;
1856 }
1857
1858 /*
1859 * Requests to the internal networking service.
1860 */
1861 case VMMR0_DO_INTNET_OPEN:
1862 {
1863 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1864 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1865 return VERR_INVALID_PARAMETER;
1866 rc = IntNetR0OpenReq(pSession, pReq);
1867 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1868 break;
1869 }
1870
1871 case VMMR0_DO_INTNET_IF_CLOSE:
1872 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1873 return VERR_INVALID_PARAMETER;
1874 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1875 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1876 break;
1877
1878
1879 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1880 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1881 return VERR_INVALID_PARAMETER;
1882 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1883 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1884 break;
1885
1886 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1887 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1888 return VERR_INVALID_PARAMETER;
1889 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1890 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1891 break;
1892
1893 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1894 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1895 return VERR_INVALID_PARAMETER;
1896 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1897 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1898 break;
1899
1900 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1901 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1902 return VERR_INVALID_PARAMETER;
1903 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1904 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1905 break;
1906
1907 case VMMR0_DO_INTNET_IF_SEND:
1908 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1909 return VERR_INVALID_PARAMETER;
1910 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1911 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1912 break;
1913
1914 case VMMR0_DO_INTNET_IF_WAIT:
1915 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1916 return VERR_INVALID_PARAMETER;
1917 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1918 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1919 break;
1920
1921 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1922 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1923 return VERR_INVALID_PARAMETER;
1924 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1925 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1926 break;
1927
1928#ifdef VBOX_WITH_PCI_PASSTHROUGH
1929 /*
1930 * Requests to host PCI driver service.
1931 */
1932 case VMMR0_DO_PCIRAW_REQ:
1933 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1934 return VERR_INVALID_PARAMETER;
1935 rc = PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1936 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1937 break;
1938#endif
1939 /*
1940 * For profiling.
1941 */
1942 case VMMR0_DO_NOP:
1943 case VMMR0_DO_SLOW_NOP:
1944 return VINF_SUCCESS;
1945
1946 /*
1947 * For testing Ring-0 APIs invoked in this environment.
1948 */
1949 case VMMR0_DO_TESTS:
1950 /** @todo make new test */
1951 return VINF_SUCCESS;
1952
1953
1954#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
1955 case VMMR0_DO_TEST_SWITCHER3264:
1956 if (idCpu == NIL_VMCPUID)
1957 return VERR_INVALID_CPU_ID;
1958 rc = HMR0TestSwitcher3264(pVM);
1959 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1960 break;
1961#endif
1962 default:
1963 /*
1964 * We're returning VERR_NOT_SUPPORTED here so we've got something other
1965 * than -1 which the interrupt gate glue code might return.
1966 */
1967 Log(("operation %#x is not supported\n", enmOperation));
1968 return VERR_NOT_SUPPORTED;
1969 }
1970 return rc;
1971}
1972
1973
1974/**
1975 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1976 */
1977typedef struct VMMR0ENTRYEXARGS
1978{
1979 PGVM pGVM;
1980 PVM pVM;
1981 VMCPUID idCpu;
1982 VMMR0OPERATION enmOperation;
1983 PSUPVMMR0REQHDR pReq;
1984 uint64_t u64Arg;
1985 PSUPDRVSESSION pSession;
1986} VMMR0ENTRYEXARGS;
1987/** Pointer to a vmmR0EntryExWrapper argument package. */
1988typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1989
1990/**
1991 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1992 *
1993 * @returns VBox status code.
1994 * @param pvArgs The argument package.
1995 */
1996static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1997{
1998 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
1999 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2000 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2001 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2002 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2003 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2004 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2005}
2006
2007
2008/**
2009 * The Ring 0 entry point, called by the support library (SUP).
2010 *
2011 * @returns VBox status code.
 * @param pGVM The global (ring-0) VM structure.
2012 * @param pVM The cross context VM structure.
2013 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2014 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2015 * @param enmOperation Which operation to execute.
2016 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2017 * @param u64Arg Some simple constant argument.
2018 * @param pSession The session of the caller.
2019 * @remarks Assume called with interrupts _enabled_.
2020 */
2021VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2022 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2023{
2024 /*
2025 * Requests that should only happen on the EMT thread will be
2026 * wrapped in a setjmp so we can assert without causing trouble.
2027 */
2028 if ( pVM != NULL
2029 && pGVM != NULL
2030 && idCpu < pGVM->cCpus
2031 && pVM->pVMR0 != NULL)
2032 {
2033 switch (enmOperation)
2034 {
2035 /* These might/will be called before VMMR3Init. */
2036 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2037 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2038 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2039 case VMMR0_DO_GMM_FREE_PAGES:
2040 case VMMR0_DO_GMM_BALLOONED_PAGES:
2041 /* On the Mac we might not have a valid jmp buf, so check these as well. */
2042 case VMMR0_DO_VMMR0_INIT:
2043 case VMMR0_DO_VMMR0_TERM:
2044 {
2045 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2046 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2047 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2048 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2049 && pVCpu->hNativeThreadR0 == hNativeThread))
2050 {
2051 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2052 break;
2053
2054 /** @todo validate this EMT claim... GVM knows. */
2055 VMMR0ENTRYEXARGS Args;
2056 Args.pGVM = pGVM;
2057 Args.pVM = pVM;
2058 Args.idCpu = idCpu;
2059 Args.enmOperation = enmOperation;
2060 Args.pReq = pReq;
2061 Args.u64Arg = u64Arg;
2062 Args.pSession = pSession;
2063 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2064 }
2065 return VERR_VM_THREAD_NOT_EMT;
2066 }
2067
2068 default:
2069 break;
2070 }
2071 }
2072 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2073}
2074
2075
2076/**
2077 * Checks whether we've armed the ring-0 long jump machinery.
2078 *
2079 * @returns @c true / @c false
2080 * @param pVCpu The cross context virtual CPU structure.
2081 * @thread EMT
2082 * @sa VMMIsLongJumpArmed
2083 */
2084VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2085{
2086#ifdef RT_ARCH_X86
2087 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2088 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2089#else
2090 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2091 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2092#endif
2093}
2094
2095
2096/**
2097 * Checks whether we've done a ring-3 long jump.
2098 *
2099 * @returns @c true / @c false
2100 * @param pVCpu The cross context virtual CPU structure.
2101 * @thread EMT
2102 */
2103VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2104{
2105 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2106}
2107
2108
2109/**
2110 * Internal R0 logger worker: Flush logger.
2111 *
2112 * @param pLogger The logger instance to flush.
2113 * @remark This function must be exported!
2114 */
2115VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2116{
2117#ifdef LOG_ENABLED
2118 /*
2119 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2120 * (This code is a bit paranoid.)
2121 */
2122 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2123 if ( !VALID_PTR(pR0Logger)
2124 || !VALID_PTR(pR0Logger + 1)
2125 || pLogger->u32Magic != RTLOGGER_MAGIC)
2126 {
2127# ifdef DEBUG
2128 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2129# endif
2130 return;
2131 }
2132 if (pR0Logger->fFlushingDisabled)
2133 return; /* quietly */
2134
2135 PVM pVM = pR0Logger->pVM;
2136 if ( !VALID_PTR(pVM)
2137 || pVM->pVMR0 != pVM)
2138 {
2139# ifdef DEBUG
2140 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2141# endif
2142 return;
2143 }
2144
2145 PVMCPU pVCpu = VMMGetCpu(pVM);
2146 if (pVCpu)
2147 {
2148 /*
2149 * Check that the jump buffer is armed.
2150 */
2151# ifdef RT_ARCH_X86
2152 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2153 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2154# else
2155 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2156 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2157# endif
2158 {
2159# ifdef DEBUG
2160 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2161# endif
2162 return;
2163 }
2164 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2165 }
2166# ifdef DEBUG
2167 else
2168 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2169# endif
2170#else
2171 NOREF(pLogger);
2172#endif /* LOG_ENABLED */
2173}
2174
2175/**
2176 * Internal R0 logger worker: Custom prefix.
2177 *
2178 * @returns Number of chars written.
2179 *
2180 * @param pLogger The logger instance.
2181 * @param pchBuf The output buffer.
2182 * @param cchBuf The size of the buffer.
2183 * @param pvUser User argument (ignored).
2184 */
2185VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2186{
2187 NOREF(pvUser);
2188#ifdef LOG_ENABLED
2189 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2190 if ( !VALID_PTR(pR0Logger)
2191 || !VALID_PTR(pR0Logger + 1)
2192 || pLogger->u32Magic != RTLOGGER_MAGIC
2193 || cchBuf < 2)
2194 return 0;
2195
2196 static const char s_szHex[17] = "0123456789abcdef";
2197 VMCPUID const idCpu = pR0Logger->idCpu;
2198 pchBuf[1] = s_szHex[ idCpu & 15];
2199 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2200
2201 return 2;
2202#else
2203 NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
2204 return 0;
2205#endif
2206}
2207
2208#ifdef LOG_ENABLED
2209
2210/**
2211 * Disables flushing of the ring-0 debug log.
2212 *
2213 * @param pVCpu The cross context virtual CPU structure.
2214 */
2215VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2216{
2217 if (pVCpu->vmm.s.pR0LoggerR0)
2218 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2219}
2220
2221
2222/**
2223 * Enables flushing of the ring-0 debug log.
2224 *
2225 * @param pVCpu The cross context virtual CPU structure.
2226 */
2227VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2228{
2229 if (pVCpu->vmm.s.pR0LoggerR0)
2230 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2231}
2232
2233
2234/**
2235 * Checks if log flushing is disabled or not.
2236 *
2237 * @param pVCpu The cross context virtual CPU structure.
2238 */
2239VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2240{
2241 if (pVCpu->vmm.s.pR0LoggerR0)
2242 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2243 return true;
2244}
2245#endif /* LOG_ENABLED */
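/*
 * Hedged usage sketch (never compiled) of the flush control trio above:
 * disable flushing around code that must not take the ring-3 long jump,
 * then re-enable it.  The surrounding context (pVCpu in scope) is assumed.
 */
#if 0
 VMMR0LogFlushDisable(pVCpu);
 Log(("buffered, but not flushed to ring-3 while flushing is disabled\n"));
 VMMR0LogFlushEnable(pVCpu);
 Assert(!VMMR0IsLogFlushDisabled(pVCpu));
#endif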
2246
2247/**
2248 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2249 *
2250 * @returns true if the breakpoint should be hit, false if it should be ignored.
2251 */
2252DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2253{
2254#if 0
2255 return true;
2256#else
2257 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2258 if (pVM)
2259 {
2260 PVMCPU pVCpu = VMMGetCpu(pVM);
2261
2262 if (pVCpu)
2263 {
2264#ifdef RT_ARCH_X86
2265 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2266 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2267#else
2268 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2269 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2270#endif
2271 {
2272 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2273 return RT_FAILURE_NP(rc);
2274 }
2275 }
2276 }
2277#ifdef RT_OS_LINUX
2278 return true;
2279#else
2280 return false;
2281#endif
2282#endif
2283}
2284
2285
2286/**
2287 * Override this so we can push it up to ring-3.
2288 *
2289 * @param pszExpr Expression. Can be NULL.
2290 * @param uLine Location line number.
2291 * @param pszFile Location file name.
2292 * @param pszFunction Location function name.
2293 */
2294DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2295{
2296 /*
2297 * To the log.
2298 */
2299 LogAlways(("\n!!R0-Assertion Failed!!\n"
2300 "Expression: %s\n"
2301 "Location : %s(%d) %s\n",
2302 pszExpr, pszFile, uLine, pszFunction));
2303
2304 /*
2305 * To the global VMM buffer.
2306 */
2307 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2308 if (pVM)
2309 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2310 "\n!!R0-Assertion Failed!!\n"
2311 "Expression: %.*s\n"
2312 "Location : %s(%d) %s\n",
2313 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2314 pszFile, uLine, pszFunction);
2315
2316 /*
2317 * Continue the normal way.
2318 */
2319 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2320}
2321
2322
2323/**
2324 * Callback for RTLogFormatV which writes to the ring-3 log port.
2325 * See PFNLOGOUTPUT() for details.
2326 */
2327static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2328{
2329 for (size_t i = 0; i < cbChars; i++)
2330 {
2331 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2332 }
2333
2334 NOREF(pv);
2335 return cbChars;
2336}
2337
2338
2339/**
2340 * Override this so we can push it up to ring-3.
2341 *
2342 * @param pszFormat The format string.
2343 * @param va Arguments.
2344 */
2345DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2346{
2347 va_list vaCopy;
2348
2349 /*
2350 * Push the message to the loggers.
2351 */
2352 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2353 if (pLog)
2354 {
2355 va_copy(vaCopy, va);
2356 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2357 va_end(vaCopy);
2358 }
2359 pLog = RTLogRelGetDefaultInstance();
2360 if (pLog)
2361 {
2362 va_copy(vaCopy, va);
2363 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2364 va_end(vaCopy);
2365 }
2366
2367 /*
2368 * Push it to the global VMM buffer.
2369 */
2370 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2371 if (pVM)
2372 {
2373 va_copy(vaCopy, va);
2374 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2375 va_end(vaCopy);
2376 }
2377
2378 /*
2379 * Continue the normal way.
2380 */
2381 RTAssertMsg2V(pszFormat, va);
2382}
2383