VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp@ 103131

Last change on this file since 103131 was 102987, checked in by vboxsync, 9 months ago

VMM/HM: Nested VMX: bugref:10318 Log host CR0 and a couple of MSRs for split-lock disable status.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 72.1 KB
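The split-lock disable status logging referred to in the change above boils down to the host CR0 read and the core-capability/memory-control MSR probe in hmR0InitIntel() further down; in essence (simplified excerpt from that function):

    g_uHmVmxHostCr0 = ASMGetCR0();
    uint32_t uDummy, uStdExtFeatEdx;
    ASMCpuId_Idx_ECX(7, 0, &uDummy, &uDummy, &uDummy, &uStdExtFeatEdx);
    if (uStdExtFeatEdx & X86_CPUID_STEXT_FEATURE_EDX_CORECAP)
    {
        g_uHmVmxHostCoreCap = ASMRdMsr(MSR_IA32_CORE_CAPABILITIES);
        if (g_uHmVmxHostCoreCap & MSR_IA32_CORE_CAP_SPLIT_LOCK_DISABLE)
            g_uHmVmxHostMemoryCtrl = ASMRdMsr(MSR_MEMORY_CTRL);
    }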
1/* $Id: HMR0.cpp 102987 2024-01-22 10:07:42Z vboxsync $ */
2/** @file
3 * Hardware Assisted Virtualization Manager (HM) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_HM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/pgm.h>
36#include "HMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/hm_svm.h>
39#include <VBox/vmm/hmvmxinline.h>
40#include <VBox/err.h>
41#include <VBox/log.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/asm-amd64-x86.h>
45#include <iprt/cpuset.h>
46#include <iprt/mem.h>
47#include <iprt/memobj.h>
48#include <iprt/once.h>
49#include <iprt/param.h>
50#include <iprt/power.h>
51#include <iprt/string.h>
52#include <iprt/thread.h>
53#include <iprt/x86.h>
54#include "HMVMXR0.h"
55#include "HMSVMR0.h"
56
57
58/*********************************************************************************************************************************
59* Internal Functions *
60*********************************************************************************************************************************/
61static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
62static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
63static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
64static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData);
65
66
67/*********************************************************************************************************************************
68* Structures and Typedefs *
69*********************************************************************************************************************************/
70/**
71 * This is used to manage the status code of an RTMpOnAll call in HM.
72 */
73typedef struct HMR0FIRSTRC
74{
75 /** The status code. */
76 int32_t volatile rc;
77 /** The ID of the CPU reporting the first failure. */
78 RTCPUID volatile idCpu;
79} HMR0FIRSTRC;
80/** Pointer to a first return code structure. */
81typedef HMR0FIRSTRC *PHMR0FIRSTRC;
82
83/**
84 * Ring-0 method table for AMD-V and VT-x specific operations.
85 */
86typedef struct HMR0VTABLE
87{
88 DECLR0CALLBACKMEMBER(int, pfnEnterSession, (PVMCPUCC pVCpu));
89 DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit));
90 DECLR0CALLBACKMEMBER(int, pfnAssertionCallback, (PVMCPUCC pVCpu));
91 DECLR0CALLBACKMEMBER(int, pfnExportHostState, (PVMCPUCC pVCpu));
92 DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPUCC pVCpu));
93 DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
94 bool fEnabledByHost, PCSUPHWVIRTMSRS pHwvirtMsrs));
95 DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHMPHYSCPU pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
96 DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVMCC pVM));
97 DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVMCC pVM));
98 DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVMCC pVM));
99} HMR0VTABLE;
100
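/*
 * Note: during HMR0Init() exactly one of the method tables defined below
 * (g_HmR0OpsVmx, g_HmR0OpsSvm or the fallback g_HmR0OpsDummy) is copied into
 * g_HmR0Ops, and all later dispatch goes through that copy, e.g.:
 *
 *     return g_HmR0Ops.pfnInitVM(pVM);   (as done by HMR0InitVM() below)
 */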
101
102/*********************************************************************************************************************************
103* Global Variables *
104*********************************************************************************************************************************/
105/** The active ring-0 HM operations (copied from one of the tables at init). */
106static HMR0VTABLE g_HmR0Ops;
107/** Indicates whether the host is suspending or not. We'll refuse a few
108 * actions when the host is being suspended to speed up the suspending and
109 * avoid trouble. */
110static bool volatile g_fHmSuspended;
111/** If set, VT-x/AMD-V is enabled globally at init time, otherwise it's
112 * enabled and disabled each time it's used to execute guest code. */
113static bool g_fHmGlobalInit;
114/** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
115uint32_t g_fHmHostKernelFeatures;
116/** Maximum allowed ASID/VPID (inclusive).
117 * @todo r=bird: This is exclusive for VT-x according to source code comment.
118 * Couldn't immediately find any docs on AMD-V, but suspect it is
119 * exclusive there as well given how hmR0SvmFlushTaggedTlb() uses it. */
120uint32_t g_uHmMaxAsid;
121
122
123/** Set if VT-x (VMX) is supported by the CPU. */
124bool g_fHmVmxSupported = false;
125/** VMX: Whether we're using the preemption timer or not. */
126bool g_fHmVmxUsePreemptTimer;
127/** VMX: The shift mask employed by the VMX-Preemption timer. */
128uint8_t g_cHmVmxPreemptTimerShift;
129/** VMX: Set if swapping EFER is supported. */
130bool g_fHmVmxSupportsVmcsEfer = false;
131/** VMX: Whether we're using SUPR0EnableVTx or not. */
132static bool g_fHmVmxUsingSUPR0EnableVTx = false;
133/** VMX: Set if we've called SUPR0EnableVTx(true) and should disable it during
134 * module termination. */
135static bool g_fHmVmxCalledSUPR0EnableVTx = false;
136/** VMX: Host CR0 value (set by ring-0 VMX init) */
137uint64_t g_uHmVmxHostCr0;
138/** VMX: Host CR4 value (set by ring-0 VMX init) */
139uint64_t g_uHmVmxHostCr4;
140/** VMX: Host EFER value (set by ring-0 VMX init) */
141uint64_t g_uHmVmxHostMsrEfer;
142/** VMX: Host SMM monitor control (used for logging/diagnostics) */
143uint64_t g_uHmVmxHostSmmMonitorCtl;
144/** VMX: Host core capabilities (set by ring-0 VMX init) */
145uint64_t g_uHmVmxHostCoreCap;
146/** VMX: Host memory control register (set by ring-0 VMX init) */
147uint64_t g_uHmVmxHostMemoryCtrl;
148
149
150/** Set if AMD-V is supported by the CPU. */
151bool g_fHmSvmSupported = false;
152/** SVM revision. */
153uint32_t g_uHmSvmRev;
154/** SVM feature bits from cpuid 0x8000000a */
155uint32_t g_fHmSvmFeatures;
156
157
158/** MSRs. */
159SUPHWVIRTMSRS g_HmMsrs;
160
161/** Last recorded error code during HM ring-0 init. */
162static int32_t g_rcHmInit = VINF_SUCCESS;
163
164/** Per CPU globals. */
165static HMPHYSCPU g_aHmCpuInfo[RTCPUSET_MAX_CPUS];
166
167/** Whether we've already initialized all CPUs.
168 * @remarks We could check the EnableAllCpusOnce state, but this is
169 * simpler and hopefully easier to understand. */
170static bool g_fHmEnabled = false;
171/** Serialize initialization in HMR0EnableAllCpus. */
172static RTONCE g_HmEnableAllCpusOnce = RTONCE_INITIALIZER;
173
174
175/** HM ring-0 operations for VT-x. */
176static HMR0VTABLE const g_HmR0OpsVmx =
177{
178 /* .pfnEnterSession = */ VMXR0Enter,
179 /* .pfnThreadCtxCallback = */ VMXR0ThreadCtxCallback,
180 /* .pfnAssertionCallback = */ VMXR0AssertionCallback,
181 /* .pfnExportHostState = */ VMXR0ExportHostState,
182 /* .pfnRunGuestCode = */ VMXR0RunGuestCode,
183 /* .pfnEnableCpu = */ VMXR0EnableCpu,
184 /* .pfnDisableCpu = */ VMXR0DisableCpu,
185 /* .pfnInitVM = */ VMXR0InitVM,
186 /* .pfnTermVM = */ VMXR0TermVM,
187 /* .pfnSetupVM = */ VMXR0SetupVM,
188};
189
190/** HM ring-0 operations for AMD-V. */
191static HMR0VTABLE const g_HmR0OpsSvm =
192{
193 /* .pfnEnterSession = */ SVMR0Enter,
194 /* .pfnThreadCtxCallback = */ SVMR0ThreadCtxCallback,
195 /* .pfnAssertionCallback = */ SVMR0AssertionCallback,
196 /* .pfnExportHostState = */ SVMR0ExportHostState,
197 /* .pfnRunGuestCode = */ SVMR0RunGuestCode,
198 /* .pfnEnableCpu = */ SVMR0EnableCpu,
199 /* .pfnDisableCpu = */ SVMR0DisableCpu,
200 /* .pfnInitVM = */ SVMR0InitVM,
201 /* .pfnTermVM = */ SVMR0TermVM,
202 /* .pfnSetupVM = */ SVMR0SetupVM,
203};
204
205
206/** @name Dummy callback handlers for when neither VT-x nor AMD-V is supported.
207 * @{ */
208
209static DECLCALLBACK(int) hmR0DummyEnter(PVMCPUCC pVCpu)
210{
211 RT_NOREF(pVCpu);
212 return VINF_SUCCESS;
213}
214
215static DECLCALLBACK(void) hmR0DummyThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit)
216{
217 RT_NOREF(enmEvent, pVCpu, fGlobalInit);
218}
219
220static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
221 bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs)
222{
223 RT_NOREF(pHostCpu, pVM, pvCpuPage, HCPhysCpuPage, fEnabledBySystem, pHwvirtMsrs);
224 return VINF_SUCCESS;
225}
226
227static DECLCALLBACK(int) hmR0DummyDisableCpu(PHMPHYSCPU pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
228{
229 RT_NOREF(pHostCpu, pvCpuPage, HCPhysCpuPage);
230 return VINF_SUCCESS;
231}
232
233static DECLCALLBACK(int) hmR0DummyInitVM(PVMCC pVM)
234{
235 RT_NOREF(pVM);
236 return VINF_SUCCESS;
237}
238
239static DECLCALLBACK(int) hmR0DummyTermVM(PVMCC pVM)
240{
241 RT_NOREF(pVM);
242 return VINF_SUCCESS;
243}
244
245static DECLCALLBACK(int) hmR0DummySetupVM(PVMCC pVM)
246{
247 RT_NOREF(pVM);
248 return VINF_SUCCESS;
249}
250
251static DECLCALLBACK(int) hmR0DummyAssertionCallback(PVMCPUCC pVCpu)
252{
253 RT_NOREF(pVCpu);
254 return VINF_SUCCESS;
255}
256
257static DECLCALLBACK(VBOXSTRICTRC) hmR0DummyRunGuestCode(PVMCPUCC pVCpu)
258{
259 RT_NOREF(pVCpu);
260 return VERR_NOT_SUPPORTED;
261}
262
263static DECLCALLBACK(int) hmR0DummyExportHostState(PVMCPUCC pVCpu)
264{
265 RT_NOREF(pVCpu);
266 return VINF_SUCCESS;
267}
268
269/** Dummy ops. */
270static HMR0VTABLE const g_HmR0OpsDummy =
271{
272 /* .pfnEnterSession = */ hmR0DummyEnter,
273 /* .pfnThreadCtxCallback = */ hmR0DummyThreadCtxCallback,
274 /* .pfnAssertionCallback = */ hmR0DummyAssertionCallback,
275 /* .pfnExportHostState = */ hmR0DummyExportHostState,
276 /* .pfnRunGuestCode = */ hmR0DummyRunGuestCode,
277 /* .pfnEnableCpu = */ hmR0DummyEnableCpu,
278 /* .pfnDisableCpu = */ hmR0DummyDisableCpu,
279 /* .pfnInitVM = */ hmR0DummyInitVM,
280 /* .pfnTermVM = */ hmR0DummyTermVM,
281 /* .pfnSetupVM = */ hmR0DummySetupVM,
282};
283
284/** @} */
285
286
287/**
288 * Initializes a first return code structure.
289 *
290 * @param pFirstRc The structure to init.
291 */
292static void hmR0FirstRcInit(PHMR0FIRSTRC pFirstRc)
293{
294 pFirstRc->rc = VINF_SUCCESS;
295 pFirstRc->idCpu = NIL_RTCPUID;
296}
297
298
299/**
300 * Tries to set the status code (success is ignored).
301 *
302 * @param pFirstRc The first return code structure.
303 * @param rc The status code.
304 */
305static void hmR0FirstRcSetStatus(PHMR0FIRSTRC pFirstRc, int rc)
306{
307 if ( RT_FAILURE(rc)
308 && ASMAtomicCmpXchgS32(&pFirstRc->rc, rc, VINF_SUCCESS))
309 pFirstRc->idCpu = RTMpCpuId();
310}
311
312
313/**
314 * Get the status code of a first return code structure.
315 *
316 * @returns The status code; VINF_SUCCESS or error status, no informational or
317 * warning errors.
318 * @param pFirstRc The first return code structure.
319 */
320static int hmR0FirstRcGetStatus(PHMR0FIRSTRC pFirstRc)
321{
322 return pFirstRc->rc;
323}
324
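/*
 * Usage sketch: the first-RC helpers above collect the first failure reported
 * by an RTMpOnAll() broadcast.  The typical pattern, as used by hmR0InitIntel(),
 * hmR0InitAmd() and HMR0Term() below, is roughly:
 *
 *     HMR0FIRSTRC FirstRc;
 *     hmR0FirstRcInit(&FirstRc);
 *     int rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
 *     if (RT_SUCCESS(rc))
 *         rc = hmR0FirstRcGetStatus(&FirstRc);
 */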
325
326#ifdef VBOX_STRICT
327# ifndef DEBUG_bird
328/**
329 * Get the CPU ID on which the failure status code was reported.
330 *
331 * @returns The CPU ID, NIL_RTCPUID if no failure was reported.
332 * @param pFirstRc The first return code structure.
333 */
334static RTCPUID hmR0FirstRcGetCpuId(PHMR0FIRSTRC pFirstRc)
335{
336 return pFirstRc->idCpu;
337}
338# endif
339#endif /* VBOX_STRICT */
340
341
342/**
343 * Verify if VMX is really usable by entering and exiting VMX root mode.
344 *
345 * @returns VBox status code.
346 * @param uVmxBasicMsr The host's IA32_VMX_BASIC_MSR value.
347 */
348static int hmR0InitIntelVerifyVmxUsability(uint64_t uVmxBasicMsr)
349{
350 /* Allocate a temporary VMXON region. */
351 RTR0MEMOBJ hScratchMemObj;
352 int rc = RTR0MemObjAllocCont(&hScratchMemObj, HOST_PAGE_SIZE, NIL_RTHCPHYS /* PhysHighest */, false /* fExecutable */);
353 if (RT_FAILURE(rc))
354 {
355 LogRelFunc(("RTR0MemObjAllocCont(,HOST_PAGE_SIZE,false) -> %Rrc\n", rc));
356 return rc;
357 }
358 void *pvScratchPage = RTR0MemObjAddress(hScratchMemObj);
359 RTHCPHYS const HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScratchMemObj, 0);
360 RT_BZERO(pvScratchPage, HOST_PAGE_SIZE);
361
362 /* Set revision dword at the beginning of the VMXON structure. */
363 *(uint32_t *)pvScratchPage = RT_BF_GET(uVmxBasicMsr, VMX_BF_BASIC_VMCS_ID);
364
365 /* Make sure we don't get rescheduled to another CPU during this probe. */
366 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
367
368 /* Enable CR4.VMXE if it isn't already set. */
369 RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
370
371 /*
372 * The only way of checking if we're in VMX root mode is to try and enter it.
373 * There is no instruction or control bit that tells us if we're in VMX root mode.
374 * Therefore, try and enter and exit VMX root mode.
375 */
376 rc = VMXEnable(HCPhysScratchPage);
377 if (RT_SUCCESS(rc))
378 VMXDisable();
379 else
380 {
381 /*
382 * KVM leaves the CPU in VMX root mode. Not only is this not allowed,
383 * it will crash the host when we enter raw mode, because:
384 *
385 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
386 * this bit), and
387 * (b) turning off paging causes a #GP (unavoidable when switching
388 * from long mode to 32-bit mode or from 32-bit mode to PAE).
389 *
390 * They should fix their code, but until they do we simply refuse to run.
391 */
392 rc = VERR_VMX_IN_VMX_ROOT_MODE;
393 }
394
395 /* Restore CR4.VMXE if it wasn't set prior to us setting it above. */
396 if (!(uOldCr4 & X86_CR4_VMXE))
397 SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
398
399 /* Restore interrupts. */
400 ASMSetFlags(fEFlags);
401
402 RTR0MemObjFree(hScratchMemObj, false);
403
404 return rc;
405}
406
407
408/**
409 * Worker function used by hmR0PowerCallback() and HMR0Init() to initialize VT-x
410 * on a CPU.
411 *
412 * @param idCpu The identifier for the CPU the function is called on.
413 * @param pvUser1 Pointer to the first RC structure.
414 * @param pvUser2 Ignored.
415 */
416static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
417{
418 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
419 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
420 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
421 NOREF(idCpu); NOREF(pvUser2);
422
423 int rc = SUPR0GetVmxUsability(NULL /* pfIsSmxModeAmbiguous */);
424 hmR0FirstRcSetStatus(pFirstRc, rc);
425}
426
427
428/**
429 * Intel-specific initialization code.
430 *
431 * @returns VBox status code (will only fail if out of memory).
432 */
433static int hmR0InitIntel(void)
434{
435 /* Read this MSR now as it may be useful for error reporting when initializing VT-x fails. */
436 g_HmMsrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
437
438 /*
439 * First, try to use the native kernel API for controlling VT-x.
440 * (This is currently only supported by some Mac OS X kernels.)
441 */
442 int rc;
443 g_rcHmInit = rc = SUPR0EnableVTx(true /* fEnable */);
444 g_fHmVmxUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
445 if (g_fHmVmxUsingSUPR0EnableVTx)
446 {
447 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
448 if (RT_SUCCESS(rc))
449 {
450 g_fHmVmxSupported = true;
451 rc = SUPR0EnableVTx(false /* fEnable */);
452 AssertLogRelRC(rc);
453 rc = VINF_SUCCESS;
454 }
455 }
456 else
457 {
458 HMR0FIRSTRC FirstRc;
459 hmR0FirstRcInit(&FirstRc);
460 g_rcHmInit = rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
461 if (RT_SUCCESS(rc))
462 g_rcHmInit = rc = hmR0FirstRcGetStatus(&FirstRc);
463 }
464
465 if (RT_SUCCESS(rc))
466 {
467 /* Read CR0, CR4 and EFER for logging/diagnostic purposes. */
468 g_uHmVmxHostCr0 = ASMGetCR0();
469 g_uHmVmxHostCr4 = ASMGetCR4();
470 g_uHmVmxHostMsrEfer = ASMRdMsr(MSR_K6_EFER);
471
472 /* Get VMX MSRs (and feature control MSR) for determining VMX features we can ultimately use. */
473 SUPR0GetHwvirtMsrs(&g_HmMsrs, SUPVTCAPS_VT_X, false /* fForce */);
474
475 /*
476 * Nested KVM workaround: Intel SDM section 34.15.5 describes that
477 * MSR_IA32_SMM_MONITOR_CTL depends on bit 49 of MSR_IA32_VMX_BASIC while
478 * table 35-2 says that this MSR is available if either VMX or SMX is supported.
479 */
480 uint64_t const uVmxBasicMsr = g_HmMsrs.u.vmx.u64Basic;
481 if (RT_BF_GET(uVmxBasicMsr, VMX_BF_BASIC_DUAL_MON))
482 g_uHmVmxHostSmmMonitorCtl = ASMRdMsr(MSR_IA32_SMM_MONITOR_CTL);
483
484 /*
485 * Host core and memory capabilities MSRs.
486 * Primarily for logging split-lock disable status.
487 */
488 uint32_t uDummy, uStdExtFeatEdx;
489 ASMCpuId_Idx_ECX(7, 0, &uDummy, &uDummy, &uDummy, &uStdExtFeatEdx);
490 if (uStdExtFeatEdx & X86_CPUID_STEXT_FEATURE_EDX_CORECAP)
491 {
492 g_uHmVmxHostCoreCap = ASMRdMsr(MSR_IA32_CORE_CAPABILITIES);
493 if (g_uHmVmxHostCoreCap & MSR_IA32_CORE_CAP_SPLIT_LOCK_DISABLE)
494 g_uHmVmxHostMemoryCtrl = ASMRdMsr(MSR_MEMORY_CTRL);
495 }
496
497 /* Initialize VPID - 16-bit ASID. */
498 g_uHmMaxAsid = 0x10000; /* exclusive */
499
500 /*
501 * If the host OS has not enabled VT-x for us, try to enter VMX root mode
502 * to really verify if VT-x is usable.
503 */
504 if (!g_fHmVmxUsingSUPR0EnableVTx)
505 {
506 /*
507 * We don't verify VMX root mode on all CPUs here because the verify
508 * function exits VMX root mode thus potentially allowing other
509 * programs to grab VT-x. Our global init's entering and staying in
510 * VMX root mode (until our module termination) is done later when
511 * the first VM powers up (after module initialization) using
512 * VMMR0_DO_HM_ENABLE which calls HMR0EnableAllCpus().
513 *
514 * This is just a quick sanity check.
515 */
516 rc = hmR0InitIntelVerifyVmxUsability(uVmxBasicMsr);
517 if (RT_SUCCESS(rc))
518 g_fHmVmxSupported = true;
519 else
520 {
521 g_rcHmInit = rc;
522 Assert(g_fHmVmxSupported == false);
523 }
524 }
525
526 if (g_fHmVmxSupported)
527 {
528 rc = VMXR0GlobalInit();
529 if (RT_SUCCESS(rc))
530 {
531 /*
532 * Install the VT-x methods.
533 */
534 g_HmR0Ops = g_HmR0OpsVmx;
535
536 /*
537 * Check for the VMX-Preemption Timer and adjust for the "VMX-Preemption
538 * Timer Does Not Count Down at the Rate Specified" CPU erratum.
539 */
540 if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER)
541 {
542 g_fHmVmxUsePreemptTimer = true;
543 g_cHmVmxPreemptTimerShift = RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
544 if (HMIsSubjectToVmxPreemptTimerErratum())
545 g_cHmVmxPreemptTimerShift = 0; /* This is about right most of the time here. */
546 }
547 else
548 g_fHmVmxUsePreemptTimer = false;
549
550 /*
551 * Check for EFER swapping support.
552 */
553 g_fHmVmxSupportsVmcsEfer = (g_HmMsrs.u.vmx.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
554 && (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR)
555 && (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR);
556 }
557 else
558 {
559 g_rcHmInit = rc;
560 g_fHmVmxSupported = false;
561 }
562 }
563 }
564#ifdef LOG_ENABLED
565 else
566 SUPR0Printf("hmR0InitIntelCpu failed with rc=%Rrc\n", g_rcHmInit);
567#endif
568 return VINF_SUCCESS;
569}
570
571
572/**
573 * Worker function used by hmR0PowerCallback() and HMR0Init() to initialize AMD-V
574 * on a CPU.
575 *
576 * @param idCpu The identifier for the CPU the function is called on.
577 * @param pvUser1 Pointer to the first RC structure.
578 * @param pvUser2 Ignored.
579 */
580static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
581{
582 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
583 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
584 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
585 NOREF(idCpu); NOREF(pvUser2);
586
587 int rc = SUPR0GetSvmUsability(true /* fInitSvm */);
588 hmR0FirstRcSetStatus(pFirstRc, rc);
589}
590
591
592/**
593 * AMD-specific initialization code.
594 *
595 * @returns VBox status code (will only fail if out of memory).
596 */
597static int hmR0InitAmd(void)
598{
599 /* Call the global AMD-V initialization routine (should only fail in out-of-memory situations). */
600 int rc = SVMR0GlobalInit();
601 if (RT_SUCCESS(rc))
602 {
603 /*
604 * Install the AMD-V methods.
605 */
606 g_HmR0Ops = g_HmR0OpsSvm;
607
608 /* Query AMD features. */
609 uint32_t u32Dummy;
610 ASMCpuId(0x8000000a, &g_uHmSvmRev, &g_uHmMaxAsid, &u32Dummy, &g_fHmSvmFeatures);
611
612 /*
613 * We need to check if AMD-V has been properly initialized on all CPUs.
614 * Some BIOSes might do a poor job.
615 */
616 HMR0FIRSTRC FirstRc;
617 hmR0FirstRcInit(&FirstRc);
618 rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
619 AssertRC(rc);
620 if (RT_SUCCESS(rc))
621 rc = hmR0FirstRcGetStatus(&FirstRc);
622#ifndef DEBUG_bird
623 AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
624 ("hmR0InitAmdCpu failed for cpu %d with rc=%Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
625#endif
626 if (RT_SUCCESS(rc))
627 {
628 SUPR0GetHwvirtMsrs(&g_HmMsrs, SUPVTCAPS_AMD_V, false /* fForce */);
629 g_fHmSvmSupported = true;
630 }
631 else
632 {
633 g_rcHmInit = rc;
634 if (rc == VERR_SVM_DISABLED || rc == VERR_SVM_IN_USE)
635 rc = VINF_SUCCESS; /* Don't fail if AMD-V is disabled or in use. */
636 }
637 }
638 else
639 g_rcHmInit = rc;
640 return rc;
641}
642
643
644/**
645 * Does global Ring-0 HM initialization (at module init).
646 *
647 * @returns VBox status code.
648 */
649VMMR0_INT_DECL(int) HMR0Init(void)
650{
651 /*
652 * Initialize the globals.
653 */
654 g_fHmEnabled = false;
655 for (unsigned i = 0; i < RT_ELEMENTS(g_aHmCpuInfo); i++)
656 {
657 g_aHmCpuInfo[i].idCpu = NIL_RTCPUID;
658 g_aHmCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
659 g_aHmCpuInfo[i].HCPhysMemObj = NIL_RTHCPHYS;
660 g_aHmCpuInfo[i].pvMemObj = NULL;
661#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
662 g_aHmCpuInfo[i].n.svm.hNstGstMsrpm = NIL_RTR0MEMOBJ;
663 g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm = NIL_RTHCPHYS;
664 g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm = NULL;
665#endif
666 }
667
668 /* Fill in all callbacks with placeholders. */
669 g_HmR0Ops = g_HmR0OpsDummy;
670
671 /* Default is global VT-x/AMD-V init. */
672 g_fHmGlobalInit = true;
673
674 g_fHmVmxSupported = false;
675 g_fHmSvmSupported = false;
676 g_uHmMaxAsid = 0;
677
678 /*
679 * Get host kernel features that HM might need to know in order
680 * to co-operate and function properly with the host OS (e.g. SMAP).
681 */
682 g_fHmHostKernelFeatures = SUPR0GetKernelFeatures();
683
684 /*
685 * Make sure g_aHmCpuInfo is big enough for all the CPUs on this system.
686 */
687 if (RTMpGetArraySize() > RT_ELEMENTS(g_aHmCpuInfo))
688 {
689 LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aHmCpuInfo)));
690 return VERR_TOO_MANY_CPUS;
691 }
692
693 /*
694 * Check for VT-x or AMD-V support.
695 * Return failure only in out-of-memory situations.
696 */
697 uint32_t fCaps = 0;
698 int rc = SUPR0GetVTSupport(&fCaps);
699 if (RT_SUCCESS(rc))
700 {
701 if (fCaps & SUPVTCAPS_VT_X)
702 rc = hmR0InitIntel();
703 else
704 {
705 Assert(fCaps & SUPVTCAPS_AMD_V);
706 rc = hmR0InitAmd();
707 }
708 if (RT_SUCCESS(rc))
709 {
710 /*
711 * Register notification callbacks that we can use to disable/enable CPUs
712 * when brought offline/online or suspending/resuming.
713 */
714 if (!g_fHmVmxUsingSUPR0EnableVTx)
715 {
716 rc = RTMpNotificationRegister(hmR0MpEventCallback, NULL);
717 if (RT_SUCCESS(rc))
718 {
719 rc = RTPowerNotificationRegister(hmR0PowerCallback, NULL);
720 if (RT_FAILURE(rc))
721 RTMpNotificationDeregister(hmR0MpEventCallback, NULL);
722 }
723 if (RT_FAILURE(rc))
724 {
725 /* There shouldn't be any per-cpu allocations at this point,
726 so just have to call SVMR0GlobalTerm and VMXR0GlobalTerm. */
727 if (fCaps & SUPVTCAPS_VT_X)
728 VMXR0GlobalTerm();
729 else
730 SVMR0GlobalTerm();
731 g_HmR0Ops = g_HmR0OpsDummy;
732 g_rcHmInit = rc;
733 g_fHmSvmSupported = false;
734 g_fHmVmxSupported = false;
735 }
736 }
737 }
738 }
739 else
740 {
741 g_rcHmInit = rc;
742 rc = VINF_SUCCESS; /* We return success here because module init shall not fail if HM fails to initialize. */
743 }
744 return rc;
745}
746
747
748/**
749 * Does global Ring-0 HM termination (at module termination).
750 *
751 * @returns VBox status code (ignored).
752 */
753VMMR0_INT_DECL(int) HMR0Term(void)
754{
755 int rc;
756 if ( g_fHmVmxSupported
757 && g_fHmVmxUsingSUPR0EnableVTx)
758 {
759 /*
760 * Simple if the host OS manages VT-x.
761 */
762 Assert(g_fHmGlobalInit);
763
764 if (g_fHmVmxCalledSUPR0EnableVTx)
765 {
766 rc = SUPR0EnableVTx(false /* fEnable */);
767 g_fHmVmxCalledSUPR0EnableVTx = false;
768 }
769 else
770 rc = VINF_SUCCESS;
771
772 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aHmCpuInfo); iCpu++)
773 {
774 g_aHmCpuInfo[iCpu].fConfigured = false;
775 Assert(g_aHmCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
776 }
777 }
778 else
779 {
780 Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
781
782 /* Doesn't really matter if this fails. */
783 RTMpNotificationDeregister(hmR0MpEventCallback, NULL);
784 RTPowerNotificationDeregister(hmR0PowerCallback, NULL);
785 rc = VINF_SUCCESS;
786
787 /*
788 * Disable VT-x/AMD-V on all CPUs if we enabled it before.
789 */
790 if (g_fHmGlobalInit)
791 {
792 HMR0FIRSTRC FirstRc;
793 hmR0FirstRcInit(&FirstRc);
794 rc = RTMpOnAll(hmR0DisableCpuCallback, NULL /* pvUser 1 */, &FirstRc);
795 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
796 if (RT_SUCCESS(rc))
797 rc = hmR0FirstRcGetStatus(&FirstRc);
798 }
799
800 /*
801 * Free the per-cpu pages used for VT-x and AMD-V.
802 */
803 for (unsigned i = 0; i < RT_ELEMENTS(g_aHmCpuInfo); i++)
804 {
805 if (g_aHmCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
806 {
807 RTR0MemObjFree(g_aHmCpuInfo[i].hMemObj, false);
808 g_aHmCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
809 g_aHmCpuInfo[i].HCPhysMemObj = NIL_RTHCPHYS;
810 g_aHmCpuInfo[i].pvMemObj = NULL;
811 }
812#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
813 if (g_aHmCpuInfo[i].n.svm.hNstGstMsrpm != NIL_RTR0MEMOBJ)
814 {
815 RTR0MemObjFree(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm, false);
816 g_aHmCpuInfo[i].n.svm.hNstGstMsrpm = NIL_RTR0MEMOBJ;
817 g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm = NIL_RTHCPHYS;
818 g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm = NULL;
819 }
820#endif
821 }
822 }
823
824 /** @todo This needs cleaning up. There's no matching
825 * hmR0TermIntel()/hmR0TermAmd() and all the VT-x/AMD-V specific bits
826 * should move into their respective modules. */
827 /* Finally, call global VT-x/AMD-V termination. */
828 if (g_fHmVmxSupported)
829 VMXR0GlobalTerm();
830 else if (g_fHmSvmSupported)
831 SVMR0GlobalTerm();
832
833 return rc;
834}
835
836
837/**
838 * Enable VT-x or AMD-V on the current CPU.
839 *
840 * @returns VBox status code.
841 * @param pVM The cross context VM structure. Can be NULL.
842 * @param idCpu The identifier for the CPU the function is called on.
843 *
844 * @remarks May be called with interrupts disabled!
845 */
846static int hmR0EnableCpu(PVMCC pVM, RTCPUID idCpu)
847{
848 PHMPHYSCPU pHostCpu = &g_aHmCpuInfo[idCpu];
849
850 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
851 Assert(idCpu < RT_ELEMENTS(g_aHmCpuInfo));
852 Assert(!pHostCpu->fConfigured);
853 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
854
855 pHostCpu->idCpu = idCpu;
856 /* Do NOT reset cTlbFlushes here, see @bugref{6255}. */
857
858 int rc;
859 if ( g_fHmVmxSupported
860 && g_fHmVmxUsingSUPR0EnableVTx)
861 rc = g_HmR0Ops.pfnEnableCpu(pHostCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HmMsrs);
862 else
863 {
864 AssertLogRelMsgReturn(pHostCpu->hMemObj != NIL_RTR0MEMOBJ, ("hmR0EnableCpu failed idCpu=%u.\n", idCpu), VERR_HM_IPE_1);
865 rc = g_HmR0Ops.pfnEnableCpu(pHostCpu, pVM, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj, false, &g_HmMsrs);
866 }
867 if (RT_SUCCESS(rc))
868 pHostCpu->fConfigured = true;
869 return rc;
870}
871
872
873/**
874 * Worker function passed to RTMpOnAll() that is to be called on all CPUs.
875 *
876 * @param idCpu The identifier for the CPU the function is called on.
877 * @param pvUser1 Opaque pointer to the VM (can be NULL!).
878 * @param pvUser2 Opaque pointer to the first RC structure.
879 */
880static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
881{
882 PVMCC pVM = (PVMCC)pvUser1; /* can be NULL! */
883 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2;
884 AssertReturnVoid(g_fHmGlobalInit);
885 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
886 hmR0FirstRcSetStatus(pFirstRc, hmR0EnableCpu(pVM, idCpu));
887}
888
889
890/**
891 * RTOnce callback employed by HMR0EnableAllCpus.
892 *
893 * @returns VBox status code.
894 * @param pvUser Pointer to the VM.
895 */
896static DECLCALLBACK(int32_t) hmR0EnableAllCpuOnce(void *pvUser)
897{
898 PVMCC pVM = (PVMCC)pvUser;
899
900 /*
901 * Indicate that we've initialized.
902 *
903 * Note! There is a potential race between this function and the suspend
904 * notification. Kind of unlikely though, so ignored for now.
905 */
906 AssertReturn(!g_fHmEnabled, VERR_HM_ALREADY_ENABLED_IPE);
907 ASMAtomicWriteBool(&g_fHmEnabled, true);
908
909 /*
910 * The global init variable is set by the first VM.
911 */
912 g_fHmGlobalInit = pVM->hm.s.fGlobalInit;
913
914#ifdef VBOX_STRICT
915 for (unsigned i = 0; i < RT_ELEMENTS(g_aHmCpuInfo); i++)
916 {
917 Assert(g_aHmCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
918 Assert(g_aHmCpuInfo[i].HCPhysMemObj == NIL_RTHCPHYS);
919 Assert(g_aHmCpuInfo[i].pvMemObj == NULL);
920 Assert(!g_aHmCpuInfo[i].fConfigured);
921 Assert(!g_aHmCpuInfo[i].cTlbFlushes);
922 Assert(!g_aHmCpuInfo[i].uCurrentAsid);
923# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
924 Assert(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm == NIL_RTR0MEMOBJ);
925 Assert(g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm == NIL_RTHCPHYS);
926 Assert(g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm == NULL);
927# endif
928 }
929#endif
930
931 int rc;
932 if ( g_fHmVmxSupported
933 && g_fHmVmxUsingSUPR0EnableVTx)
934 {
935 /*
936 * Global VT-x initialization API (only darwin for now).
937 */
938 rc = SUPR0EnableVTx(true /* fEnable */);
939 if (RT_SUCCESS(rc))
940 {
941 g_fHmVmxCalledSUPR0EnableVTx = true;
942 /* If the host provides a VT-x init API, then we'll rely on that for global init. */
943 g_fHmGlobalInit = pVM->hm.s.fGlobalInit = true;
944 }
945 else
946 AssertMsgFailed(("hmR0EnableAllCpuOnce/SUPR0EnableVTx: rc=%Rrc\n", rc));
947 }
948 else
949 {
950 /*
951 * We're doing the job ourselves.
952 */
953 /* Allocate one page per cpu for the global VT-x and AMD-V pages */
954 for (unsigned i = 0; i < RT_ELEMENTS(g_aHmCpuInfo); i++)
955 {
956 Assert(g_aHmCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
957#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
958 Assert(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm == NIL_RTR0MEMOBJ);
959#endif
960 if (RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(i)))
961 {
962 /** @todo NUMA */
963 rc = RTR0MemObjAllocCont(&g_aHmCpuInfo[i].hMemObj, HOST_PAGE_SIZE, NIL_RTHCPHYS /*PhysHighest*/, false /* executable R0 mapping */);
964 AssertLogRelRCReturn(rc, rc);
965
966 g_aHmCpuInfo[i].HCPhysMemObj = RTR0MemObjGetPagePhysAddr(g_aHmCpuInfo[i].hMemObj, 0);
967 Assert(g_aHmCpuInfo[i].HCPhysMemObj != NIL_RTHCPHYS);
968 Assert(!(g_aHmCpuInfo[i].HCPhysMemObj & HOST_PAGE_OFFSET_MASK));
969
970 g_aHmCpuInfo[i].pvMemObj = RTR0MemObjAddress(g_aHmCpuInfo[i].hMemObj);
971 AssertPtr(g_aHmCpuInfo[i].pvMemObj);
972 RT_BZERO(g_aHmCpuInfo[i].pvMemObj, HOST_PAGE_SIZE);
973
974#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
975 rc = RTR0MemObjAllocCont(&g_aHmCpuInfo[i].n.svm.hNstGstMsrpm, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT,
976 NIL_RTHCPHYS /*PhysHighest*/, false /* executable R0 mapping */);
977 AssertLogRelRCReturn(rc, rc);
978
979 g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm = RTR0MemObjGetPagePhysAddr(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm, 0);
980 Assert(g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm != NIL_RTHCPHYS);
981 Assert(!(g_aHmCpuInfo[i].n.svm.HCPhysNstGstMsrpm & HOST_PAGE_OFFSET_MASK));
982
983 g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm = RTR0MemObjAddress(g_aHmCpuInfo[i].n.svm.hNstGstMsrpm);
984 AssertPtr(g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm);
985 ASMMemFill32(g_aHmCpuInfo[i].n.svm.pvNstGstMsrpm, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
986#endif
987 }
988 }
989
990 rc = VINF_SUCCESS;
991 }
992
993 if ( RT_SUCCESS(rc)
994 && g_fHmGlobalInit)
995 {
996 /*
997 * It's possible we end up here with VMX (and perhaps SVM) not supported, see @bugref{9918}.
998 * In that case, our HMR0 function table contains the dummy placeholder functions which pretend
999 * success. However, we must not pretend success any longer (like we did during HMR0Init called
1000 * during VMMR0 module init) as the HM init error code (g_rcHmInit) should be propagated to
1001 * ring-3 especially since we now have a VM instance.
1002 */
1003 if ( !g_fHmVmxSupported
1004 && !g_fHmSvmSupported)
1005 {
1006 Assert(g_HmR0Ops.pfnEnableCpu == hmR0DummyEnableCpu);
1007 Assert(RT_FAILURE(g_rcHmInit));
1008 rc = g_rcHmInit;
1009 }
1010 else
1011 {
1012 /* First time, so initialize each cpu/core. */
1013 HMR0FIRSTRC FirstRc;
1014 hmR0FirstRcInit(&FirstRc);
1015 Assert(g_HmR0Ops.pfnEnableCpu != hmR0DummyEnableCpu);
1016 rc = RTMpOnAll(hmR0EnableCpuCallback, (void *)pVM, &FirstRc);
1017 if (RT_SUCCESS(rc))
1018 rc = hmR0FirstRcGetStatus(&FirstRc);
1019 }
1020 }
1021
1022 return rc;
1023}
1024
1025
1026/**
1027 * Sets up HM on all CPUs.
1028 *
1029 * @returns VBox status code.
1030 * @param pVM The cross context VM structure.
1031 */
1032VMMR0_INT_DECL(int) HMR0EnableAllCpus(PVMCC pVM)
1033{
1034 /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
1035 if (ASMAtomicReadBool(&g_fHmSuspended))
1036 return VERR_HM_SUSPEND_PENDING;
1037
1038 return RTOnce(&g_HmEnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM);
1039}
1040
1041
1042/**
1043 * Disable VT-x or AMD-V on the current CPU.
1044 *
1045 * @returns VBox status code.
1046 * @param idCpu The identifier for the CPU this function is called on.
1047 *
1048 * @remarks Must be called with preemption disabled.
1049 */
1050static int hmR0DisableCpu(RTCPUID idCpu)
1051{
1052 PHMPHYSCPU pHostCpu = &g_aHmCpuInfo[idCpu];
1053
1054 Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
1055 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1056 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
1057 Assert(idCpu < RT_ELEMENTS(g_aHmCpuInfo));
1058 Assert(!pHostCpu->fConfigured || pHostCpu->hMemObj != NIL_RTR0MEMOBJ);
1059 AssertRelease(idCpu == RTMpCpuId());
1060
1061 if (pHostCpu->hMemObj == NIL_RTR0MEMOBJ)
1062 return pHostCpu->fConfigured ? VERR_NO_MEMORY : VINF_SUCCESS /* not initialized. */;
1063 AssertPtr(pHostCpu->pvMemObj);
1064 Assert(pHostCpu->HCPhysMemObj != NIL_RTHCPHYS);
1065
1066 int rc;
1067 if (pHostCpu->fConfigured)
1068 {
1069 rc = g_HmR0Ops.pfnDisableCpu(pHostCpu, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
1070 AssertRCReturn(rc, rc);
1071
1072 pHostCpu->fConfigured = false;
1073 pHostCpu->idCpu = NIL_RTCPUID;
1074 }
1075 else
1076 rc = VINF_SUCCESS; /* nothing to do */
1077 return rc;
1078}
1079
1080
1081/**
1082 * Worker function passed to RTMpOnAll() that is to be called on the target
1083 * CPUs.
1084 *
1085 * @param idCpu The identifier for the CPU the function is called on.
1086 * @param pvUser1 The 1st user argument.
1087 * @param pvUser2 Opaque pointer to the FirstRc.
1088 */
1089static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1090{
1091 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2; NOREF(pvUser1);
1092 AssertReturnVoid(g_fHmGlobalInit);
1093 hmR0FirstRcSetStatus(pFirstRc, hmR0DisableCpu(idCpu));
1094}
1095
1096
1097/**
1098 * Worker function passed to RTMpOnSpecific() that is to be called on the target
1099 * CPU.
1100 *
1101 * @param idCpu The identifier for the CPU the function is called on.
1102 * @param pvUser1 Null, not used.
1103 * @param pvUser2 Null, not used.
1104 */
1105static DECLCALLBACK(void) hmR0DisableCpuOnSpecificCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1106{
1107 NOREF(pvUser1);
1108 NOREF(pvUser2);
1109 hmR0DisableCpu(idCpu);
1110}
1111
1112
1113/**
1114 * Callback function invoked when a CPU goes online or offline.
1115 *
1116 * @param enmEvent The Mp event.
1117 * @param idCpu The identifier for the CPU the function is called on.
1118 * @param pvData Opaque data (PVMCC pointer).
1119 */
1120static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData)
1121{
1122 NOREF(pvData);
1123 Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
1124
1125 /*
1126 * We only care about uninitializing a CPU that is going offline. When a
1127 * CPU comes online, the initialization is done lazily in HMR0Enter().
1128 */
1129 switch (enmEvent)
1130 {
1131 case RTMPEVENT_OFFLINE:
1132 {
1133 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1134 RTThreadPreemptDisable(&PreemptState);
1135 if (idCpu == RTMpCpuId())
1136 {
1137 int rc = hmR0DisableCpu(idCpu);
1138 AssertRC(rc);
1139 RTThreadPreemptRestore(&PreemptState);
1140 }
1141 else
1142 {
1143 RTThreadPreemptRestore(&PreemptState);
1144 RTMpOnSpecific(idCpu, hmR0DisableCpuOnSpecificCallback, NULL /* pvUser1 */, NULL /* pvUser2 */);
1145 }
1146 break;
1147 }
1148
1149 default:
1150 break;
1151 }
1152}
1153
1154
1155/**
1156 * Called whenever a system power state change occurs.
1157 *
1158 * @param enmEvent The Power event.
1159 * @param pvUser User argument.
1160 */
1161static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
1162{
1163 NOREF(pvUser);
1164 Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
1165
1166#ifdef LOG_ENABLED
1167 if (enmEvent == RTPOWEREVENT_SUSPEND)
1168 SUPR0Printf("hmR0PowerCallback RTPOWEREVENT_SUSPEND\n");
1169 else
1170 SUPR0Printf("hmR0PowerCallback RTPOWEREVENT_RESUME\n");
1171#endif
1172
1173 if (enmEvent == RTPOWEREVENT_SUSPEND)
1174 ASMAtomicWriteBool(&g_fHmSuspended, true);
1175
1176 if (g_fHmEnabled)
1177 {
1178 int rc;
1179 HMR0FIRSTRC FirstRc;
1180 hmR0FirstRcInit(&FirstRc);
1181
1182 if (enmEvent == RTPOWEREVENT_SUSPEND)
1183 {
1184 if (g_fHmGlobalInit)
1185 {
1186 /* Turn off VT-x or AMD-V on all CPUs. */
1187 rc = RTMpOnAll(hmR0DisableCpuCallback, NULL /* pvUser 1 */, &FirstRc);
1188 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1189 }
1190 /* else nothing to do here for the local init case */
1191 }
1192 else
1193 {
1194 /* Reinit the CPUs from scratch as the suspend state might have
1195 messed with the MSRs. (lousy BIOSes as usual) */
1196 if (g_fHmVmxSupported)
1197 rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
1198 else
1199 rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
1200 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1201 if (RT_SUCCESS(rc))
1202 rc = hmR0FirstRcGetStatus(&FirstRc);
1203#ifdef LOG_ENABLED
1204 if (RT_FAILURE(rc))
1205 SUPR0Printf("hmR0PowerCallback hmR0InitXxxCpu failed with %Rrc\n", rc);
1206#endif
1207 if (g_fHmGlobalInit)
1208 {
1209 /* Turn VT-x or AMD-V back on for all CPUs. */
1210 rc = RTMpOnAll(hmR0EnableCpuCallback, NULL /* pVM */, &FirstRc /* output ignored */);
1211 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1212 }
1213 /* else nothing to do here for the local init case */
1214 }
1215 }
1216
1217 if (enmEvent == RTPOWEREVENT_RESUME)
1218 ASMAtomicWriteBool(&g_fHmSuspended, false);
1219}
1220
1221
1222/**
1223 * Does ring-0 per-VM HM initialization.
1224 *
1225 * This will call the CPU-specific init routine which may initialize and allocate
1226 * resources for virtual CPUs.
1227 *
1228 * @returns VBox status code.
1229 * @param pVM The cross context VM structure.
1230 *
1231 * @remarks This is called after HMR3Init(), see vmR3CreateU() and
1232 * vmR3InitRing3().
1233 */
1234VMMR0_INT_DECL(int) HMR0InitVM(PVMCC pVM)
1235{
1236 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
1237 AssertCompile(sizeof(pVM->hmr0.s) <= sizeof(pVM->hmr0.padding));
1238 AssertCompile(sizeof(pVM->aCpus[0].hm.s) <= sizeof(pVM->aCpus[0].hm.padding));
1239 AssertCompile(sizeof(pVM->aCpus[0].hmr0.s) <= sizeof(pVM->aCpus[0].hmr0.padding));
1240 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1241
1242 /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
1243 if (ASMAtomicReadBool(&g_fHmSuspended))
1244 return VERR_HM_SUSPEND_PENDING;
1245
1246 /*
1247 * Copy globals to the VM structure.
1248 */
1249 Assert(!(pVM->hm.s.vmx.fSupported && pVM->hm.s.svm.fSupported));
1250 if (pVM->hm.s.vmx.fSupported)
1251 {
1252 pVM->hmr0.s.vmx.fUsePreemptTimer = pVM->hm.s.vmx.fUsePreemptTimerCfg && g_fHmVmxUsePreemptTimer;
1253 pVM->hm.s.vmx.fUsePreemptTimerCfg = pVM->hmr0.s.vmx.fUsePreemptTimer;
1254 pVM->hm.s.vmx.cPreemptTimerShift = g_cHmVmxPreemptTimerShift;
1255 pVM->hm.s.ForR3.vmx.u64HostCr0 = g_uHmVmxHostCr0;
1256 pVM->hm.s.ForR3.vmx.u64HostCr4 = g_uHmVmxHostCr4;
1257 pVM->hm.s.ForR3.vmx.u64HostMsrEfer = g_uHmVmxHostMsrEfer;
1258 pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl = g_uHmVmxHostSmmMonitorCtl;
1259 pVM->hm.s.ForR3.vmx.u64HostCoreCap = g_uHmVmxHostCoreCap;
1260 pVM->hm.s.ForR3.vmx.u64HostMemoryCtrl = g_uHmVmxHostMemoryCtrl;
1261 pVM->hm.s.ForR3.vmx.u64HostFeatCtrl = g_HmMsrs.u.vmx.u64FeatCtrl;
1262 HMGetVmxMsrsFromHwvirtMsrs(&g_HmMsrs, &pVM->hm.s.ForR3.vmx.Msrs);
1263 /* If you need to tweak host MSRs for testing VMX R0 code, do it here. */
1264
1265 /* Enable VPID if supported and configured. */
1266 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VPID)
1267 pVM->hm.s.ForR3.vmx.fVpid = pVM->hmr0.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; /* Can be overridden by CFGM in HMR3Init(). */
1268
1269 /* Use VMCS shadowing if supported. */
1270 pVM->hmr0.s.vmx.fUseVmcsShadowing = pVM->cpum.ro.GuestFeatures.fVmx
1271 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING);
1272 pVM->hm.s.ForR3.vmx.fUseVmcsShadowing = pVM->hmr0.s.vmx.fUseVmcsShadowing;
1273
1274 /* Use the VMCS controls for swapping the EFER MSR if supported. */
1275 pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer = g_fHmVmxSupportsVmcsEfer;
1276
1277#if 0
1278 /* Enable APIC register virtualization and virtual-interrupt delivery if supported. */
1279 if ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT)
1280 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY))
1281 pVM->hm.s.fVirtApicRegs = true;
1282
1283 /* Enable posted-interrupt processing if supported. */
1284 /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
1285 * here. */
1286 if ( (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT)
1287 && (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT))
1288 pVM->hm.s.fPostedIntrs = true;
1289#endif
1290 }
1291 else if (pVM->hm.s.svm.fSupported)
1292 {
1293 pVM->hm.s.ForR3.svm.u32Rev = g_uHmSvmRev;
1294 pVM->hm.s.ForR3.svm.fFeatures = g_fHmSvmFeatures;
1295 pVM->hm.s.ForR3.svm.u64MsrHwcr = g_HmMsrs.u.svm.u64MsrHwcr;
1296 /* If you need to tweak host MSRs for testing SVM R0 code, do it here. */
1297 }
1298 pVM->hm.s.ForR3.rcInit = g_rcHmInit;
1299 pVM->hm.s.ForR3.uMaxAsid = g_uHmMaxAsid;
1300
1301 /*
1302 * Set default maximum inner loops in ring-0 before returning to ring-3.
1303 * Can be overridden using CFGM.
1304 */
1305 uint32_t cMaxResumeLoops = pVM->hm.s.cMaxResumeLoopsCfg;
1306 if (!cMaxResumeLoops)
1307 {
1308 cMaxResumeLoops = 1024;
1309 if (RTThreadPreemptIsPendingTrusty())
1310 cMaxResumeLoops = 8192;
1311 }
1312 else if (cMaxResumeLoops > 16384)
1313 cMaxResumeLoops = 16384;
1314 else if (cMaxResumeLoops < 32)
1315 cMaxResumeLoops = 32;
1316 pVM->hm.s.cMaxResumeLoopsCfg = pVM->hmr0.s.cMaxResumeLoops = cMaxResumeLoops;
1317
1318 /*
1319 * Initialize some per-VCPU fields.
1320 */
1321 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1322 {
1323 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
1324 pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID;
1325 pVCpu->hmr0.s.idLastCpu = NIL_RTCPUID;
1326
1327 /* We'll always increment this the first time (host uses ASID 0). */
1328 AssertReturn(!pVCpu->hmr0.s.uCurrentAsid, VERR_HM_IPE_3);
1329 }
1330
1331 /*
1332 * Configure defences against Spectre and other CPU bugs.
1333 */
1334 uint32_t fWorldSwitcher = 0;
1335 uint32_t cLastStdLeaf = ASMCpuId_EAX(0);
1336 if (cLastStdLeaf >= 0x00000007 && RTX86IsValidStdRange(cLastStdLeaf))
1337 {
1338 uint32_t uEdx = 0;
1339 ASMCpuIdExSlow(0x00000007, 0, 0, 0, NULL, NULL, NULL, &uEdx);
1340
1341 if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB)
1342 {
1343 if (pVM->hm.s.fIbpbOnVmExit)
1344 fWorldSwitcher |= HM_WSF_IBPB_EXIT;
1345 if (pVM->hm.s.fIbpbOnVmEntry)
1346 fWorldSwitcher |= HM_WSF_IBPB_ENTRY;
1347 }
1348 if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD)
1349 {
1350 if (pVM->hm.s.fL1dFlushOnVmEntry)
1351 fWorldSwitcher |= HM_WSF_L1D_ENTRY;
1352 else if (pVM->hm.s.fL1dFlushOnSched)
1353 fWorldSwitcher |= HM_WSF_L1D_SCHED;
1354 }
1355 if (uEdx & X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR)
1356 {
1357 if (pVM->hm.s.fMdsClearOnVmEntry)
1358 fWorldSwitcher |= HM_WSF_MDS_ENTRY;
1359 else if (pVM->hm.s.fMdsClearOnSched)
1360 fWorldSwitcher |= HM_WSF_MDS_SCHED;
1361 }
1362 }
1363 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1364 {
1365 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
1366 pVCpu->hmr0.s.fWorldSwitcher = fWorldSwitcher;
1367 }
1368 pVM->hm.s.ForR3.fWorldSwitcher = fWorldSwitcher;
1369
1370
1371 /*
1372 * Call the hardware specific initialization method.
1373 */
1374 return g_HmR0Ops.pfnInitVM(pVM);
1375}
1376
1377
1378/**
1379 * Does ring-0 per VM HM termination.
1380 *
1381 * @returns VBox status code.
1382 * @param pVM The cross context VM structure.
1383 */
1384VMMR0_INT_DECL(int) HMR0TermVM(PVMCC pVM)
1385{
1386 Log(("HMR0TermVM: %p\n", pVM));
1387 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1388
1389 /*
1390 * Call the hardware specific method.
1391 *
1392 * Note! We might be preparing for a suspend, so the pfnTermVM() functions should probably not
1393 * mess with VT-x/AMD-V features on the CPU; currently all they do is free memory, so this is safe.
1394 */
1395 return g_HmR0Ops.pfnTermVM(pVM);
1396}
1397
1398
1399/**
1400 * Sets up a VT-x or AMD-V session.
1401 *
1402 * This is mostly about setting up the hardware VM state.
1403 *
1404 * @returns VBox status code.
1405 * @param pVM The cross context VM structure.
1406 */
1407VMMR0_INT_DECL(int) HMR0SetupVM(PVMCC pVM)
1408{
1409 Log(("HMR0SetupVM: %p\n", pVM));
1410 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1411
1412 /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
1413 AssertReturn(!ASMAtomicReadBool(&g_fHmSuspended), VERR_HM_SUSPEND_PENDING);
1414
1415 /* On first entry we'll sync everything. */
1416 VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);
1417
1418 /*
1419 * Call the hardware specific setup VM method. This requires the CPU to be
1420 * enabled for AMD-V/VT-x and preemption to be prevented.
1421 */
1422 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1423 RTThreadPreemptDisable(&PreemptState);
1424 RTCPUID const idCpu = RTMpCpuId();
1425
1426 /* Enable VT-x or AMD-V if local init is required. */
1427 int rc;
1428 if (!g_fHmGlobalInit)
1429 {
1430 Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
1431 rc = hmR0EnableCpu(pVM, idCpu);
1432 if (RT_FAILURE(rc))
1433 {
1434 RTThreadPreemptRestore(&PreemptState);
1435 return rc;
1436 }
1437 }
1438
1439 /* Setup VT-x or AMD-V. */
1440 rc = g_HmR0Ops.pfnSetupVM(pVM);
1441
1442 /* Disable VT-x or AMD-V if local init was done before. */
1443 if (!g_fHmGlobalInit)
1444 {
1445 Assert(!g_fHmVmxSupported || !g_fHmVmxUsingSUPR0EnableVTx);
1446 int rc2 = hmR0DisableCpu(idCpu);
1447 AssertRC(rc2);
1448 }
1449
1450 RTThreadPreemptRestore(&PreemptState);
1451 return rc;
1452}
1453
1454
1455/**
1456 * Notification callback before an assertion longjump and guru meditation.
1457 *
1458 * @returns VBox status code.
1459 * @param pVCpu The cross context virtual CPU structure.
1460 * @param pvUser User argument, currently unused, NULL.
1461 */
1462static DECLCALLBACK(int) hmR0AssertionCallback(PVMCPUCC pVCpu, void *pvUser)
1463{
1464 RT_NOREF(pvUser);
1465 Assert(pVCpu);
1466 Assert(g_HmR0Ops.pfnAssertionCallback);
1467 return g_HmR0Ops.pfnAssertionCallback(pVCpu);
1468}
1469
1470
1471/**
1472 * Turns on HM on the CPU if necessary and initializes the bare minimum state
1473 * required for entering HM context.
1474 *
1475 * @returns VBox status code.
1476 * @param pVCpu The cross context virtual CPU structure.
1477 *
1478 * @remarks No-long-jump zone!!!
1479 */
1480VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPUCC pVCpu)
1481{
1482 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1483
1484 int rc = VINF_SUCCESS;
1485 RTCPUID const idCpu = RTMpCpuId();
1486 PHMPHYSCPU pHostCpu = &g_aHmCpuInfo[idCpu];
1487 AssertPtr(pHostCpu);
1488
1489 /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
1490 if (!pHostCpu->fConfigured)
1491 rc = hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
1492
1493 /* Register a callback to fire prior to performing a longjmp to ring-3 so HM can disable VT-x/AMD-V if needed. */
1494 VMMR0AssertionSetNotification(pVCpu, hmR0AssertionCallback, NULL /*pvUser*/);
1495
1496 /* Reload host-state (back from ring-3/migrated CPUs) and shared guest/host bits. */
1497 if (g_fHmVmxSupported)
1498 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
1499 else
1500 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE;
1501
1502 Assert(pHostCpu->idCpu == idCpu && pHostCpu->idCpu != NIL_RTCPUID);
1503 pVCpu->hmr0.s.idEnteredCpu = idCpu;
1504 return rc;
1505}
1506
1507
1508/**
1509 * Enters the VT-x or AMD-V session.
1510 *
1511 * @returns VBox status code.
1512 * @param pVCpu The cross context virtual CPU structure.
1513 *
1514 * @remarks This is called with preemption disabled.
1515 */
1516VMMR0_INT_DECL(int) HMR0Enter(PVMCPUCC pVCpu)
1517{
1518 /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
1519 AssertReturn(!ASMAtomicReadBool(&g_fHmSuspended), VERR_HM_SUSPEND_PENDING);
1520 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1521
1522 /* Load the bare minimum state required for entering HM. */
1523 int rc = hmR0EnterCpu(pVCpu);
1524 if (RT_SUCCESS(rc))
1525 {
1526 if (g_fHmVmxSupported)
1527 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
1528 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
1529 else
1530 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
1531 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
1532
1533 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
1534 rc = g_HmR0Ops.pfnEnterSession(pVCpu);
1535 AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID, rc);
1536
1537 /* Exports the host-state as we may be resuming code after a longjmp and quite
1538 possibly now be scheduled on a different CPU. */
1539 rc = g_HmR0Ops.pfnExportHostState(pVCpu);
1540 AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID, rc);
1541 }
1542 return rc;
1543}
1544
1545
1546/**
1547 * Deinitializes the bare minimum state used for HM context and, if necessary,
1548 * disables HM on the CPU.
1549 *
1550 * @returns VBox status code.
1551 * @param pVCpu The cross context virtual CPU structure.
1552 *
1553 * @remarks No-long-jump zone!!!
1554 */
1555VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPUCC pVCpu)
1556{
1557 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1558 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_HM_WRONG_CPU);
1559
1560 RTCPUID const idCpu = RTMpCpuId();
1561 PCHMPHYSCPU pHostCpu = &g_aHmCpuInfo[idCpu];
1562
1563 if ( !g_fHmGlobalInit
1564 && pHostCpu->fConfigured)
1565 {
1566 int rc = hmR0DisableCpu(idCpu);
1567 AssertRCReturn(rc, rc);
1568 Assert(!pHostCpu->fConfigured);
1569 Assert(pHostCpu->idCpu == NIL_RTCPUID);
1570
1571 /* For obtaining a non-zero ASID/VPID on next re-entry. */
1572 pVCpu->hmr0.s.idLastCpu = NIL_RTCPUID;
1573 }
1574
1575 /* Clear it while leaving HM context, hmPokeCpuForTlbFlush() relies on this. */
1576 pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID;
1577
1578 /* De-register the longjmp-to-ring-3 callback now that we have relinquished hardware resources. */
1579 VMMR0AssertionRemoveNotification(pVCpu);
1580 return VINF_SUCCESS;
1581}
1582
1583
1584/**
1585 * Thread-context hook for HM.
1586 *
1587 * This is used together with RTThreadCtxHookCreate() on platforms which
1588 * support it, and directly from VMMR0EmtPrepareForBlocking() and
1589 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
1590 *
1591 * @param enmEvent The thread-context event.
1592 * @param pvUser Opaque pointer to the VMCPU.
1593 */
1594VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
1595{
1596 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
1597 Assert(pVCpu);
1598 Assert(g_HmR0Ops.pfnThreadCtxCallback);
1599
1600 g_HmR0Ops.pfnThreadCtxCallback(enmEvent, pVCpu, g_fHmGlobalInit);
1601}
1602
1603
1604/**
1605 * Runs guest code in a hardware accelerated VM.
1606 *
1607 * @returns Strict VBox status code. (VBOXSTRICTRC isn't used because it's
1608 * called from setjmp assembly.)
1609 * @param pVM The cross context VM structure.
1610 * @param pVCpu The cross context virtual CPU structure.
1611 *
1612 * @remarks Can be called with preemption enabled if thread-context hooks are
1613 * used!!!
1614 */
1615VMMR0_INT_DECL(int) HMR0RunGuestCode(PVMCC pVM, PVMCPUCC pVCpu)
1616{
1617 RT_NOREF(pVM);
1618
1619#ifdef VBOX_STRICT
1620 /* With thread-context hooks we would be running this code with preemption enabled. */
1621 if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
1622 {
1623 PCHMPHYSCPU pHostCpu = &g_aHmCpuInfo[RTMpCpuId()];
1624 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
1625 Assert(pHostCpu->fConfigured);
1626 AssertReturn(!ASMAtomicReadBool(&g_fHmSuspended), VERR_HM_SUSPEND_PENDING);
1627 }
1628#endif
1629
1630 VBOXSTRICTRC rcStrict = g_HmR0Ops.pfnRunGuestCode(pVCpu);
1631 return VBOXSTRICTRC_VAL(rcStrict);
1632}
1633
1634
1635/**
1636 * Notification from CPUM that it has unloaded the guest FPU/SSE/AVX state from
1637 * the host CPU and that guest access to it must be intercepted.
1638 *
1639 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1640 */
1641VMMR0_INT_DECL(void) HMR0NotifyCpumUnloadedGuestFpuState(PVMCPUCC pVCpu)
1642{
1643 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
1644}
1645
1646
1647/**
1648 * Notification from CPUM that it has modified the host CR0 (because of FPU).
1649 *
1650 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1651 */
1652VMMR0_INT_DECL(void) HMR0NotifyCpumModifiedHostCr0(PVMCPUCC pVCpu)
1653{
1654 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT);
1655}
1656
1657
1658/**
1659 * Returns suspend status of the host.
1660 *
1661 * @returns Suspend pending or not.
1662 */
1663VMMR0_INT_DECL(bool) HMR0SuspendPending(void)
1664{
1665 return ASMAtomicReadBool(&g_fHmSuspended);
1666}
1667
1668
1669/**
1670 * Invalidates a guest page from the host TLB.
1671 *
1672 * @param pVCpu The cross context virtual CPU structure.
1673 * @param GCVirt Page to invalidate.
1674 */
1675VMMR0_INT_DECL(int) HMR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt)
1676{
1677 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1678 if (pVM->hm.s.vmx.fSupported)
1679 return VMXR0InvalidatePage(pVCpu, GCVirt);
1680 return SVMR0InvalidatePage(pVCpu, GCVirt);
1681}
1682
1683
1684/**
1685 * Returns the CPU structure for the current CPU.
1686 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1687 *
1688 * @returns The cpu structure pointer.
1689 */
1690VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void)
1691{
1692 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1693 RTCPUID const idCpu = RTMpCpuId();
1694 Assert(idCpu < RT_ELEMENTS(g_aHmCpuInfo));
1695 return &g_aHmCpuInfo[idCpu];
1696}
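
/*
 * Editor's sketch (illustrative only, not part of the original file): callers
 * keep preemption disabled for as long as they use the returned pointer, so
 * it keeps referring to the CPU they are actually running on:
 *
 *     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
 *     RTThreadPreemptDisable(&PreemptState);
 *     PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
 *     ...                                        (use pHostCpu while preemption is off)
 *     RTThreadPreemptRestore(&PreemptState);
 */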
1697
1698
1699/**
1700 * Interface for importing state on demand (used by IEM).
1701 *
1702 * @returns VBox status code.
1703 * @param pVCpu The cross context CPU structure.
1704 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1705 */
1706VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1707{
1708 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
1709 return VMXR0ImportStateOnDemand(pVCpu, fWhat);
1710 return SVMR0ImportStateOnDemand(pVCpu, fWhat);
1711}
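
/*
 * Editor's sketch (illustrative only, not part of the original file): a
 * typical on-demand import pulls in just the bits the caller needs; the
 * CPUMCTX_EXTRN_XXX combination below is merely an example:
 *
 *     int rc = HMR0ImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0);
 *     AssertRCReturn(rc, rc);
 */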
1712
1713
1714/**
1715 * Gets HM VM-exit auxiliary information.
1716 *
1717 * @returns VBox status code.
1718 * @param pVCpu The cross context CPU structure.
1719 * @param pHmExitAux Where to store the auxiliary info.
1720 * @param fWhat What to get, see HMVMX_READ_XXX. This is ignored/unused
1721 * on AMD-V.
1722 *
1723 * @remarks Currently this works only when executing a nested-guest using
1724 * hardware-assisted execution, as that is where the auxiliary information
1725 * is required outside of HM. In the future we can make this available
1726 * while executing a regular (non-nested) guest if necessary.
1727 */
1728VMMR0_INT_DECL(int) HMR0GetExitAuxInfo(PVMCPUCC pVCpu, PHMEXITAUX pHmExitAux, uint32_t fWhat)
1729{
1730 Assert(pHmExitAux);
1731 Assert(!(fWhat & ~HMVMX_READ_VALID_MASK));
1732 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
1733 return VMXR0GetExitAuxInfo(pVCpu, &pHmExitAux->Vmx, fWhat);
1734 return SVMR0GetExitAuxInfo(pVCpu, &pHmExitAux->Svm);
1735}
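
/*
 * Editor's sketch (illustrative only, not part of the original file): fetching
 * auxiliary exit data for a nested-guest VM-exit on VT-x.  The flags follow
 * the HMVMX_READ_XXX family mentioned above; the exact combination is only an
 * example:
 *
 *     HMEXITAUX ExitAux;
 *     RT_ZERO(ExitAux);
 *     int rc = HMR0GetExitAuxInfo(pVCpu, &ExitAux, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 *     AssertRCReturn(rc, rc);
 */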
1736
1737
1738#ifdef VBOX_STRICT
1739
1740/**
1741 * Dumps a descriptor.
1742 *
1743 * @param pDesc Descriptor to dump.
1744 * @param Sel The selector.
1745 * @param pszSel The name of the selector.
1746 */
1747VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszSel)
1748{
1749 /*
1750 * Make variable description string.
1751 */
1752 static struct
1753 {
1754 unsigned cch;
1755 const char *psz;
1756 } const s_aTypes[32] =
1757 {
1758# define STRENTRY(str) { sizeof(str) - 1, str }
1759
1760 /* system */
1761# if HC_ARCH_BITS == 64
1762 STRENTRY("Reserved0 "), /* 0x00 */
1763 STRENTRY("Reserved1 "), /* 0x01 */
1764 STRENTRY("LDT "), /* 0x02 */
1765 STRENTRY("Reserved3 "), /* 0x03 */
1766 STRENTRY("Reserved4 "), /* 0x04 */
1767 STRENTRY("Reserved5 "), /* 0x05 */
1768 STRENTRY("Reserved6 "), /* 0x06 */
1769 STRENTRY("Reserved7 "), /* 0x07 */
1770 STRENTRY("Reserved8 "), /* 0x08 */
1771 STRENTRY("TSS64Avail "), /* 0x09 */
1772 STRENTRY("ReservedA "), /* 0x0a */
1773 STRENTRY("TSS64Busy "), /* 0x0b */
1774 STRENTRY("Call64 "), /* 0x0c */
1775 STRENTRY("ReservedD "), /* 0x0d */
1776 STRENTRY("Int64 "), /* 0x0e */
1777 STRENTRY("Trap64 "), /* 0x0f */
1778# else
1779 STRENTRY("Reserved0 "), /* 0x00 */
1780 STRENTRY("TSS16Avail "), /* 0x01 */
1781 STRENTRY("LDT "), /* 0x02 */
1782 STRENTRY("TSS16Busy "), /* 0x03 */
1783 STRENTRY("Call16 "), /* 0x04 */
1784 STRENTRY("Task "), /* 0x05 */
1785 STRENTRY("Int16 "), /* 0x06 */
1786 STRENTRY("Trap16 "), /* 0x07 */
1787 STRENTRY("Reserved8 "), /* 0x08 */
1788 STRENTRY("TSS32Avail "), /* 0x09 */
1789 STRENTRY("ReservedA "), /* 0x0a */
1790 STRENTRY("TSS32Busy "), /* 0x0b */
1791 STRENTRY("Call32 "), /* 0x0c */
1792 STRENTRY("ReservedD "), /* 0x0d */
1793 STRENTRY("Int32 "), /* 0x0e */
1794 STRENTRY("Trap32 "), /* 0x0f */
1795# endif
1796 /* non system */
1797 STRENTRY("DataRO "), /* 0x10 */
1798 STRENTRY("DataRO Accessed "), /* 0x11 */
1799 STRENTRY("DataRW "), /* 0x12 */
1800 STRENTRY("DataRW Accessed "), /* 0x13 */
1801 STRENTRY("DataDownRO "), /* 0x14 */
1802 STRENTRY("DataDownRO Accessed "), /* 0x15 */
1803 STRENTRY("DataDownRW "), /* 0x16 */
1804 STRENTRY("DataDownRW Accessed "), /* 0x17 */
1805 STRENTRY("CodeEO "), /* 0x18 */
1806 STRENTRY("CodeEO Accessed "), /* 0x19 */
1807 STRENTRY("CodeER "), /* 0x1a */
1808 STRENTRY("CodeER Accessed "), /* 0x1b */
1809 STRENTRY("CodeConfEO "), /* 0x1c */
1810 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
1811 STRENTRY("CodeConfER "), /* 0x1e */
1812 STRENTRY("CodeConfER Accessed ") /* 0x1f */
1813# undef STRENTRY
1814 };
1815# define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
1816 char szMsg[128];
1817 char *psz = &szMsg[0];
1818 unsigned i = pDesc->Gen.u1DescType << 4 | pDesc->Gen.u4Type;
1819 memcpy(psz, s_aTypes[i].psz, s_aTypes[i].cch);
1820 psz += s_aTypes[i].cch;
1821
1822 if (pDesc->Gen.u1Present)
1823 ADD_STR(psz, "Present ");
1824 else
1825 ADD_STR(psz, "Not-Present ");
1826# if HC_ARCH_BITS == 64
1827 if (pDesc->Gen.u1Long)
1828 ADD_STR(psz, "64-bit ");
1829 else
1830 ADD_STR(psz, "Comp ");
1831# else
1832 if (pDesc->Gen.u1Granularity)
1833 ADD_STR(psz, "Page ");
1834 if (pDesc->Gen.u1DefBig)
1835 ADD_STR(psz, "32-bit ");
1836 else
1837 ADD_STR(psz, "16-bit ");
1838# endif
1839# undef ADD_STR
1840 *psz = '\0';
1841
1842 /*
1843 * Limit and Base and format the output.
1844 */
1845#ifdef LOG_ENABLED
1846 uint32_t u32Limit = X86DESC_LIMIT_G(pDesc);
1847
1848# if HC_ARCH_BITS == 64
1849 uint64_t const u64Base = X86DESC64_BASE(pDesc);
1850 Log((" %s { %#04x - %#RX64 %#RX64 - base=%#RX64 limit=%#08x dpl=%d } %s\n", pszSel,
1851 Sel, pDesc->au64[0], pDesc->au64[1], u64Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
1852# else
1853 uint32_t const u32Base = X86DESC_BASE(pDesc);
1854 Log((" %s { %#04x - %#08x %#08x - base=%#08x limit=%#08x dpl=%d } %s\n", pszSel,
1855 Sel, pDesc->au32[0], pDesc->au32[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
1856# endif
1857#else
1858 NOREF(Sel); NOREF(pszSel);
1859#endif
1860}
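
/*
 * Editor's sketch (illustrative only, not part of the original file): dumping
 * the host TR descriptor with this helper.  Here paGdt is assumed to be a
 * PCX86DESCHC pointing at the host GDT (obtainable via ASMGetGDTR()):
 *
 *     RTSEL const SelTR = ASMGetTR();
 *     hmR0DumpDescriptor(&paGdt[SelTR >> X86_SEL_SHIFT], SelTR, "host TR");
 */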
1861
1862
1863/**
1864 * Formats a full register dump.
1865 *
1866 * @param pVCpu The cross context virtual CPU structure.
1867 * @param fFlags The dumping flags (HM_DUMP_REG_FLAGS_XXX).
1868 */
1869VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPUCC pVCpu, uint32_t fFlags)
1870{
1871 /*
1872 * Format the flags.
1873 */
1874 static struct
1875 {
1876 const char *pszSet;
1877 const char *pszClear;
1878 uint32_t fFlag;
1879 } const s_aFlags[] =
1880 {
1881 { "vip", NULL, X86_EFL_VIP },
1882 { "vif", NULL, X86_EFL_VIF },
1883 { "ac", NULL, X86_EFL_AC },
1884 { "vm", NULL, X86_EFL_VM },
1885 { "rf", NULL, X86_EFL_RF },
1886 { "nt", NULL, X86_EFL_NT },
1887 { "ov", "nv", X86_EFL_OF },
1888 { "dn", "up", X86_EFL_DF },
1889 { "ei", "di", X86_EFL_IF },
1890 { "tf", NULL, X86_EFL_TF },
1891 { "nt", "pl", X86_EFL_SF },
1892 { "nz", "zr", X86_EFL_ZF },
1893 { "ac", "na", X86_EFL_AF },
1894 { "po", "pe", X86_EFL_PF },
1895 { "cy", "nc", X86_EFL_CF },
1896 };
1897 char szEFlags[80];
1898 char *psz = szEFlags;
1899 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1900 uint32_t fEFlags = pCtx->eflags.u;
1901 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1902 {
1903 const char *pszAdd = s_aFlags[i].fFlag & fEFlags ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
1904 if (pszAdd)
1905 {
1906 strcpy(psz, pszAdd);
1907 psz += strlen(pszAdd);
1908 *psz++ = ' ';
1909 }
1910 }
1911 psz[-1] = '\0';
1912
1913 if (fFlags & HM_DUMP_REG_FLAGS_GPRS)
1914 {
1915 /*
1916 * Format the registers.
1917 */
1918 if (CPUMIsGuestIn64BitCode(pVCpu))
1919 Log(("rax=%016RX64 rbx=%016RX64 rcx=%016RX64 rdx=%016RX64\n"
1920 "rsi=%016RX64 rdi=%016RX64 r8 =%016RX64 r9 =%016RX64\n"
1921 "r10=%016RX64 r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1922 "r14=%016RX64 r15=%016RX64\n"
1923 "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 iopl=%d %*s\n"
1924 "cs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1925 "ds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1926 "es={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1927 "fs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1928 "gs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1929 "ss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1930 "cr0=%016RX64 cr2=%016RX64 cr3=%016RX64 cr4=%016RX64\n"
1931 "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64 dr3=%016RX64\n"
1932 "dr4=%016RX64 dr5=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
1933 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1934 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1935 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1936 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1937 ,
1938 pCtx->rax, pCtx->rbx, pCtx->rcx, pCtx->rdx, pCtx->rsi, pCtx->rdi,
1939 pCtx->r8, pCtx->r9, pCtx->r10, pCtx->r11, pCtx->r12, pCtx->r13,
1940 pCtx->r14, pCtx->r15,
1941 pCtx->rip, pCtx->rsp, pCtx->rbp, X86_EFL_GET_IOPL(fEFlags), 31, szEFlags,
1942 pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u,
1943 pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u,
1944 pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u,
1945 pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u,
1946 pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u,
1947 pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u,
1948 pCtx->cr0, pCtx->cr2, pCtx->cr3, pCtx->cr4,
1949 pCtx->dr[0], pCtx->dr[1], pCtx->dr[2], pCtx->dr[3],
1950 pCtx->dr[4], pCtx->dr[5], pCtx->dr[6], pCtx->dr[7],
1951 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, fEFlags,
1952 pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1953 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1954 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1955 else
1956 Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
1957 "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
1958 "cs={%04x base=%016RX64 limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
1959 "ds={%04x base=%016RX64 limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
1960 "es={%04x base=%016RX64 limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
1961 "fs={%04x base=%016RX64 limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
1962 "gs={%04x base=%016RX64 limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
1963 "ss={%04x base=%016RX64 limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
1964 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1965 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1966 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1967 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1968 ,
1969 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
1970 pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(fEFlags), 31, szEFlags,
1971 pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u, pCtx->dr[0], pCtx->dr[1],
1972 pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u, pCtx->dr[2], pCtx->dr[3],
1973 pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u, pCtx->dr[4], pCtx->dr[5],
1974 pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u, pCtx->dr[6], pCtx->dr[7],
1975 pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u, pCtx->cr0, pCtx->cr2,
1976 pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u, pCtx->cr3, pCtx->cr4,
1977 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, fEFlags,
1978 pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1979 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1980 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1981 }
1982
1983 if (fFlags & HM_DUMP_REG_FLAGS_FPU)
1984 {
1985 PCX86FXSTATE pFpuCtx = &pCtx->XState.x87;
1986 Log(("FPU:\n"
1987 "FCW=%04x FSW=%04x FTW=%02x\n"
1988 "FOP=%04x FPUIP=%08x CS=%04x Rsrvd1=%04x\n"
1989 "FPUDP=%04x DS=%04x Rsvrd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
1990 ,
1991 pFpuCtx->FCW, pFpuCtx->FSW, pFpuCtx->FTW,
1992 pFpuCtx->FOP, pFpuCtx->FPUIP, pFpuCtx->CS, pFpuCtx->Rsrvd1,
1993 pFpuCtx->FPUDP, pFpuCtx->DS, pFpuCtx->Rsrvd2,
1994 pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK));
1995 NOREF(pFpuCtx);
1996 }
1997
1998 if (fFlags & HM_DUMP_REG_FLAGS_MSRS)
1999 Log(("MSR:\n"
2000 "EFER =%016RX64\n"
2001 "PAT =%016RX64\n"
2002 "STAR =%016RX64\n"
2003 "CSTAR =%016RX64\n"
2004 "LSTAR =%016RX64\n"
2005 "SFMASK =%016RX64\n"
2006 "KERNELGSBASE =%016RX64\n",
2007 pCtx->msrEFER,
2008 pCtx->msrPAT,
2009 pCtx->msrSTAR,
2010 pCtx->msrCSTAR,
2011 pCtx->msrLSTAR,
2012 pCtx->msrSFMASK,
2013 pCtx->msrKERNELGSBASE));
2014}
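
/*
 * Editor's note (illustrative only, not part of the original file): this is
 * typically called from strict-build failure paths to log everything at once,
 * e.g.:
 *
 *     hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
 *
 * where HM_DUMP_REG_FLAGS_ALL combines the GPR, FPU and MSR flags checked
 * above.
 */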
2015
2016#endif /* VBOX_STRICT */
2017