VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp @ 8876

Last change on this file since 8876 was 8876, checked in by vboxsync, 17 years ago

ASID based TLB flushing

/* $Id: HWACCMR0.cpp 8876 2008-05-16 09:59:07Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);

/*******************************************************************************
*   Local Variables                                                            *
*******************************************************************************/

static struct
{
    HWACCM_CPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];

    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool        fSupported;

        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t    hostCR4;

        /** VMX MSR values */
        struct
        {
            uint64_t feature_ctrl;
            uint64_t vmx_basic_info;
            uint64_t vmx_pin_ctls;
            uint64_t vmx_proc_ctls;
            uint64_t vmx_exit;
            uint64_t vmx_entry;
            uint64_t vmx_misc;
            uint64_t vmx_cr0_fixed0;
            uint64_t vmx_cr0_fixed1;
            uint64_t vmx_cr4_fixed0;
            uint64_t vmx_cr4_fixed1;
            uint64_t vmx_vmcs_enum;
        } msr;
        /** Last instruction error. */
        uint32_t    ulLastInstrError;
    } vmx;
    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool        fSupported;

        /** SVM revision. */
        uint32_t    u32Rev;

        /** Maximum ASID allowed. */
        uint32_t    u32MaxASID;

        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t    u32Features;
    } svm;
    /** Saved error from detection. */
    int32_t         lLastError;

    struct
    {
        uint32_t    u32AMDFeatureECX;
        uint32_t    u32AMDFeatureEDX;
    } cpuid;

    HWACCMSTATE     enmHwAccmState;
} HWACCMR0Globals;


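/*
 * Overview of the entry points in this file (a rough sketch only; the exact
 * wiring to the support driver and the rest of the VMM lives elsewhere):
 *
 *  - HWACCMR0Init / HWACCMR0Term:          global init/term, once per driver.
 *  - HWACCMR0EnableAllCpus:                the first VM to call it selects the
 *                                          VT-x/AMD-V mode for all CPUs.
 *  - HWACCMR0InitVM / HWACCMR0SetupVM /
 *    HWACCMR0TermVM:                       per-VM init, setup and termination.
 *  - HWACCMR0Enter / HWACCMR0RunGuestCode /
 *    HWACCMR0Leave:                        per world-switch session handling.
 */
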
/**
 * Does global Ring-0 HWACCM initialization.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Init()
{
    int rc;

    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    /*
     * Check for VT-x and AMD-V capabilities.
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;

        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);

        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
           )
        {
            /*
             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
                memset(aRc, 0, sizeof(aRc));
                HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                    HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                {
                    /* Reread in case we've changed it. */
                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                    if (   (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                        == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    {
                        RTR0MEMOBJ pScratchMemObj;
                        void      *pvScratchPage;
                        RTHCPHYS   pScratchPagePhys;

                        HWACCMR0Globals.vmx.fSupported          = true;
                        HWACCMR0Globals.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_exit        = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_entry       = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
                        HWACCMR0Globals.vmx.hostCR4             = ASMGetCR4();

                        rc = RTR0MemObjAllocCont(&pScratchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                        if (RT_FAILURE(rc))
                            return rc;

                        pvScratchPage    = RTR0MemObjAddress(pScratchMemObj);
                        pScratchPagePhys = RTR0MemObjGetPagePhysAddr(pScratchMemObj, 0);
                        memset(pvScratchPage, 0, PAGE_SIZE);

                        /* Set revision dword at the beginning of the structure. */
                        *(uint32_t *)pvScratchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);

                        /* Make sure we don't get rescheduled to another cpu during this probe. */
                        RTCCUINTREG fFlags = ASMIntDisableFlags();

                        /*
                         * Check CR4.VMXE.
                         */
                        if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
                        {
                            /* In theory this bit could be cleared behind our back, which would cause
                             * #UD faults when we try to execute the VMX instructions...
                             */
                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
                        }

                        /* Enter VMX Root Mode. */
                        rc = VMXEnable(pScratchPagePhys);
                        if (VBOX_FAILURE(rc))
                        {
                            /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash
                             * the host when we enter raw mode, because:
                             *   (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit), and
                             *   (b) turning off paging causes a #GP (unavoidable when switching from long mode to
                             *       32-bit mode or from 32-bit mode to PAE).
                             *
                             * They should fix their code, but until they do we simply refuse to run.
                             */
                            HWACCMR0Globals.lLastError     = VERR_VMX_IN_VMX_ROOT_MODE;
                            HWACCMR0Globals.vmx.fSupported = false;
                        }
                        else
                            VMXDisable();

                        /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before
                           (some software could incorrectly think it's in VMX mode). */
                        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
                        ASMSetFlags(fFlags);

                        RTR0MemObjFree(pScratchMemObj, false);
                        if (VBOX_FAILURE(HWACCMR0Globals.lLastError))
                            return HWACCMR0Globals.lLastError;
                    }
                    else
                    {
                        AssertFailed(); /* can't hit this case anymore */
                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
                    }
                }
#ifdef LOG_ENABLED
                else
                    SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
#endif
            }
            else
                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
        }
        else if (   u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
                 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
                 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
                )
        {
            /*
             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (    (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
                memset(aRc, 0, sizeof(aRc));
                rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
                AssertRC(rc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(rc))
                    rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));

                if (VBOX_SUCCESS(rc))
                {
                    /* Query AMD features. */
                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);

                    HWACCMR0Globals.svm.fSupported = true;
                }
                else
                    HWACCMR0Globals.lLastError = rc;
            }
            else
                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
        }
        else
            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;

#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    return VINF_SUCCESS;
}


/**
 * Checks the error code array filled in for each cpu in the system.
 *
 * @returns VBox status code.
 * @param   paRc        Error code array
 * @param   cErrorCodes Array size
 * @param   pidCpu      Value of the first cpu that set an error (out)
 */
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
{
    int rc = VINF_SUCCESS;

    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);

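    /* Note: like the MP worker callbacks below, this loop assumes the CPU set
       index equals the CPU id (see the @todo remarks at the idCpu asserts). */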
    for (unsigned i = 0; i < cErrorCodes; i++)
    {
        if (RTMpIsCpuOnline(i))
        {
            if (VBOX_FAILURE(paRc[i]))
            {
                rc      = paRc[i];
                *pidCpu = i;
                break;
            }
        }
    }
    return rc;
}

/**
 * Does global Ring-0 HWACCM termination.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Term()
{
    int aRc[RTCPUSET_MAX_CPUS];

    memset(aRc, 0, sizeof(aRc));
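    /* Note: unlike the init/enable workers, HWACCMR0DisableCPU takes the
       return-code array as pvUser1, so aRc is passed first here. */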
    int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
    Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);

    /* Free the per-cpu pages used for VT-x and AMD-V. */
    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
    {
        AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
        if (HWACCMR0Globals.aCpuInfo[i].pMemObj)
        {
            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
            HWACCMR0Globals.aCpuInfo[i].pMemObj = NULL;
        }
    }
    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned u32VendorEBX = (uintptr_t)pvUser1;
    int     *paRc         = (int *)pvUser2;
    uint64_t val;

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
#endif
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)

    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
    {
        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Both the LOCK and VMXON bits must be set; otherwise VMXON will generate a #GP.
         * Once the lock bit is set, this MSR can no longer be modified.
         */
        if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
        {
            /* The MSR is not yet locked; we can change it ourselves here. */
            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        }
        if (   (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            paRc[idCpu] = VINF_SUCCESS;
        else
            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
    }
    else if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
    {
        /* Check if SVM is disabled. */
        val = ASMRdMsr(MSR_K8_VM_CR);
        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
        {
            /* Turn on SVM in the EFER MSR. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (!(val & MSR_K6_EFER_SVME))
                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

            /* Paranoia: make sure the write took. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (val & MSR_K6_EFER_SVME)
                paRc[idCpu] = VINF_SUCCESS;
            else
                paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
        }
        else
            paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
    }
    else
        AssertFailed(); /* can't happen */
    return;
}


/**
 * Sets up HWACCM on all cpus.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   enmNewHwAccmState   New hwaccm state.
 */
HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
{
    Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
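    /* Only the first caller gets to pick the mode: the compare-and-exchange
       moves enmHwAccmState away from HWACCMSTATE_UNINITIALIZED exactly once;
       later callers must request the same mode (checked at the end). */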
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
    {
        int     aRc[RTCPUSET_MAX_CPUS];
        RTCPUID idCpu = 0;

        /* Don't set up hwaccm as that might not work (VT-x & 64-bit raw mode). */
        if (enmNewHwAccmState == HWACCMSTATE_DISABLED)
            return VINF_SUCCESS;

        memset(aRc, 0, sizeof(aRc));

        /* Allocate one page per cpu for the global vt-x and amd-v pages. */
        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
        {
            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);

            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
            if (RTMpIsCpuOnline(i))
            {
                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                    return rc;

                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
                Assert(pvR0);
                memset(pvR0, 0, PAGE_SIZE);

#ifdef LOG_ENABLED
                SUPR0Printf("address %p phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
#endif
            }
        }
        /* First time, so initialize each cpu/core. */
        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);

        /* Check the return code of all invocations. */
        if (VBOX_SUCCESS(rc))
            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
        return rc;
    }

    if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
        return VINF_SUCCESS;

    /* A request to change the mode is not allowed. */
    return VERR_ACCESS_DENIED;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM       pVM  = (PVM)pvUser1;
    int      *paRc = (int *)pvUser2;
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;

    Assert(pVM);
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    /* Should never happen. */
    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
    {
        AssertFailed();
        return;
    }

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        paRc[idCpu] = VMXR0EnableCpu(&HWACCMR0Globals.aCpuInfo[idCpu], pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = true;
    }
    else if (pVM->hwaccm.s.svm.fSupported)
    {
        paRc[idCpu] = SVMR0EnableCpu(&HWACCMR0Globals.aCpuInfo[idCpu], pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = true;
    }
    return;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument (the return code array).
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;
    int      *paRc = (int *)pvUser1;

    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
        return;

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured)
    {
        paRc[idCpu] = VMXR0DisableCpu(&HWACCMR0Globals.aCpuInfo[idCpu], pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = false;
    }
    else if (HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured)
    {
        paRc[idCpu] = SVMR0DisableCpu(&HWACCMR0Globals.aCpuInfo[idCpu], pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = false;
    }
    return;
}


/**
 * Does Ring-0 per VM HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
#endif

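    /* Mirror the globally detected capabilities and MSR values into the VM structure. */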
    pVM->hwaccm.s.vmx.fSupported            = HWACCMR0Globals.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported            = HWACCMR0Globals.svm.fSupported;

    pVM->hwaccm.s.vmx.msr.feature_ctrl      = HWACCMR0Globals.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4               = HWACCMR0Globals.vmx.hostCR4;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_exit          = HWACCMR0Globals.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry         = HWACCMR0Globals.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc          = HWACCMR0Globals.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.svm.u32Rev                = HWACCMR0Globals.svm.u32Rev;
    pVM->hwaccm.s.svm.u32MaxASID            = HWACCMR0Globals.svm.u32MaxASID;
    pVM->hwaccm.s.svm.u32Features           = HWACCMR0Globals.svm.u32Features;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError                = HWACCMR0Globals.lLastError;

    /* Init a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0InitVM(pVM);
    else if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0InitVM(pVM);

    return rc;
}


/**
 * Does Ring-0 per VM HWACCM termination.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0TermVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
#endif

    /* Terminate a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0TermVM(pVM);
    else if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0TermVM(pVM);

    return rc;
}


/**
 * Sets up a VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
#endif

    /* Set up VT-x or AMD-V. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0SetupVM(pVM);
    else if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0SetupVM(pVM);

    return rc;
}


/**
 * Enters the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enter(pVM);
        AssertRC(rc);
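        /* Note: the status codes below are merged with |=; any failure leaves
           rc != VINF_SUCCESS, though the combined value is not a meaningful
           status code when more than one call fails. */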
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enter(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}


/**
 * Leaves the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler.
     *        We must restore the host FPU here to make absolutely sure we don't leave the guest
     *        FPU state active or trash somebody else's FPU state.
     */
    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Leave(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Leave(pVM);
    }
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;
    RTCPUID  idCpu = RTMpCpuId();
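    /* Note: idCpu indexes aCpuInfo directly; like the MP worker callbacks,
       this relies on the idCpu == CPU set index assumption. */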

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx, &HWACCMR0Globals.aCpuInfo[idCpu]);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx, &HWACCMR0Globals.aCpuInfo[idCpu]);
    }
}

/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   GCVirt  Page to invalidate.
 */
HWACCMR0DECL(int) HWACCMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
{
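    /* At this revision only AMD-V implements explicit page invalidation and
       TLB flushing; the VT-x paths in this function and in HWACCMR0FlushTLB
       below simply report success. */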
    if (pVM->hwaccm.s.svm.fSupported)
        return SVMR0InvalidatePage(pVM, GCVirt);

    return VINF_SUCCESS;
}

/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0FlushTLB(PVM pVM)
{
    if (pVM->hwaccm.s.svm.fSupported)
        return SVMR0FlushTLB(pVM);

    return VINF_SUCCESS;
}


#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
#define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("Reserved1 "),             /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("Reserved3 "),             /* 0x03 */
        STRENTRY("Reserved4 "),             /* 0x04 */
        STRENTRY("Reserved5 "),             /* 0x05 */
        STRENTRY("Reserved6 "),             /* 0x06 */
        STRENTRY("Reserved7 "),             /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS64Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS64Busy "),             /* 0x0b */
        STRENTRY("Call64 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int64 "),                 /* 0x0e */
        STRENTRY("Trap64 "),                /* 0x0f */
#else
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("TSS16Avail "),            /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("TSS16Busy "),             /* 0x03 */
        STRENTRY("Call16 "),                /* 0x04 */
        STRENTRY("Task "),                  /* 0x05 */
        STRENTRY("Int16 "),                 /* 0x06 */
        STRENTRY("Trap16 "),                /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS32Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS32Busy "),             /* 0x0b */
        STRENTRY("Call32 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int32 "),                 /* 0x0e */
        STRENTRY("Trap32 "),                /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                /* 0x10 */
        STRENTRY("DataRO Accessed "),       /* 0x11 */
        STRENTRY("DataRW "),                /* 0x12 */
        STRENTRY("DataRW Accessed "),       /* 0x13 */
        STRENTRY("DataDownRO "),            /* 0x14 */
        STRENTRY("DataDownRO Accessed "),   /* 0x15 */
        STRENTRY("DataDownRW "),            /* 0x16 */
        STRENTRY("DataDownRW Accessed "),   /* 0x17 */
        STRENTRY("CodeEO "),                /* 0x18 */
        STRENTRY("CodeEO Accessed "),       /* 0x19 */
        STRENTRY("CodeER "),                /* 0x1a */
        STRENTRY("CodeER Accessed "),       /* 0x1b */
        STRENTRY("CodeConfEO "),            /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),   /* 0x1d */
        STRENTRY("CodeConfER "),            /* 0x1e */
        STRENTRY("CodeConfER Accessed ")    /* 0x1f */
#undef STRENTRY
    };
#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char szMsg[128];
    char *psz = &szMsg[0];
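    /* The low four bits of the index come from u4Type; bit 4 is the
       descriptor's S bit (u1DescType), selecting between the system
       (0x00..0x0f) and code/data (0x10..0x1f) halves of aTypes. */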
    unsigned i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (Desc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
#undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

#if HC_ARCH_BITS == 64
    uint64_t u32Base = ((uintptr_t)Desc->Gen.u32BaseHigh3 << 32ULL) | Desc->Gen.u8BaseHigh2 << 24ULL | Desc->Gen.u8BaseHigh1 << 16ULL | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au64[0], Desc->au64[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#else
    uint32_t u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#endif
}

/**
 * Formats a full register dump.
 *
 * @param   pCtx    The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip", NULL, X86_EFL_VIP },
        { "vif", NULL, X86_EFL_VIF },
        { "ac",  NULL, X86_EFL_AC  },
        { "vm",  NULL, X86_EFL_VM  },
        { "rf",  NULL, X86_EFL_RF  },
        { "nt",  NULL, X86_EFL_NT  },
        { "ov",  "nv", X86_EFL_OF  },
        { "dn",  "up", X86_EFL_DF  },
        { "ei",  "di", X86_EFL_IF  },
        { "tf",  NULL, X86_EFL_TF  },
        { "ng",  "pl", X86_EFL_SF  },
        { "nz",  "zr", X86_EFL_ZF  },
        { "ac",  "na", X86_EFL_AF  },
        { "po",  "pe", X86_EFL_PF  },
        { "cy",  "nc", X86_EFL_CF  },
    };
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';

    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
}
#endif /* VBOX_STRICT */