Changeset 74389 in vbox
- Timestamp: Sep 20, 2018 4:25:26 PM (6 years ago)
- Location: trunk
- Files: 7 edited
include/VBox/err.h (modified) (1 diff)
-
include/VBox/vmm/cpumctx.h (modified) (2 diffs)
-
include/VBox/vmm/hm.h (modified) (1 diff)
-
include/VBox/vmm/hm_vmx.h (modified) (2 diffs)
-
src/VBox/VMM/VMMAll/HMVMXAll.cpp (modified) (2 diffs)
-
src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (modified) (10 diffs)
-
src/VBox/VMM/VMMR3/CPUM.cpp (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/err.h
r74043 r74389 2102 2102 /** Generic VM-entry failure. */ 2103 2103 #define VERR_VMX_VMENTRY_FAILED (-4033) 2104 /** Generic VM-exit failure. */ 2105 #define VERR_VMX_VMEXIT_FAILED (-4034) 2104 2106 /** @} */ 2105 2107 -
trunk/include/VBox/vmm/cpumctx.h
r74337 r74389 548 548 /** 0x2f8 - Last emulated VMX instruction/VM-exit diagnostic. */ 549 549 VMXVDIAG enmDiag; 550 /** 0x2fc - Whether the guest is in VMX root mode. */ 550 /** 0x2fc - VMX abort reason. */ 551 VMXABORT enmAbort; 552 /** 0x300 - VMX abort auxiliary information field. */ 553 uint32_t uAbortAux; 554 /** 0x304 - Whether the guest is in VMX root mode. */ 551 555 bool fInVmxRootMode; 552 /** 0x 2fd- Whether the guest is in VMX non-root mode. */556 /** 0x305 - Whether the guest is in VMX non-root mode. */ 553 557 bool fInVmxNonRootMode; 554 /** 0x 2fe- Whether the injected events are subjected to event intercepts. */558 /** 0x306 - Whether the injected events are subjected to event intercepts. */ 555 559 bool fInterceptEvents; 556 bool fPadding0;557 /** 0x30 0- Cache of the nested-guest current VMCS - R0 ptr. */560 bool afPadding0[1]; 561 /** 0x308 - Cache of the nested-guest current VMCS - R0 ptr. */ 558 562 R0PTRTYPE(PVMXVVMCS) pVmcsR0; 559 563 #if HC_ARCH_BITS == 32 560 564 uint32_t uVmcsR0Padding; 561 565 #endif 562 /** 0x3 08- Cache of the nested-guest curent VMCS - R3 ptr. */566 /** 0x310 - Cache of the nested-guest curent VMCS - R3 ptr. */ 563 567 R3PTRTYPE(PVMXVVMCS) pVmcsR3; 564 568 #if HC_ARCH_BITS == 32 565 569 uint32_t uVmcsR3Padding; 566 570 #endif 567 /** 0X31 0- Cache of the nested-guest shadow VMCS - R0 ptr. */571 /** 0X318 - Cache of the nested-guest shadow VMCS - R0 ptr. */ 568 572 R0PTRTYPE(PVMXVVMCS) pShadowVmcsR0; 569 573 #if HC_ARCH_BITS == 32 570 574 uint32_t uShadowVmcsR0Padding; 571 575 #endif 572 /** 0x3 18- Cache of the nested-guest shadow VMCS - R3 ptr. */576 /** 0x320 - Cache of the nested-guest shadow VMCS - R3 ptr. */ 573 577 R3PTRTYPE(PVMXVVMCS) pShadowVmcsR3; 574 578 #if HC_ARCH_BITS == 32 575 579 uint32_t uShadowVmcsR3Padding; 576 580 #endif 577 /** 0x32 0- Cache of the nested-guest Virtual-APIC page - R0 ptr. */581 /** 0x328 - Cache of the nested-guest Virtual-APIC page - R0 ptr. 
*/ 578 582 R0PTRTYPE(void *) pvVirtApicPageR0; 579 583 #if HC_ARCH_BITS == 32 580 584 uint32_t uVirtApicPageR0Padding; 581 585 #endif 582 /** 0x3 28- Cache of the nested-guest Virtual-APIC page - R3 ptr. */586 /** 0x330 - Cache of the nested-guest Virtual-APIC page - R3 ptr. */ 583 587 R3PTRTYPE(void *) pvVirtApicPageR3; 584 588 #if HC_ARCH_BITS == 32 585 589 uint32_t uVirtApicPageR3Padding; 586 590 #endif 587 /** 0x33 0- Cache of the nested-guest VMREAD-bitmap - R0 ptr. */591 /** 0x338 - Cache of the nested-guest VMREAD-bitmap - R0 ptr. */ 588 592 R0PTRTYPE(void *) pvVmreadBitmapR0; 589 593 #if HC_ARCH_BITS == 32 590 594 uint32_t uVmreadBitmapR0Padding; 591 595 #endif 592 /** 0x3 38- Cache of the nested-guest VMREAD-bitmap - R3 ptr. */596 /** 0x340 - Cache of the nested-guest VMREAD-bitmap - R3 ptr. */ 593 597 R3PTRTYPE(void *) pvVmreadBitmapR3; 594 598 #if HC_ARCH_BITS == 32 595 599 uint32_t uVmreadBitmapR3Padding; 596 600 #endif 597 /** 0x34 0- Cache of the nested-guest VMWRITE-bitmap - R0 ptr. */601 /** 0x348 - Cache of the nested-guest VMWRITE-bitmap - R0 ptr. */ 598 602 R0PTRTYPE(void *) pvVmwriteBitmapR0; 599 603 #if HC_ARCH_BITS == 32 600 604 uint32_t uVmwriteBitmapR0Padding; 601 605 #endif 602 /** 0x3 48- Cache of the nested-guest VMWRITE-bitmap - R3 ptr. */606 /** 0x350 - Cache of the nested-guest VMWRITE-bitmap - R3 ptr. */ 603 607 R3PTRTYPE(void *) pvVmwriteBitmapR3; 604 608 #if HC_ARCH_BITS == 32 605 609 uint32_t uVmwriteBitmapR3Padding; 606 610 #endif 607 /** 0x35 0- The MSR auto-load/store area - R0 ptr. */611 /** 0x358 - The MSR auto-load/store area - R0 ptr. */ 608 612 R0PTRTYPE(PVMXAUTOMSR) pAutoMsrAreaR0; 609 613 #if HC_ARCH_BITS == 32 610 614 uint32_t uAutoMsrAreaR0; 611 615 #endif 612 /** 0x3 58- The MSR auto-load/store area - R3 ptr. */616 /** 0x360 - The MSR auto-load/store area - R3 ptr. */ 613 617 R3PTRTYPE(PVMXAUTOMSR) pAutoMsrAreaR3; 614 618 #if HC_ARCH_BITS == 32 615 619 uint32_t uAutoMsrAreaR3; 616 620 #endif 617 /** 0x36 0- Padding. 
*/618 uint8_t abPadding[0x3f0 - 0x36 0];621 /** 0x368 - Padding. */ 622 uint8_t abPadding[0x3f0 - 0x368]; 619 623 } vmx; 620 624 } CPUM_UNION_NM(s); … … 699 703 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.GCPhysShadowVmcs, 0x2f0); 700 704 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.enmDiag, 0x2f8); 701 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxRootMode, 0x2fc); 702 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxNonRootMode, 0x2fd); 703 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInterceptEvents, 0x2fe); 704 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0, 0x300); 705 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR3, 0x308); 706 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0, 0x310); 707 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR3, 0x318); 708 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR0, 0x320); 709 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR3, 0x328); 710 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0, 0x330); 711 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR3, 0x338); 712 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR0, 0x340); 713 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR3, 0x348); 714 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR0, 0x350); 715 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR3, 0x358); 705 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.enmAbort, 0x2fc); 706 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uAbortAux, 0x300); 707 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) 
vmx.fInVmxRootMode, 0x304); 708 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxNonRootMode, 0x305); 709 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInterceptEvents, 0x306); 710 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0, 0x308); 711 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR3, 0x310); 712 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0, 0x318); 713 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR3, 0x320); 714 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR0, 0x328); 715 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR3, 0x330); 716 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0, 0x338); 717 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR3, 0x340); 718 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR0, 0x348); 719 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR3, 0x350); 720 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR0, 0x358); 721 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR3, 0x360); 716 722 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0, 8); 717 723 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0, 8); -
trunk/include/VBox/vmm/hm.h
r74287 r74389 136 136 VMM_INT_DECL(bool) HMIsVmxSupported(PVM pVM); 137 137 VMM_INT_DECL(const char *) HMVmxGetDiagDesc(VMXVDIAG enmDiag); 138 VMM_INT_DECL(const char *) HMVmxGetAbortDesc(VMXABORT enmAbort); 138 139 VMM_INT_DECL(void) HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode); 139 140 /** @} */ -
trunk/include/VBox/vmm/hm_vmx.h
r74381 r74389 1353 1353 /** @name VMX abort reasons. 1354 1354 * See Intel spec. "27.7 VMX Aborts". 1355 * Update HMVmxGetAbortDesc() if new reasons are added. 1355 1356 * @{ 1356 1357 */ 1357 1358 typedef enum 1358 1359 { 1360 /** None - don't use this / uninitialized value. */ 1361 VMXABORT_NONE = 0, 1362 /** VMX abort caused during saving of guest MSRs. */ 1359 1363 VMXABORT_SAVE_GUEST_MSRS = 1, 1364 /** VMX abort caused during host PDPTE checks. */ 1360 1365 VMXBOART_HOST_PDPTE = 2, 1366 /** VMX abort caused due to current VMCS being corrupted. */ 1361 1367 VMXABORT_CURRENT_VMCS_CORRUPT = 3, 1368 /** VMX abort caused during loading of host MSRs. */ 1362 1369 VMXABORT_LOAD_HOST_MSR = 4, 1370 /** VMX abort caused due to a machine-check exception during VM-exit. */ 1363 1371 VMXABORT_MACHINE_CHECK_XCPT = 5, 1364 VMXABORT_HOST_LONG_MODE = 6 1372 /** VMX abort caused due to invalid return to long mode. */ 1373 VMXABORT_HOST_LONG_MODE = 6, 1374 /* Type size hack. */ 1375 VMXABORT_32BIT_HACK = 0x7fffffff 1365 1376 } VMXABORT; 1377 AssertCompileSize(VMXABORT, 4); 1366 1378 /** @} */ 1367 1379 … … 3822 3834 kVmxVDiag_Vmentry_VmxRoot, 3823 3835 kVmxVDiag_Vmentry_Vpid, 3836 kVmxVDiag_Vmexit_MsrStore, 3837 kVmxVDiag_Vmexit_MsrStoreCount, 3838 kVmxVDiag_Vmexit_MsrStorePtrReadPhys, 3839 kVmxVDiag_Vmexit_MsrStoreRing3, 3840 kVmxVDiag_Vmexit_MsrStoreRsvd, 3824 3841 /* Last member for determining array index limit. */ 3825 3842 kVmxVDiag_End -
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
r74310 r74389 336 336 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys , "VmwriteBitmapPtrReadPhys" ), 337 337 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmxRoot , "VmxRoot" ), 338 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Vpid , "Vpid" ) 338 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Vpid , "Vpid" ), 339 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStore , "MsrStore" ), 340 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreCount , "MsrStoreCount" ), 341 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStorePtrReadPhys , "MsrStorePtrReadPhys" ), 342 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreRing3 , "MsrStoreRing3" ), 343 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreRsvd , "MsrStoreRsvd" ) 339 344 /* kVmxVDiag_End */ 340 345 }; … … 432 437 if (RT_LIKELY((unsigned)enmDiag < RT_ELEMENTS(g_apszVmxVDiagDesc))) 433 438 return g_apszVmxVDiagDesc[enmDiag]; 439 return "Unknown/invalid"; 440 } 441 442 443 /** 444 * Gets the description for a VMX abort reason. 445 * 446 * @returns The descriptive string. 447 * @param enmAbort The VMX abort reason. 448 */ 449 VMM_INT_DECL(const char *) HMVmxGetAbortDesc(VMXABORT enmAbort) 450 { 451 switch (enmAbort) 452 { 453 case VMXABORT_NONE: return "VMXABORT_NONE"; 454 case VMXABORT_SAVE_GUEST_MSRS: return "VMXABORT_SAVE_GUEST_MSRS"; 455 case VMXBOART_HOST_PDPTE: return "VMXBOART_HOST_PDPTE"; 456 case VMXABORT_CURRENT_VMCS_CORRUPT: return "VMXABORT_CURRENT_VMCS_CORRUPT"; 457 case VMXABORT_LOAD_HOST_MSR: return "VMXABORT_LOAD_HOST_MSR"; 458 case VMXABORT_MACHINE_CHECK_XCPT: return "VMXABORT_MACHINE_CHECK_XCPT"; 459 case VMXABORT_HOST_LONG_MODE: return "VMXABORT_HOST_LONG_MODE"; 460 default: 461 break; 462 } 434 463 return "Unknown/invalid"; 435 464 } -
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r74376 r74389 407 407 408 408 /** Marks a VM-entry failure with a diagnostic reason, logs and returns. */ 409 #define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_ InsDiag) \409 #define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \ 410 410 do \ 411 411 { \ 412 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_ InsDiag), \413 HMVmxGetDiagDesc(a_InsDiag), (a_pszFailure))); \414 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_ InsDiag); \412 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \ 413 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \ 414 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \ 415 415 return VERR_VMX_VMENTRY_FAILED; \ 416 416 } while (0) 417 418 /** Marks a VM-exit failure with a diagnostic reason, logs and returns. */ 419 #define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \ 420 do \ 421 { \ 422 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \ 423 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \ 424 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \ 425 return VERR_VMX_VMEXIT_FAILED; \ 426 } while (0) 427 417 428 418 429 … … 4076 4087 } 4077 4088 4089 /* 4090 * The VM-entry MSR-load area address need not be a valid guest-physical address if the 4091 * VM-entry MSR load count is 0. If this is the case, bail early without reading it. 4092 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs". 
4093 */ 4094 if (cMsrs == 0) 4095 return VINF_SUCCESS; 4096 4078 4097 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u; 4079 4098 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), … … 4081 4100 if (RT_SUCCESS(rc)) 4082 4101 { 4083 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea); NOREF(pMsr); 4102 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea); 4103 Assert(pMsr); 4084 4104 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++) 4085 4105 { … … 4097 4117 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry. 4098 4118 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure 4099 * indicated further with a different diagnostic code. Later, we can try implement handling of 4100 * the MSR in ring-0 if possible, or come up with a better, generic solution. 4119 * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicated 4120 * further by our own, specific diagnostic code. Later, we can try implement handling of the 4121 * MSR in ring-0 if possible, or come up with a better, generic solution. 4101 4122 */ 4102 4123 pVmcs->u64ExitQual.u = idxMsr; … … 4115 4136 else 4116 4137 { 4117 Log(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc)); 4118 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_MsrLoadPtrReadPhys; 4119 return rc; 4138 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc)); 4139 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys); 4120 4140 } 4121 4141 … … 4244 4264 { 4245 4265 /* 4246 * Load control, debug, segment, descriptor-table registers and some MSRs.4266 * Load guest control, debug, segment, descriptor-table registers and some MSRs. 
4247 4267 */ 4248 4268 iemVmxVmentryLoadGuestControlRegsMsr(pVCpu); … … 4541 4561 */ 4542 4562 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4543 Assert(pVmcs);4544 4563 4545 4564 /* Save control registers. */ … … 4676 4695 4677 4696 /** 4697 * Saves guest non-register state as part of VM-exit. 4698 * 4699 * @param pVCpu The cross context virtual CPU structure. 4700 * @param uExitReason The VM-exit reason. 4701 */ 4702 IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason) 4703 { 4704 /* 4705 * Save guest non-register state. 4706 * See Intel spec. 27.3.4 "Saving Non-Register State". 4707 */ 4708 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4709 4710 /* 4711 * Activity-state: VM-exits occur before changing the activity state 4712 * of the processor and hence we shouldn't need to change it. 4713 */ 4714 4715 /* Interruptibility-state. */ 4716 pVmcs->u32GuestIntrState = 0; 4717 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 4718 { /** @todo NSTVMX: Virtual-NMI blocking. */ } 4719 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 4720 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI; 4721 4722 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 4723 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu)) 4724 { 4725 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI 4726 * currently. */ 4727 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; 4728 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 4729 } 4730 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */ 4731 4732 /* Pending debug exceptions. 
*/ 4733 if ( uExitReason != VMX_EXIT_INIT_SIGNAL 4734 && uExitReason != VMX_EXIT_SMI 4735 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK 4736 && !HMVmxIsTrapLikeVmexit(uExitReason)) 4737 { 4738 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when 4739 * block-by-MovSS is in effect. */ 4740 pVmcs->u64GuestPendingDbgXcpt.u = 0; 4741 } 4742 4743 /** @todo NSTVMX: Save VMX preemption timer value. */ 4744 4745 /* PDPTEs. */ 4746 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */ 4747 pVmcs->u64GuestPdpte0.u = 0; 4748 pVmcs->u64GuestPdpte1.u = 0; 4749 pVmcs->u64GuestPdpte2.u = 0; 4750 pVmcs->u64GuestPdpte3.u = 0; 4751 } 4752 4753 4754 /** 4678 4755 * Saves the guest-state as part of VM-exit. 4679 4756 * 4680 4757 * @returns VBox status code. 4681 * @param pVCpu The cross context virtual CPU structure. 4682 */ 4683 IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu) 4684 { 4758 * @param pVCpu The cross context virtual CPU structure. 4759 * @param uExitReason The VM-exit reason. 4760 */ 4761 IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason) 4762 { 4763 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4764 Assert(pVmcs); 4765 4685 4766 /* 4686 * Save control, debug, segment, descriptor-table registers and some MSRs.4767 * Save guest control, debug, segment, descriptor-table registers and some MSRs. 4687 4768 */ 4688 4769 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu); 4689 4770 iemVmxVmexitSaveGuestSegRegs(pVCpu); 4690 4771 4691 /** @todo NSTVMX: rest of state. */ 4772 /* 4773 * Save guest RIP, RSP and RFLAGS. 4774 */ 4775 /* We don't support enclave mode yet. */ 4776 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip; 4777 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp; 4778 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */ 4779 4780 /* Save guest non-register state. 
*/ 4781 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason); 4782 } 4783 4784 4785 /** 4786 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit. 4787 * 4788 * @returns VBox status code. 4789 * @param pVCpu The cross context virtual CPU structure. 4790 * @param uExitReason The VM-exit reason (for diagnostic purposes). 4791 */ 4792 IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason) 4793 { 4794 /* 4795 * Save guest MSRs. 4796 * See Intel spec. 27.4 "Saving MSRs". 4797 */ 4798 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4799 const char *const pszFailure = "VMX-abort"; 4800 4801 /* 4802 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count 4803 * is exceeded including possibly raising #MC exceptions during VMX transition. Our 4804 * implementation causes a VMX-abort followed by a triple-fault. 4805 */ 4806 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu); 4807 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64GuestVmxMiscMsr); 4808 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount; 4809 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR)); 4810 if (cMsrs <= cMaxSupportedMsrs) 4811 { /* likely */ } 4812 else 4813 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount); 4814 4815 /* 4816 * The VM-exit MSR-store area address need not be a valid guest-physical address if the 4817 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it. 4818 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs". 
4819 */ 4820 if (cMsrs == 0) 4821 return VINF_SUCCESS; 4822 4823 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea); 4824 Assert(pMsr); 4825 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++) 4826 { 4827 if ( !pMsr->u32Reserved 4828 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8 4829 && pMsr->u32Msr != MSR_IA32_SMBASE) 4830 { 4831 int rc = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value); 4832 if (rc == VINF_SUCCESS) 4833 continue; 4834 4835 /* 4836 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-exit. 4837 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort 4838 * recording the MSR index in a VirtualBox specific VMCS field and indicated further by our 4839 * own, specific diagnostic code. Later, we can try implement handling of the MSR in ring-0 4840 * if possible, or come up with a better, generic solution. 4841 */ 4842 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr; 4843 VMXVDIAG const enmDiag = rc == VINF_CPUM_R3_MSR_READ 4844 ? 
kVmxVDiag_Vmexit_MsrStoreRing3 4845 : kVmxVDiag_Vmexit_MsrStore; 4846 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag); 4847 } 4848 else 4849 { 4850 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr; 4851 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd); 4852 } 4853 } 4854 4855 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u; 4856 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea, 4857 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE); 4858 if (RT_SUCCESS(rc)) 4859 { /* likely */ } 4860 else 4861 { 4862 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc)); 4863 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrReadPhys); 4864 } 4865 4866 NOREF(uExitReason); 4867 NOREF(pszFailure); 4868 return VINF_SUCCESS; 4869 } 4870 4871 4872 /** 4873 * Performs a VMX abort (due to a fatal error during VM-exit). 4874 * 4875 * @returns VBox status code. 4876 * @param pVCpu The cross context virtual CPU structure. 4877 * @param enmAbort The VMX abort reason. 4878 */ 4879 IEM_STATIC int iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort) 4880 { 4881 /* 4882 * Perform the VMX abort. 4883 * See Intel spec. 27.7 "VMX Aborts". 4884 */ 4885 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort))); 4886 4887 /* We don't support SMX yet. */ 4888 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort; 4889 ASMWriteFence(); 4890 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu)) 4891 { 4892 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu); 4893 uint32_t const offVmxAbort = RT_OFFSETOF(VMXVVMCS, u32VmxAbortId); 4894 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort)); 4895 } 4896 4897 return VINF_EM_TRIPLE_FAULT; 4692 4898 }
4905 * @param uExitReason The VM-exit reason. 4699 4906 * @param cbInstr The instruction length. 4700 4907 */ … … 4718 4925 && uExitReason != VMX_EXIT_ERR_MSR_LOAD 4719 4926 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK) 4720 iemVmxVmexitSaveGuestState(pVCpu); 4721 4927 { 4928 iemVmxVmexitSaveGuestState(pVCpu, uExitReason); 4929 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason); 4930 if (RT_SUCCESS(rc)) 4931 { /* likely */ } 4932 else 4933 { 4934 LogFunc(("iemVmxVmexitSaveGuestAutoMsrs failed (rc=%Rrc) -> VMX-Abort\n", rc)); 4935 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS); 4936 } 4937 } 4938 4939 /** @todo NSTVMX: rest of VM-exit (loading host state etc). */ 4722 4940 return VINF_SUCCESS; 4723 4941 } -
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r74258 r74389 3039 3039 if (fDumpState & CPUMHWVIRTDUMP_VMX) 3040 3040 { 3041 pHlp->pfnPrintf(pHlp, " GCPhysVmxon = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmxon); 3042 pHlp->pfnPrintf(pHlp, " GCPhysVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmcs); 3043 pHlp->pfnPrintf(pHlp, " GCPhysShadowVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysShadowVmcs); 3044 pHlp->pfnPrintf(pHlp, " enmDiag = %u (%s)\n", pCtx->hwvirt.vmx.enmDiag, HMVmxGetDiagDesc(pCtx->hwvirt.vmx.enmDiag)); 3045 pHlp->pfnPrintf(pHlp, " enmAbort = %u (%s)\n", pCtx->hwvirt.vmx.enmAbort, HMVmxGetAbortDesc(pCtx->hwvirt.vmx.enmAbort)); 3046 pHlp->pfnPrintf(pHlp, " uAbortAux = %u (%#x)\n", pCtx->hwvirt.vmx.uAbortAux, pCtx->hwvirt.vmx.uAbortAux); 3041 3047 pHlp->pfnPrintf(pHlp, " fInVmxRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxRootMode); 3042 3048 pHlp->pfnPrintf(pHlp, " fInVmxNonRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxNonRootMode); 3043 pHlp->pfnPrintf(pHlp, " GCPhysVmxon = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmxon); 3044 pHlp->pfnPrintf(pHlp, " GCPhysVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmcs); 3045 pHlp->pfnPrintf(pHlp, " enmDiag = %u (%s)\n", pCtx->hwvirt.vmx.enmDiag, 3046 HMVmxGetDiagDesc(pCtx->hwvirt.vmx.enmDiag)); 3049 pHlp->pfnPrintf(pHlp, " fInterceptEvents = %RTbool\n", pCtx->hwvirt.vmx.fInterceptEvents); 3050 3047 3051 /** @todo NSTVMX: Dump remaining/new fields. */ 3048 3052 }
Note: See TracChangeset for help on using the changeset viewer.
