Changeset 64663 in vbox

Timestamp:
    Nov 14, 2016 3:46:35 PM
Location:
    trunk
Files:
    13 edited
    - include/VBox/vmm/apic.h (modified) (1 diff)
    - include/VBox/vmm/cpum.mac (modified) (1 diff)
    - include/VBox/vmm/cpumctx.h (modified) (2 diffs)
    - include/VBox/vmm/pdmapi.h (modified) (1 diff)
    - src/VBox/VMM/VMMAll/APICAll.cpp (modified) (12 diffs)
    - src/VBox/VMM/VMMAll/PDMAll.cpp (modified) (2 diffs)
    - src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (3 diffs)
    - src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (4 diffs)
    - src/VBox/VMM/VMMR3/APIC.cpp (modified) (1 diff)
    - src/VBox/VMM/VMMR3/CPUM.cpp (modified) (4 diffs)
    - src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp (modified) (6 diffs)
    - src/VBox/VMM/include/CPUMInternal.mac (modified) (2 diffs)
    - src/VBox/VMM/testcase/tstVMStruct.h (modified) (1 diff)

trunk/include/VBox/vmm/apic.h
--- r64655
+++ r64663
@@ -176,4 +176,5 @@
 
 /* These functions are VMM internal. */
+VMM_INT_DECL(bool)          APICIsEnabled(PVMCPU pVCpu);
 VMM_INT_DECL(bool)          APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr);
 VMM_INT_DECL(bool)          APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr);

trunk/include/VBox/vmm/cpum.mac
--- r62476
+++ r64663
@@ -245,5 +245,5 @@
     .msrSFMASK            resb    8
     .msrKERNELGSBASE      resb    8
-    .msrApicBase          resb    8
+    .uMsrPadding0         resb    8
     alignb 8
     .aXcr                 resq    2

trunk/include/VBox/vmm/cpumctx.h
--- r62476
+++ r64663
@@ -413,5 +413,5 @@
     uint64_t        msrSFMASK;          /**< syscall flag mask. */
     uint64_t        msrKERNELGSBASE;    /**< swapgs exchange value. */
-    uint64_t        msrApicBase;        /**< The local APIC base (IA32_APIC_BASE MSR). */
+    uint64_t        uMsrPadding0;       /**< no longer used (used to hold a copy of APIC base MSR). */
     /** @} */
 
@@ -479,5 +479,4 @@
 AssertCompileMemberOffset(CPUMCTX, msrSFMASK, 528);
 AssertCompileMemberOffset(CPUMCTX, msrKERNELGSBASE, 536);
-AssertCompileMemberOffset(CPUMCTX, msrApicBase, 544);
 AssertCompileMemberOffset(CPUMCTX, aXcr, 552);
 AssertCompileMemberOffset(CPUMCTX, fXStateMask, 568);

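Renaming the field to uMsrPadding0 instead of deleting it keeps every later CPUMCTX member at its original offset, which is why only the msrApicBase offset assertion is dropped while aXcr stays at 552 and fXStateMask at 568. A minimal standalone sketch of the same layout-stability check, using a hypothetical DEMOCTX structure in place of the real CPUMCTX:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical, heavily simplified stand-in for the tail of CPUMCTX. */
    typedef struct DEMOCTX
    {
        uint8_t  abUpToSFMask[528]; /* everything preceding msrSFMASK */
        uint64_t msrSFMASK;         /* offset 528 */
        uint64_t msrKERNELGSBASE;   /* offset 536 */
        uint64_t uMsrPadding0;      /* offset 544: placeholder for the removed msrApicBase */
        uint64_t aXcr[2];           /* offset 552, unchanged thanks to the padding */
        uint64_t fXStateMask;       /* offset 568, unchanged */
    } DEMOCTX;

    /* Same idea as the AssertCompileMemberOffset() checks kept in cpumctx.h. */
    static_assert(offsetof(DEMOCTX, aXcr)        == 552, "layout changed");
    static_assert(offsetof(DEMOCTX, fXStateMask) == 568, "layout changed");
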
trunk/include/VBox/vmm/pdmapi.h
--- r64655
+++ r64663
@@ -48,4 +48,5 @@
 VMMDECL(int)        PDMIsaSetIrq(PVM pVM, uint8_t u8Irq, uint8_t u8Level, uint32_t uTagSrc);
 VMM_INT_DECL(bool)  PDMHasIoApic(PVM pVM);
+VMM_INT_DECL(bool)  PDMHasApic(PVM pVM);
 VMM_INT_DECL(int)   PDMIoApicSetIrq(PVM pVM, uint8_t u8Irq, uint8_t u8Level, uint32_t uTagSrc);
 VMM_INT_DECL(int)   PDMIoApicBroadcastEoi(PVM pVM, uint8_t uVector);

trunk/src/VBox/VMM/VMMAll/APICAll.cpp
--- r64662
+++ r64663
@@ -412,6 +412,7 @@
  *
  * @returns true if enabled, false otherwise.
- */
-DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
+ * @param   pVCpu           The cross context virtual CPU structure.
+ */
+VMM_INT_DECL(bool) APICIsEnabled(PVMCPU pVCpu)
 {
     PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
@@ -616,5 +617,5 @@
     {
         if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
-            && apicIsEnabled(&pVM->aCpus[idCpu]))
+            && APICIsEnabled(&pVM->aCpus[idCpu]))
             fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
     }
@@ -626,5 +627,5 @@
         VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
         if (   idCpu < pVM->cCpus
-            && apicIsEnabled(&pVM->aCpus[idCpu]))
+            && APICIsEnabled(&pVM->aCpus[idCpu]))
             fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
         else
@@ -652,5 +653,5 @@
     {
         if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
-            && apicIsEnabled(&pVM->aCpus[idCpu]))
+            && APICIsEnabled(&pVM->aCpus[idCpu]))
         {
             Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
@@ -1879,5 +1880,5 @@
      */
     PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     { /* likely */ }
     else
@@ -1997,5 +1998,5 @@
      */
     PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     { /* likely */ }
     else
@@ -2347,5 +2348,5 @@
 VMMDECL(int) APICSetTpr(PVMCPU pVCpu, uint8_t u8Tpr)
 {
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
         return VBOXSTRICTRC_VAL(apicSetTprEx(pVCpu, u8Tpr, false /* fForceX2ApicBehaviour */));
     return VERR_PDM_NO_APIC_INSTANCE;
@@ -2390,5 +2391,5 @@
 {
     VMCPU_ASSERT_EMT(pVCpu);
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     {
         PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
@@ -2427,5 +2428,5 @@
 
     PVMCPU pVCpu = &pVM->aCpus[0];
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     {
         PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
@@ -2459,5 +2460,5 @@
      * If the APIC isn't enabled, do nothing and pretend success.
      */
-    if (apicIsEnabled(&pVM->aCpus[0]))
+    if (APICIsEnabled(&pVM->aCpus[0]))
     { /* likely */ }
     else
@@ -2508,5 +2509,5 @@
 
     /* If the APIC is enabled, the interrupt is subject to LVT programming. */
-    if (apicIsEnabled(pVCpu))
+    if (APICIsEnabled(pVCpu))
     {
         PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
@@ -2675,5 +2676,5 @@
 
     PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
-    bool const fApicHwEnabled = apicIsEnabled(pVCpu);
+    bool const fApicHwEnabled = APICIsEnabled(pVCpu);
     if (   fApicHwEnabled
         && pXApicPage->svr.u.fApicSoftwareEnable)

trunk/src/VBox/VMM/VMMAll/PDMAll.cpp
--- r64655
+++ r64663
@@ -248,5 +248,5 @@
  * Returns the presence of an IO-APIC.
  *
- * @returns VBox true if an IO-APIC is present.
+ * @returns true if an IO-APIC is present.
  * @param   pVM     The cross context VM structure.
  */
@@ -254,3 +254,15 @@
 {
     return pVM->pdm.s.IoApic.CTX_SUFF(pDevIns) != NULL;
+}
+
+
+/**
+ * Returns the presence of an APIC.
+ *
+ * @returns true if an APIC is present.
+ * @param   pVM     The cross context VM structure.
+ */
+VMM_INT_DECL(bool) PDMHasApic(PVM pVM)
+{
+    return pVM->pdm.s.Apic.CTX_SUFF(pDevIns) != NULL;
 }

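Taken together, the two queries above give callers a cheap guard before touching any APIC state; this is exactly the check the HMSVMR0.cpp and HMVMXR0.cpp hunks below add. A minimal sketch of the composed guard (the helper name is hypothetical, not part of this changeset):

    /* Only sync the TPR or map the APIC-access page when there is an APIC at all. */
    static bool demoCanTouchApicState(PVM pVM, PVMCPU pVCpu)
    {
        return PDMHasApic(pVM)        /* APIC device instantiated (maximum mode != 'None') */
            && APICIsEnabled(pVCpu);  /* not hardware-disabled via IA32_APIC_BASE */
    }
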
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- r64655
+++ r64663
@@ -1608,42 +1608,48 @@
         return VINF_SUCCESS;
 
-    bool     fPendingIntr;
-    uint8_t  u8Tpr;
-    int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
-    AssertRCReturn(rc, rc);
-
-    /* Assume that we need to trap all TPR accesses and thus need not check on
-       every #VMEXIT if we should update the TPR. */
-    Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
-    pVCpu->hm.s.svm.fSyncVTpr = false;
-
-    /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
-    {
-        pCtx->msrLSTAR = u8Tpr;
-
-        /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
-        if (fPendingIntr)
-            hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
-        else
-        {
-            hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-            pVCpu->hm.s.svm.fSyncVTpr = true;
-        }
-    }
-    else
-    {
-        /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
-        pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
-
-        /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
-        if (fPendingIntr)
-            pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
-        else
-        {
-            pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
-            pVCpu->hm.s.svm.fSyncVTpr = true;
-        }
-
-        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+    int rc = VINF_SUCCESS;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (   PDMHasApic(pVM)
+        && APICIsEnabled(pVCpu))
+    {
+        bool     fPendingIntr;
+        uint8_t  u8Tpr;
+        rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
+        AssertRCReturn(rc, rc);
+
+        /* Assume that we need to trap all TPR accesses and thus need not check on
+           every #VMEXIT if we should update the TPR. */
+        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
+        pVCpu->hm.s.svm.fSyncVTpr = false;
+
+        /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
+        if (pVM->hm.s.fTPRPatchingActive)
+        {
+            pCtx->msrLSTAR = u8Tpr;
+
+            /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
+            if (fPendingIntr)
+                hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
+            else
+            {
+                hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+                pVCpu->hm.s.svm.fSyncVTpr = true;
+            }
+        }
+        else
+        {
+            /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
+            pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
+
+            /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
+            if (fPendingIntr)
+                pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
+            else
+            {
+                pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
+                pVCpu->hm.s.svm.fSyncVTpr = true;
+            }
+
+            pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+        }
     }
@@ -5089,5 +5095,5 @@
         && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
     {
-        RTGCPHYS GCPhysApicBase = pCtx->msrApicBase;
+        RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
         GCPhysApicBase &= PAGE_BASE_GC_MASK;
 
@@ -5353,5 +5359,5 @@
     {
         RTGCPHYS GCPhysApicBase;
-        GCPhysApicBase  = pCtx->msrApicBase;
+        GCPhysApicBase  = APICGetBaseMsrNoCheck(pVCpu);
         GCPhysApicBase &= PAGE_BASE_GC_MASK;
 

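The VTPR computation in the first hunk relies on the task-priority class being bits 7:4 of the APIC TPR register, which is also what CR8 exposes. A standalone illustration of that bit math (the demo function name is hypothetical):

    #include <stdint.h>

    /* SVM's VTPR field holds the 4-bit task-priority class, i.e. TPR bits 7:4. */
    static inline uint8_t demoTprToVTpr(uint8_t u8Tpr)
    {
        return u8Tpr >> 4;  /* e.g. APIC TPR 0xA7 -> VTPR/CR8 value 0xA */
    }
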
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- r64655
+++ r64663
@@ -3409,40 +3409,72 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
     {
-        /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
-        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
-        {
-            Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
-
-            bool    fPendingIntr  = false;
-            uint8_t u8Tpr         = 0;
-            uint8_t u8PendingIntr = 0;
-            rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
-            AssertRCReturn(rc, rc);
-
-            /*
-             * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
-             * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
-             * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
-             * the interrupt when we VM-exit for other reasons.
-             */
-            pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr;   /* Offset 0x80 is TPR in the APIC MMIO range. */
-            uint32_t u32TprThreshold = 0;
-            if (fPendingIntr)
-            {
-                /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
-                const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
-                const uint8_t u8TprPriority     = (u8Tpr >> 4) & 0xf;
-                if (u8PendingPriority <= u8TprPriority)
-                    u32TprThreshold = u8PendingPriority;
-                else
-                    u32TprThreshold = u8TprPriority;    /* Required for Vista 64-bit guest, see @bugref{6398}. */
-            }
-
-            rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
-            AssertRCReturn(rc, rc);
-        }
-
+        if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
+            && APICIsEnabled(pVCpu))
+        {
+            /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
+            if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
+            {
+                Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
+
+                bool    fPendingIntr  = false;
+                uint8_t u8Tpr         = 0;
+                uint8_t u8PendingIntr = 0;
+                rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
+                AssertRCReturn(rc, rc);
+
+                /*
+                 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
+                 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
+                 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
+                 * the interrupt when we VM-exit for other reasons.
+                 */
+                pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr;   /* Offset 0x80 is TPR in the APIC MMIO range. */
+                uint32_t u32TprThreshold = 0;
+                if (fPendingIntr)
+                {
+                    /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
+                    const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
+                    const uint8_t u8TprPriority     = (u8Tpr >> 4) & 0xf;
+                    if (u8PendingPriority <= u8TprPriority)
+                        u32TprThreshold = u8PendingPriority;
+                    else
+                        u32TprThreshold = u8TprPriority;    /* Required for Vista 64-bit guest, see @bugref{6398}. */
+                }
+
+                rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
+                AssertRCReturn(rc, rc);
+            }
+
+#ifndef IEM_VERIFICATION_MODE_FULL
+            /* Setup the Virtualized APIC accesses. */
+            if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
+            {
+                uint64_t u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
+                if (u64MsrApicBase != pVCpu->hm.s.vmx.u64MsrApicBase)
+                {
+                    PVM pVM = pVCpu->CTX_SUFF(pVM);
+                    Assert(pVM->hm.s.vmx.HCPhysApicAccess);
+                    RTGCPHYS GCPhysApicBase;
+                    GCPhysApicBase  = u64MsrApicBase;
+                    GCPhysApicBase &= PAGE_BASE_GC_MASK;
+
+                    /* Unalias any existing mapping. */
+                    rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
+                    AssertRCReturn(rc, rc);
+
+                    /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
+                    Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
+                    rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
+                    AssertRCReturn(rc, rc);
+
+                    /* Update VMX's cache of the APIC base. */
+                    pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
+                }
+            }
+#endif /* !IEM_VERIFICATION_MODE_FULL */
+        }
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
     }
+
     return rc;
 }
@@ -8602,31 +8634,4 @@
     else
         return rcStrict;
-
-    /** @todo r=ramshankar: Why can't we do this when the APIC base changes
-     *        in hmR0VmxLoadGuestApicState()? Also we can stop caching the
-     *        APIC base in several places just for HM usage and just take the
-     *        function call hit in load-guest state. */
-#ifndef IEM_VERIFICATION_MODE_FULL
-    /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
-    if (   pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
-        && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
-    {
-        Assert(pVM->hm.s.vmx.HCPhysApicAccess);
-        RTGCPHYS GCPhysApicBase;
-        GCPhysApicBase  = pMixedCtx->msrApicBase;
-        GCPhysApicBase &= PAGE_BASE_GC_MASK;
-
-        /* Unalias any existing mapping. */
-        int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
-        AssertRCReturn(rc, rc);
-
-        /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
-        Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
-        rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
-        AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
-    }
-#endif /* !IEM_VERIFICATION_MODE_FULL */
 
     if (TRPMHasTrap(pVCpu))
@@ -12093,6 +12098,7 @@
 
             /* If this is an X2APIC WRMSR access, update the APIC state as well. */
-            if (   pMixedCtx->ecx >= MSR_IA32_X2APIC_START
-                && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
+            if (    pMixedCtx->ecx == MSR_IA32_APICBASE
+                || (   pMixedCtx->ecx >= MSR_IA32_X2APIC_START
+                    && pMixedCtx->ecx <= MSR_IA32_X2APIC_END))
             {
                 /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
@@ -12698,5 +12704,5 @@
               ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
 
-    RTGCPHYS GCPhys = pMixedCtx->msrApicBase;           /* Always up-to-date, msrApicBase is not part of the VMCS. */
+    RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
     GCPhys &= PAGE_BASE_GC_MASK;
     GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);

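The TPR-threshold logic in the first hunk is self-contained enough to lift out; below is a standalone rendering of it (the function name and the main() driver are illustrative only). Clamping the threshold to the TPR class when the pending interrupt has a higher class is the Vista 64-bit workaround referenced by @bugref{6398}:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the threshold computation in the VMX hunk above. */
    static uint32_t demoCalcTprThreshold(bool fPendingIntr, uint8_t u8Tpr, uint8_t u8PendingIntr)
    {
        uint32_t u32TprThreshold = 0;
        if (fPendingIntr)
        {
            /* Bits 3:0 of the threshold correspond to bits 7:4 of the TPR (the Task-Priority Class). */
            const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
            const uint8_t u8TprPriority     = (u8Tpr >> 4) & 0xf;
            u32TprThreshold = u8PendingPriority <= u8TprPriority ? u8PendingPriority : u8TprPriority;
        }
        return u32TprThreshold;
    }

    int main(void)
    {
        /* TPR 0x50 (class 5) with pending vector 0x61 (class 6): the threshold is
           clamped to 5, so a VM-exit fires once the guest lowers its TPR below 0x50. */
        printf("threshold=%u\n", demoCalcTprThreshold(true, 0x50, 0x61));
        return 0;
    }
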
trunk/src/VBox/VMM/VMMR3/APIC.cpp
--- r64655
+++ r64663
@@ -1666,8 +1666,5 @@
     {
         case PDMAPICMODE_NONE:
-            /** @todo permanently disabling the APIC won't really work (needs
-             *        fixing in HM, CPUM, PDM and possibly other places). See
-             *        @bugref{8353}. */
-            return VMR3SetError(pVM->pUVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "APIC mode 'none' is not supported yet.");
+            LogRel(("APIC: APIC maximum mode configured as 'None', effectively disabled/not-present!\n"));
         case PDMAPICMODE_APIC:
         case PDMAPICMODE_X2APIC:

trunk/src/VBox/VMM/VMMR3/CPUM.cpp
--- r64655
+++ r64663
@@ -262,5 +262,4 @@
     SSMFIELD_ENTRY(         CPUMCTX, msrSFMASK),
     SSMFIELD_ENTRY(         CPUMCTX, msrKERNELGSBASE),
-    /* msrApicBase is not included here, it resides in the APIC device state. */
     SSMFIELD_ENTRY(         CPUMCTX, ldtr.Sel),
     SSMFIELD_ENTRY(         CPUMCTX, ldtr.ValidSel),
@@ -1152,11 +1151,3 @@
     /* C-state control. Guesses. */
     pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
-
-
-    /*
-     * Get the APIC base MSR from the APIC device. For historical reasons (saved state), the APIC base
-     * continues to reside in the APIC device and we cache it here in the VCPU for all further accesses.
-     */
-    pCtx->msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
-    LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", pVCpu->idCpu, pVCpu->cpum.s.Guest.msrApicBase));
 }
@@ -1606,8 +1597,4 @@
         /* Notify PGM of the NXE states in case they've changed. */
         PGMNotifyNxeChanged(pVCpu, RT_BOOL(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
-
-        /* Cache the local APIC base from the APIC device. During init. this is done in CPUMR3ResetCpu(). */
-        pVCpu->cpum.s.Guest.msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
-        LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", idCpu, pVCpu->cpum.s.Guest.msrApicBase));
 
         /* During init. this is done in CPUMR3InitCompleted(). */
@@ -2524,16 +2511,4 @@
         }
 
-        case VMINITCOMPLETED_RING0:
-        {
-            /* Cache the APIC base (from the APIC device) once it has been initialized. */
-            for (VMCPUID i = 0; i < pVM->cCpus; i++)
-            {
-                PVMCPU pVCpu = &pVM->aCpus[i];
-                pVCpu->cpum.s.Guest.msrApicBase = APICGetBaseMsrNoCheck(pVCpu);
-                LogRel(("CPUM%u: Cached APIC base MSR = %#RX64\n", i, pVCpu->cpum.s.Guest.msrApicBase));
-            }
-            break;
-        }
-
         default:
             break;

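With all three caching sites removed, the APIC device becomes the sole owner of the base MSR and readers take the function-call hit instead, as the deleted @todo in HMVMXR0.cpp suggested. A minimal sketch of the replacement pattern (the wrapper name is hypothetical):

    /* Was: pVCpu->cpum.s.Guest.msrApicBase (field removed by this changeset). */
    static uint64_t demoQueryApicBase(PVMCPU pVCpu)
    {
        return APICGetBaseMsrNoCheck(pVCpu);  /* always current, no cache to go stale */
    }
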
trunk/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
--- r62478
+++ r64663
@@ -58,4 +58,5 @@
 #include <VBox/vmm/cpum.h>
 #include <VBox/vmm/pgm.h>
+#include <VBox/vmm/apic.h>
 #include <VBox/vmm/dbgf.h>
 #include <VBox/vmm/dbgfcorefmt.h>
@@ -314,9 +315,8 @@
  * Gets the guest-CPU context suitable for dumping into the core file.
  *
- * @param   pVM         The cross context VM structure.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pDbgfCpu    Where to dump the guest-CPU data.
  */
-static void dbgfR3GetCoreCpu(PVM pVM, PCPUMCTX pCtx, PDBGFCORECPU pDbgfCpu)
+static void dbgfR3GetCoreCpu(PVMCPU pVCpu, PDBGFCORECPU pDbgfCpu)
 {
 #define DBGFCOPYSEL(a_dbgfsel, a_cpumselreg) \
@@ -328,4 +328,6 @@
     } while (0)
 
+    PVM       pVM  = pVCpu->CTX_SUFF(pVM);
+    PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     pDbgfCpu->rax = pCtx->rax;
     pDbgfCpu->rbx = pCtx->rbx;
@@ -375,5 +377,5 @@
     pDbgfCpu->msrSFMASK       = pCtx->msrSFMASK;
     pDbgfCpu->msrKernelGSBase = pCtx->msrKERNELGSBASE;
-    pDbgfCpu->msrApicBase     = pCtx->msrApicBase;
+    pDbgfCpu->msrApicBase     = APICGetBaseMsrNoCheck(pVCpu);
     pDbgfCpu->aXcr[0]         = pCtx->aXcr[0];
     pDbgfCpu->aXcr[1]         = pCtx->aXcr[1];
@@ -506,4 +508,5 @@
     /*
      * Write the CPU context note headers and data.
+     * We allocate the DBGFCORECPU struct. rather than using the stack as it can be pretty large due to X86XSAVEAREA.
      */
     Assert(RTFileTell(hFile) == offCpuDumps);
@@ -517,15 +520,8 @@
     for (uint32_t iCpu = 0; iCpu < pVM->cCpus; iCpu++)
     {
-        PVMCPU   pVCpu = &pVM->aCpus[iCpu];
-        PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
-        if (RT_UNLIKELY(!pCtx))
-        {
-            LogRel((DBGFLOG_NAME ": CPUMQueryGuestCtxPtr failed for vCPU[%u]\n", iCpu));
-            RTMemFree(pDbgfCoreCpu);
-            return VERR_INVALID_POINTER;
-        }
-
+        PVMCPU pVCpu = &pVM->aCpus[iCpu];
         RT_BZERO(pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
-        dbgfR3GetCoreCpu(pVM, pCtx, pDbgfCoreCpu);
+        dbgfR3GetCoreCpu(pVCpu, pDbgfCoreCpu);
+
         rc = Elf64WriteNoteHdr(hFile, NT_VBOXCPU, g_pcszCoreVBoxCpu, pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
         if (RT_FAILURE(rc))

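The new comment in the fifth hunk documents why pDbgfCoreCpu lives on the heap: DBGFCORECPU embeds X86XSAVEAREA and is too large for the stack. The per-VCPU loop then reduces to the pattern below, a condensed sketch of the last hunk with error handling elided (the RTMemAlloc call shown is an assumption; the allocation happens outside the quoted hunks):

    PDBGFCORECPU pDbgfCoreCpu = (PDBGFCORECPU)RTMemAlloc(sizeof(*pDbgfCoreCpu));
    if (!pDbgfCoreCpu)
        return VERR_NO_MEMORY;
    for (uint32_t iCpu = 0; iCpu < pVM->cCpus; iCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[iCpu];
        RT_BZERO(pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
        dbgfR3GetCoreCpu(pVCpu, pDbgfCoreCpu);  /* no more per-iteration pCtx NULL check */
        rc = Elf64WriteNoteHdr(hFile, NT_VBOXCPU, g_pcszCoreVBoxCpu, pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
        if (RT_FAILURE(rc))
            break;
    }
    RTMemFree(pDbgfCoreCpu);
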
trunk/src/VBox/VMM/include/CPUMInternal.mac
--- r62478
+++ r64663
@@ -219,5 +219,5 @@
     .Guest.msrSFMASK          resb    8
     .Guest.msrKERNELGSBASE    resb    8
-    .Guest.msrApicBase        resb    8
+    .Guest.uMsrPadding0       resb    8
     .Guest.aXcr               resq    2
     .Guest.fXStateMask        resq    1
@@ -477,5 +477,5 @@
     .Hyper.msrSFMASK          resb    8
     .Hyper.msrKERNELGSBASE    resb    8
-    .Hyper.msrApicBase        resb    8
+    .Hyper.uMsrPadding0       resb    8
     .Hyper.aXcr               resq    2
     .Hyper.fXStateMask        resq    1

trunk/src/VBox/VMM/testcase/tstVMStruct.h
--- r64655
+++ r64663
@@ -183,5 +183,4 @@
     GEN_CHECK_OFF(CPUMCTX, msrSFMASK);
     GEN_CHECK_OFF(CPUMCTX, msrKERNELGSBASE);
-    GEN_CHECK_OFF(CPUMCTX, msrApicBase);
     GEN_CHECK_OFF(CPUMCTX, ldtr);
     GEN_CHECK_OFF(CPUMCTX, tr);