Changeset 24243 in vbox
- Timestamp: Nov 2, 2009 10:24:15 AM (15 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
HWACCM.cpp (modified) (21 diffs)
-
HWACCMInternal.h (modified) (4 diffs)
-
VMMR0/HWSVMR0.cpp (modified) (8 diffs)
-
VMMR0/HWVMXR0.cpp (modified) (5 diffs)
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/HWACCM.cpp
r23794 r24243 1105 1105 LogRel(("HWACCM: enmFlushContext %d\n", pVM->hwaccm.s.vmx.enmFlushContext)); 1106 1106 } 1107 1108 /* TPR patching status logging. */ 1109 if (pVM->hwaccm.s.fTRPPatchingAllowed) 1110 { 1111 if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1112 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 1113 { 1114 pVM->hwaccm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */ 1115 LogRel(("HWACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n")); 1116 } 1117 else 1118 { 1119 /* TPR patching needs access to the MSR_K8_LSTAR msr. */ 1120 if (!CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE)) 1121 { 1122 pVM->hwaccm.s.fTRPPatchingAllowed = false; 1123 LogRel(("HWACCM: TPR patching disabled (long mode not supported).\n")); 1124 } 1125 } 1126 } 1127 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled")); 1107 1128 } 1108 1129 else … … 1455 1476 pVM->hwaccm.s.pFreeGuestPatchMem = 0; 1456 1477 pVM->hwaccm.s.cbGuestPatchMem = 0; 1457 pVM->hwaccm.s. svm.cPatches = 0;1458 pVM->hwaccm.s. svm.PatchTree = 0;1459 pVM->hwaccm.s. svm.fTPRPatchingActive = false;1460 ASMMemZero32(pVM->hwaccm.s. svm.aPatches, sizeof(pVM->hwaccm.s.svm.aPatches));1478 pVM->hwaccm.s.cPatches = 0; 1479 pVM->hwaccm.s.PatchTree = 0; 1480 pVM->hwaccm.s.fTPRPatchingActive = false; 1481 ASMMemZero32(pVM->hwaccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches)); 1461 1482 } 1462 1483 … … 1479 1500 1480 1501 Log(("hwaccmR3RemovePatches\n")); 1481 for (unsigned i = 0; i < pVM->hwaccm.s. svm.cPatches; i++)1502 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++) 1482 1503 { 1483 1504 uint8_t szInstr[15]; 1484 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s. 
svm.aPatches[i];1505 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i]; 1485 1506 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key; 1486 1507 int rc; … … 1517 1538 #endif 1518 1539 } 1519 pVM->hwaccm.s. svm.cPatches= 0;1520 pVM->hwaccm.s. svm.PatchTree= 0;1521 pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;1522 pVM->hwaccm.s. svm.fTPRPatchingActive = false;1540 pVM->hwaccm.s.cPatches = 0; 1541 pVM->hwaccm.s.PatchTree = 0; 1542 pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem; 1543 pVM->hwaccm.s.fTPRPatchingActive = false; 1523 1544 return VINF_SUCCESS; 1524 1545 } … … 1555 1576 { 1556 1577 Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem)); 1557 1558 /* Current TPR patching only applies to AMD cpus.1559 * Needs to be extended to Intel CPUs without the APIC TPR hardware optimization.1560 */1561 if (CPUMGetHostCpuVendor(pVM) != CPUMCPUVENDOR_AMD)1562 return VERR_NOT_SUPPORTED;1563 1564 1578 if (pVM->cCpus > 1) 1565 1579 { … … 1595 1609 pVM->hwaccm.s.pFreeGuestPatchMem = 0; 1596 1610 pVM->hwaccm.s.cbGuestPatchMem = 0; 1597 pVM->hwaccm.s. svm.fTPRPatchingActive = false;1611 pVM->hwaccm.s.fTPRPatchingActive = false; 1598 1612 return VINF_SUCCESS; 1599 1613 } … … 1624 1638 1625 1639 /* Two or more VCPUs were racing to patch this instruction. */ 1626 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s. svm.PatchTree, (AVLOU32KEY)pCtx->eip);1640 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 1627 1641 if (pPatch) 1628 1642 return VINF_SUCCESS; 1629 1643 1630 Assert(pVM->hwaccm.s. svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches));1644 Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches)); 1631 1645 1632 1646 int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp); … … 1637 1651 { 1638 1652 uint8_t aVMMCall[3] = { 0xf, 0x1, 0xd9}; 1639 uint32_t idx = pVM->hwaccm.s. 
svm.cPatches;1640 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s. svm.aPatches[idx];1653 uint32_t idx = pVM->hwaccm.s.cPatches; 1654 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[idx]; 1641 1655 1642 1656 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp); … … 1688 1702 && pDis->param2.flags == USE_IMMEDIATE8 1689 1703 && pDis->param2.parval == 4 1690 && oldcbOp + cbOp < sizeof(pVM->hwaccm.s. svm.aPatches[idx].aOpcode))1704 && oldcbOp + cbOp < sizeof(pVM->hwaccm.s.aPatches[idx].aOpcode)) 1691 1705 { 1692 1706 uint8_t szInstr[15]; … … 1729 1743 1730 1744 pPatch->Core.Key = pCtx->eip; 1731 rc = RTAvloU32Insert(&pVM->hwaccm.s. svm.PatchTree, &pPatch->Core);1745 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core); 1732 1746 AssertRC(rc); 1733 1747 1734 pVM->hwaccm.s. svm.cPatches++;1748 pVM->hwaccm.s.cPatches++; 1735 1749 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess); 1736 1750 return VINF_SUCCESS; … … 1738 1752 1739 1753 /* Save invalid patch, so we will not try again. */ 1740 uint32_t idx = pVM->hwaccm.s. svm.cPatches;1754 uint32_t idx = pVM->hwaccm.s.cPatches; 1741 1755 1742 1756 #ifdef LOG_ENABLED … … 1747 1761 #endif 1748 1762 1749 pPatch = &pVM->hwaccm.s. svm.aPatches[idx];1763 pPatch = &pVM->hwaccm.s.aPatches[idx]; 1750 1764 pPatch->Core.Key = pCtx->eip; 1751 1765 pPatch->enmType = HWACCMTPRINSTR_INVALID; 1752 rc = RTAvloU32Insert(&pVM->hwaccm.s. svm.PatchTree, &pPatch->Core);1766 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core); 1753 1767 AssertRC(rc); 1754 pVM->hwaccm.s. svm.cPatches++;1768 pVM->hwaccm.s.cPatches++; 1755 1769 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure); 1756 1770 return VINF_SUCCESS; … … 1782 1796 return VINF_SUCCESS; 1783 1797 1784 Assert(pVM->hwaccm.s. svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches));1798 Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches)); 1785 1799 1786 1800 /* Two or more VCPUs were racing to patch this instruction. 
*/ 1787 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s. svm.PatchTree, (AVLOU32KEY)pCtx->eip);1801 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 1788 1802 if (pPatch) 1789 1803 { … … 1800 1814 && cbOp >= 5) 1801 1815 { 1802 uint32_t idx = pVM->hwaccm.s. svm.cPatches;1803 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s. svm.aPatches[idx];1816 uint32_t idx = pVM->hwaccm.s.cPatches; 1817 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[idx]; 1804 1818 uint8_t aPatch[64]; 1805 1819 uint32_t off = 0; … … 1963 1977 1964 1978 pPatch->Core.Key = pCtx->eip; 1965 rc = RTAvloU32Insert(&pVM->hwaccm.s. svm.PatchTree, &pPatch->Core);1979 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core); 1966 1980 AssertRC(rc); 1967 1981 1968 pVM->hwaccm.s. svm.cPatches++;1969 pVM->hwaccm.s. svm.fTPRPatchingActive = true;1982 pVM->hwaccm.s.cPatches++; 1983 pVM->hwaccm.s.fTPRPatchingActive = true; 1970 1984 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess); 1971 1985 return VINF_SUCCESS; … … 1976 1990 1977 1991 /* Save invalid patch, so we will not try again. */ 1978 uint32_t idx = pVM->hwaccm.s. svm.cPatches;1992 uint32_t idx = pVM->hwaccm.s.cPatches; 1979 1993 1980 1994 #ifdef LOG_ENABLED … … 1984 1998 #endif 1985 1999 1986 pPatch = &pVM->hwaccm.s. svm.aPatches[idx];2000 pPatch = &pVM->hwaccm.s.aPatches[idx]; 1987 2001 pPatch->Core.Key = pCtx->eip; 1988 2002 pPatch->enmType = HWACCMTPRINSTR_INVALID; 1989 rc = RTAvloU32Insert(&pVM->hwaccm.s. svm.PatchTree, &pPatch->Core);2003 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core); 1990 2004 AssertRC(rc); 1991 pVM->hwaccm.s. svm.cPatches++;2005 pVM->hwaccm.s.cPatches++; 1992 2006 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure); 1993 2007 return VINF_SUCCESS; … … 2420 2434 2421 2435 /* Store all the guest patch records too. */ 2422 rc = SSMR3PutU32(pSSM, pVM->hwaccm.s. 
svm.cPatches);2436 rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cPatches); 2423 2437 AssertRCReturn(rc, rc); 2424 2438 2425 for (unsigned i = 0; i < pVM->hwaccm.s. svm.cPatches; i++)2426 { 2427 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s. svm.aPatches[i];2439 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++) 2440 { 2441 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i]; 2428 2442 2429 2443 rc = SSMR3PutU32(pSSM, pPatch->Core.Key); … … 2525 2539 2526 2540 /* Fetch all TPR patch records. */ 2527 rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s. svm.cPatches);2541 rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cPatches); 2528 2542 AssertRCReturn(rc, rc); 2529 2543 2530 for (unsigned i = 0; i < pVM->hwaccm.s. svm.cPatches; i++)2531 { 2532 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s. svm.aPatches[i];2544 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++) 2545 { 2546 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i]; 2533 2547 2534 2548 rc = SSMR3GetU32(pSSM, &pPatch->Core.Key); … … 2551 2565 2552 2566 if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT) 2553 pVM->hwaccm.s. svm.fTPRPatchingActive = true;2554 2555 Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s. svm.fTPRPatchingActive == false);2567 pVM->hwaccm.s.fTPRPatchingActive = true; 2568 2569 Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false); 2556 2570 2557 2571 rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand); … … 2576 2590 Log(("cFaults = %d\n", pPatch->cFaults)); 2577 2591 Log(("target = %x\n", pPatch->pJumpTarget)); 2578 rc = RTAvloU32Insert(&pVM->hwaccm.s. svm.PatchTree, &pPatch->Core);2592 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core); 2579 2593 AssertRC(rc); 2580 2594 } -
trunk/src/VBox/VMM/HWACCMInternal.h
r23699 r24243 269 269 bool fGlobalInit; 270 270 271 /** Set when TPR patching is active. */ 272 bool fTPRPatchingActive; 273 bool u8Alignment[7]; 274 271 275 /** And mask for copying register contents. */ 272 276 uint64_t u64RegisterMask; … … 402 406 /** Set if erratum 170 affects the AMD cpu. */ 403 407 bool fAlwaysFlushTLB; 404 /** Set when TPR patching is active. */ 405 bool fTPRPatchingActive; 408 bool u8Alignment; 406 409 407 410 /** R0 memory object for the IO bitmap (12kb). */ … … 417 420 /** SVM feature bits from cpuid 0x8000000a */ 418 421 uint32_t u32Features; 419 420 /**421 * AVL tree with all patches (active or disabled) sorted by guest instruction address422 */423 AVLOU32TREE PatchTree;424 uint32_t cPatches;425 HWACCMTPRPATCH aPatches[64];426 422 } svm; 423 424 /** 425 * AVL tree with all patches (active or disabled) sorted by guest instruction address 426 */ 427 AVLOU32TREE PatchTree; 428 uint32_t cPatches; 429 HWACCMTPRPATCH aPatches[64]; 427 430 428 431 struct … … 437 440 /** HWACCMR0Init was run */ 438 441 bool fHWACCMR0Init; 439 bool u8Alignment [7];442 bool u8Alignment1[7]; 440 443 441 444 STAMCOUNTER StatTPRPatchSuccess; -
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r24216 r24243 1095 1095 AssertRC(rc); 1096 1096 1097 if (pVM->hwaccm.s. svm.fTPRPatchingActive)1097 if (pVM->hwaccm.s.fTPRPatchingActive) 1098 1098 { 1099 1099 /* Our patch code uses LSTAR for TPR caching. */ … … 1519 1519 if (fSyncTPR) 1520 1520 { 1521 if (pVM->hwaccm.s. svm.fTPRPatchingActive)1521 if (pVM->hwaccm.s.fTPRPatchingActive) 1522 1522 { 1523 1523 if ((pCtx->msrLSTAR & 0xff) != u8LastTPR) … … 1655 1655 && CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0 1656 1656 && !CPUMIsGuestInLongModeEx(pCtx) 1657 && pVM->hwaccm.s. svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches))1657 && pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches)) 1658 1658 { 1659 1659 RTGCPHYS GCPhysApicBase, GCPhys; … … 1666 1666 { 1667 1667 /* Only attempt to patch the instruction once. */ 1668 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s. svm.PatchTree, (AVLOU32KEY)pCtx->eip);1668 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 1669 1669 if (!pPatch) 1670 1670 { … … 1823 1823 && CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0 1824 1824 && !CPUMIsGuestInLongModeEx(pCtx) 1825 && pVM->hwaccm.s. svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches))1825 && pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches)) 1826 1826 { 1827 1827 RTGCPHYS GCPhysApicBase; … … 1832 1832 { 1833 1833 /* Only attempt to patch the instruction once. */ 1834 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s. svm.PatchTree, (AVLOU32KEY)pCtx->eip);1834 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 1835 1835 if (!pPatch) 1836 1836 { … … 2380 2380 2381 2381 /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */ 2382 if ( pVM->hwaccm.s. 
svm.fTPRPatchingActive2382 if ( pVM->hwaccm.s.fTPRPatchingActive 2383 2383 && pCtx->ecx == MSR_K8_LSTAR 2384 2384 && pVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */) … … 2529 2529 uint8_t u8Tpr; 2530 2530 2531 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s. svm.PatchTree, (AVLOU32KEY)pCtx->eip);2531 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 2532 2532 if (!pPatch) 2533 2533 break; -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r24216 r24243 2278 2278 /* Check if we need to use TPR shadowing. */ 2279 2279 if ( CPUMIsGuestInLongModeEx(pCtx) 2280 || ( ( pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)2280 || ( ((pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) || pVM->hwaccm.s.fTRPPatchingAllowed) 2281 2281 && pVM->hwaccm.s.fHasIoApic) 2282 2282 ) … … 2490 2490 rc = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, (fPending) ? (u8LastTPR >> 4) : 0); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */ 2491 2491 AssertRC(rc); 2492 2493 if (pVM->hwaccm.s.fTPRPatchingActive) 2494 { 2495 Assert(!CPUMIsGuestInLongModeEx(pCtx)); 2496 /* Our patch code uses LSTAR for TPR caching. */ 2497 pCtx->msrLSTAR = u8LastTPR; 2498 2499 if (fPending) 2500 /* A TPR change could activate a pending interrupt, so catch lstar writes. */ 2501 vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, false); 2502 else 2503 /* No interrupts are pending, so we don't need to be explicitely notified. 2504 * There are enough world switches for detecting pending interrupts. 2505 */ 2506 vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true); 2507 } 2492 2508 } 2493 2509 … … 2698 2714 2699 2715 /* Sync back the TPR if it was changed. */ 2700 if ( fSetupTPRCaching 2701 && u8LastTPR != pVCpu->hwaccm.s.vmx.pVAPIC[0x80]) 2702 { 2703 rc = PDMApicSetTPR(pVCpu, pVCpu->hwaccm.s.vmx.pVAPIC[0x80]); 2704 AssertRC(rc); 2716 if (fSetupTPRCaching) 2717 { 2718 if (pVM->hwaccm.s.fTPRPatchingActive) 2719 { 2720 if ((pCtx->msrLSTAR & 0xff) != u8LastTPR) 2721 { 2722 /* Our patch code uses LSTAR for TPR caching. 
*/ 2723 rc = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff); 2724 AssertRC(rc); 2725 } 2726 } 2727 else 2728 if (u8LastTPR != pVCpu->hwaccm.s.vmx.pVAPIC[0x80]) 2729 { 2730 rc = PDMApicSetTPR(pVCpu, pVCpu->hwaccm.s.vmx.pVAPIC[0x80]); 2731 AssertRC(rc); 2732 } 2705 2733 } 2706 2734 … … 2794 2822 #endif 2795 2823 Assert(!pVM->hwaccm.s.fNestedPaging); 2824 2825 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING 2826 /* Shortcut for APIC TPR reads and writes; 32 bits guests only */ 2827 if ( pVM->hwaccm.s.fTRPPatchingAllowed 2828 && pVM->hwaccm.s.pGuestPatchMem 2829 && (exitQualification & 0xfff) == 0x080 2830 && !(errCode & X86_TRAP_PF_P) /* not present */ 2831 && CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0 2832 && !CPUMIsGuestInLongModeEx(pCtx) 2833 && pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches)) 2834 { 2835 RTGCPHYS GCPhysApicBase, GCPhys; 2836 PDMApicGetBase(pVM, &GCPhysApicBase); /* @todo cache this */ 2837 GCPhysApicBase &= PAGE_BASE_GC_MASK; 2838 2839 rc = PGMGstGetPage(pVCpu, (RTGCPTR)exitQualification, NULL, &GCPhys); 2840 if ( rc == VINF_SUCCESS 2841 && GCPhys == GCPhysApicBase) 2842 { 2843 /* Only attempt to patch the instruction once. */ 2844 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 2845 if (!pPatch) 2846 { 2847 rc = VINF_EM_HWACCM_PATCH_TPR_INSTR; 2848 break; 2849 } 2850 } 2851 } 2852 #endif 2796 2853 2797 2854 Log2(("Page fault at %RGv error code %x\n", exitQualification, errCode)); … … 3427 3484 } 3428 3485 3486 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */ 3487 /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. 
*/ 3488 if ( pVM->hwaccm.s.fTPRPatchingActive 3489 && pCtx->ecx == MSR_K8_LSTAR) 3490 { 3491 Assert(!CPUMIsGuestInLongModeEx(pCtx)); 3492 if ((pCtx->eax & 0xff) != u8LastTPR) 3493 { 3494 Log(("VMX: Faulting MSR_K8_LSTAR write with new TPR value %x\n", pCtx->eax & 0xff)); 3495 3496 /* Our patch code uses LSTAR for TPR caching. */ 3497 rc = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff); 3498 AssertRC(rc); 3499 } 3500 3501 /* Skip the instruction and continue. */ 3502 pCtx->rip += cbInstr; /* wrmsr = [0F 30] */ 3503 3504 /* Only resume if successful. */ 3505 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x); 3506 goto ResumeExecution; 3507 } 3508 /* no break */ 3429 3509 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */ 3430 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */3431 3510 { 3432 3511 uint32_t cbSize;
Note: See TracChangeset for help on using the changeset viewer.

